diff --git a/paddle/fluid/operators/detail/strided_memcpy.h b/paddle/fluid/operators/detail/strided_memcpy.h
index 9ebdb369feb8cd1ac98ec63302eb3afd3260771e..e29b057ed57a7ddcf3df975ba3ccdb15f3c2791b 100644
--- a/paddle/fluid/operators/detail/strided_memcpy.h
+++ b/paddle/fluid/operators/detail/strided_memcpy.h
@@ -41,7 +41,8 @@ struct StridedMemcpyFunctor<T, 0> {
       memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T),
                    cuda_ctx.stream());
 #else
-      PADDLE_THROW("Paddle is not compiled with GPU");
+      PADDLE_THROW(
+          platform::errors::Unavailable("Paddle is not compiled with GPU."));
 #endif
     }
   }
@@ -64,7 +65,8 @@ struct StridedMemcpyFunctor<T, 1> {
       memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T) * dst_dim[0],
                    cuda_ctx.stream());
 #else
-      PADDLE_THROW("Paddle is not compiled with GPU");
+      PADDLE_THROW(
+          platform::errors::Unavailable("Paddle is not compiled with GPU."));
 #endif
     }
   }
diff --git a/paddle/fluid/operators/distributed/parameter_recv.cc b/paddle/fluid/operators/distributed/parameter_recv.cc
index 3dfa1d4ff933eb0e4cb1563162324637f8861812..d5d3c9c3c7c48fa162e18823bb237901e064315c 100644
--- a/paddle/fluid/operators/distributed/parameter_recv.cc
+++ b/paddle/fluid/operators/distributed/parameter_recv.cc
@@ -86,8 +86,10 @@ void RecvSparseLodTensor(const CommContext &rpc_ctx,
     height += splited_var->Get<framework::LoDTensor>().dims()[0];
   }
 
-  PADDLE_ENFORCE_EQ(merged_var->Get<framework::LoDTensor>().dims()[0], height,
-                    "recved var must has same dims with local var");
+  PADDLE_ENFORCE_EQ(
+      merged_var->Get<framework::LoDTensor>().dims()[0], height,
+      platform::errors::InvalidArgument(
+          "Received variable must have same dimension as local variable."));
 
   auto *merged_t = merged_var->GetMutable<framework::LoDTensor>();
   auto *merged_d = merged_t->mutable_data<T>(cpu_place);
diff --git a/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc b/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc
index 08909bcb6fcb9b231ba2c703cf4760d7870a25ba..6fbe3b8d3bbeda6d2cc8ff73327fdd7054202e54 100644
--- a/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc
+++ b/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc
@@ -218,9 +218,11 @@ class FusedFCElementwiseLayerNormOpMaker
         .SetDefault(1e-5)
         .AddCustomChecker([](const float &epsilon) {
           PADDLE_ENFORCE_GE(epsilon, 0.0f,
-                            "'epsilon' should be between 0.0 and 0.001.");
+                            platform::errors::InvalidArgument(
+                                "'epsilon' should be between 0.0 and 0.001."));
           PADDLE_ENFORCE_LE(epsilon, 0.001f,
-                            "'epsilon' should be between 0.0 and 0.001.");
+                            platform::errors::InvalidArgument(
+                                "'epsilon' should be between 0.0 and 0.001."));
         });
     AddAttr<int>("begin_norm_axis",
                  "the axis of `begin_norm_axis ... Rank(Y) - 1` will be "
@@ -228,8 +230,10 @@ class FusedFCElementwiseLayerNormOpMaker
                  "matrix [N,H]. [default 1].")
         .SetDefault(1)
         .AddCustomChecker([](const int &begin_norm_axis) {
-          PADDLE_ENFORCE_GT(begin_norm_axis, 0,
-                            "'begin_norm_axis' should be greater than zero.");
+          PADDLE_ENFORCE_GT(
+              begin_norm_axis, 0,
+              platform::errors::InvalidArgument(
+                  "'begin_norm_axis' should be greater than zero."));
         });
     AddComment(R"DOC(
 fc_out <= fc(X, W, Bias0)
diff --git a/paddle/fluid/operators/interpolate_v2_op.cc b/paddle/fluid/operators/interpolate_v2_op.cc
index 1f7dde9b931dafa4b8e0bee211e64461b1c21dc5..3362f2474fe25274ea0c19b6f77e46c045e4a232 100644
--- a/paddle/fluid/operators/interpolate_v2_op.cc
+++ b/paddle/fluid/operators/interpolate_v2_op.cc
@@ -118,9 +118,10 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
   PADDLE_ENFORCE(
       "bilinear" == interp_method || "nearest" == interp_method ||
           "bicubic" == interp_method,
-      "Interpolation method can only be \"bilinear\" or \"nearest\" when "
-      "Input(X) dimension is 4, but got method = %s .",
-      interp_method);
+      platform::errors::InvalidArgument(
+          "Interpolation method can only be \"bilinear\", \"nearest\" or "
+          "\"bicubic\" when Input(X) dimension is 4, but got method = %s.",
+          interp_method));
 
   const DataLayout data_layout = framework::StringToDataLayout(
       ctx->Attrs().Get<std::string>("data_layout"));
@@ -305,12 +306,15 @@ static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) {
 
   if (ctx->HasInput("OutSize") && ctx->IsRuntime()) {
     auto out_size_dim = ctx->GetInputDim("OutSize");
-    PADDLE_ENFORCE_EQ(out_size_dim.size(), 1,
-                      "OutSize's dimension size must be 1, but got size =%d .",
-                      out_size_dim.size());
+    PADDLE_ENFORCE_EQ(
+        out_size_dim.size(), 1,
+        platform::errors::InvalidArgument(
+            "OutSize's dimension size must be 1, but received %d.",
+            out_size_dim.size()));
     PADDLE_ENFORCE_EQ(out_size_dim[0], 3,
-                      "OutSize's dim[0] must be 3, but got size = %d .",
-                      out_size_dim[0]);
+                      platform::errors::InvalidArgument(
+                          "OutSize's dim[0] must be 3, but received %d.",
+                          out_size_dim[0]));
     ctx->ShareLoD("X", "Out");
     return;
   }
@@ -330,10 +334,8 @@ class InterpolateV2Op : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of InterpolateV2Op should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of InterpolationOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Interpolate");
 
     auto dim_x = ctx->GetInputDim("X");  // NCHW format
     PADDLE_ENFORCE(
@@ -576,9 +578,10 @@ class InterpolateV2OpGrad : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InterpolateGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "InterpolateGrad");
+
     auto dim_x = ctx->GetInputDim("X");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), dim_x);
diff --git a/paddle/fluid/operators/metrics/accuracy_op_xpu.cc b/paddle/fluid/operators/metrics/accuracy_op_xpu.cc
index c0aa00e79341ea0f2daffce8637f92b3c56b3670..294539b696f6320b410f9507082f715812c73ccb 100644
--- a/paddle/fluid/operators/metrics/accuracy_op_xpu.cc
+++ b/paddle/fluid/operators/metrics/accuracy_op_xpu.cc
@@ -98,7 +98,7 @@ class AccuracyXPUKernel : public framework::OpKernel<T> {
                           label_int32_device, num_samples, class_dim,
                           correct_data, total_data, accuracy_data);
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU accuracy kernel error!"));
     dev_ctx.Wait();
     xpu_free(indices_int32_device);
     xpu_free(label_int32_device);
diff --git a/paddle/fluid/operators/optimizers/adadelta_op.cc b/paddle/fluid/operators/optimizers/adadelta_op.cc
index e3da79125be24f3156b10a4d1daedd3db2b841cf..21249d2375a4a06dfa9f37175bb813b321c22e01 100644
--- a/paddle/fluid/operators/optimizers/adadelta_op.cc
+++ b/paddle/fluid/operators/optimizers/adadelta_op.cc
@@ -71,7 +71,8 @@ class AdadeltaOp : public framework::OperatorWithKernel {
     auto param_dim = ctx->GetInputDim("Param");
     PADDLE_ENFORCE_EQ(
         param_dim, ctx->GetInputDim("Grad"),
-        "param and grad input of AdadeltaOp should have same dimension");
+        platform::errors::InvalidArgument(
+            "Param and Grad input of AdadeltaOp should have same dimension."));
     PADDLE_ENFORCE_NE(
         framework::product(ctx->GetInputDim("AvgSquaredGrad")), 0,
         platform::errors::InvalidArgument(
diff --git a/paddle/fluid/operators/optimizers/dpsgd_op.h b/paddle/fluid/operators/optimizers/dpsgd_op.h
index e52a1dd9db1791e0be82eb1ee47999d2b8f51175..688a7f1ad8435bda3197fd6d7d385802b46821b6 100644
--- a/paddle/fluid/operators/optimizers/dpsgd_op.h
+++ b/paddle/fluid/operators/optimizers/dpsgd_op.h
@@ -50,8 +50,14 @@ class DpsgdOpKernel : public framework::OpKernel<T> {
     auto *param_out = ctx.Output<framework::Tensor>("ParamOut");
 
     auto sz = param_out->numel();
-    PADDLE_ENFORCE_EQ(param->numel(), sz);
-    PADDLE_ENFORCE_EQ(grad->numel(), sz);
+    PADDLE_ENFORCE_EQ(param->numel(), sz,
+                      platform::errors::InvalidArgument(
+                          "Input parameter's number of elements is wrong, "
+                          "expected %zu, received %zu.", sz, param->numel()));
+    PADDLE_ENFORCE_EQ(grad->numel(), sz,
+                      platform::errors::InvalidArgument(
+                          "Input gradient's number of elements is wrong, "
+                          "expected %zu, received %zu.", sz, grad->numel()));
 
     const T *lr = learning_rate->data<T>();
     const T *param_data = param->data<T>();
diff --git a/paddle/fluid/operators/scale_op_xpu.cc b/paddle/fluid/operators/scale_op_xpu.cc
index 4002be81001521bda07d818a2ff4c8255708aa96..b778bab8f93087e08709273b3859939a6358b855 100644
--- a/paddle/fluid/operators/scale_op_xpu.cc
+++ b/paddle/fluid/operators/scale_op_xpu.cc
@@ -49,7 +49,7 @@ class ScaleXPUKernel : public framework::OpKernel<T> {
     int r = xpu::scale(dev_ctx.x_context(), in->numel(), scale, bias,
                        bias_after_scale, in->data<T>(), out->data<T>());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU scale kernel error!"));
   }
 };
 
diff --git a/paddle/fluid/operators/sign_op_xpu.cc b/paddle/fluid/operators/sign_op_xpu.cc
index 44fd555544e7fe23cc80432a03250e3b23f5d80b..86fe826c659ef94a2473048f580d7cb698075522 100644
--- a/paddle/fluid/operators/sign_op_xpu.cc
+++ b/paddle/fluid/operators/sign_op_xpu.cc
@@ -30,7 +30,7 @@ class SignXPUKernel : public framework::OpKernel<T> {
     int r = xpu::activation_forward(xpu_context, xpu::Activation_t::SIGN,
                                     in->numel(), in->data<T>(), out->data<T>());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU sign kernel error!"));
   }
 };
 
diff --git a/paddle/fluid/operators/sum_op_xpu.cc b/paddle/fluid/operators/sum_op_xpu.cc
index 14928061d23dd9e1fac03a242b10666999aea4d1..f15910fd4f65b50f55b884bf90d5a84bfe3bb601 100644
--- a/paddle/fluid/operators/sum_op_xpu.cc
+++ b/paddle/fluid/operators/sum_op_xpu.cc
@@ -51,7 +51,7 @@ class SumXPUKernel : public framework::OpKernel<T> {
     int r = xpu::sum_batch(dev_ctx.x_context(), ptrs.data(), out->data<T>(),
                            valid_count, out->numel());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU sum kernel error!"));
   }
 };
 
diff --git a/paddle/fluid/operators/top_k_op.cc b/paddle/fluid/operators/top_k_op.cc
index d3f9754d307c6040a66a3452d7bb008159ff46e5..cce5ad2631733392fb08dffaba71d931bb0a6bcc 100644
--- a/paddle/fluid/operators/top_k_op.cc
+++ b/paddle/fluid/operators/top_k_op.cc
@@ -36,7 +36,9 @@ class TopkOp : public framework::OperatorWithKernel {
     auto input_dims = ctx->GetInputDim("X");
     const int k = static_cast<int>(ctx->Attrs().Get<int>("k"));
 
-    PADDLE_ENFORCE_GE(k, 1, "k must >= 1");
+    PADDLE_ENFORCE_GE(k, 1,
+                      platform::errors::InvalidArgument(
+                          "Attribute k must be >= 1, but received %d.", k));
     PADDLE_ENFORCE_GE(input_dims.size(), 1,
                       platform::errors::InvalidArgument(
                           "input must have >= 1d shape"));
diff --git a/paddle/fluid/operators/top_k_op.cu b/paddle/fluid/operators/top_k_op.cu
index 0a694e1ad5b012d70a89ddcca2d70fbe8c9e24ba..39a56f874d95029017f35e46792edd0935bb35cf 100644
--- a/paddle/fluid/operators/top_k_op.cu
+++ b/paddle/fluid/operators/top_k_op.cu
@@ -96,7 +96,8 @@ class TopkOpCUDAKernel : public framework::OpKernel<T> {
             output_data, k, indices_data, input_data, input_width,
             input_width, static_cast<int>(k), gridx, input_height));
       default:
-        PADDLE_THROW("Error");
+        PADDLE_THROW(platform::errors::Unavailable(
+            "Calculation error occurred in TopK Operator's CUDA Kernel."));
     }
   }
 };
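
Reviewer note: every hunk above applies the same convention, so here is a minimal
standalone sketch of it. The macros and error types (PADDLE_ENFORCE_*,
PADDLE_THROW, platform::errors::InvalidArgument / Unavailable / Fatal) are the
ones used in the diff itself; the CheckTopK helper is hypothetical and exists
only to illustrate the pattern, it is not part of this change.

// Hypothetical illustration, not part of this diff. Compiles inside the
// Paddle source tree, which provides the enforce.h header.
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace operators {

// Convention applied throughout the diff: each check carries a typed error
// from platform::errors plus a descriptive message that interpolates the
// offending value, instead of a bare message string (or no message at all).
inline void CheckTopK(int k) {
  PADDLE_ENFORCE_GE(k, 1,
                    platform::errors::InvalidArgument(
                        "Attribute k must be >= 1, but received %d.", k));
}

}  // namespace operators
}  // namespace paddle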