From b337110aa4a26ce0baea2e49e374c46a5c3f8ac7 Mon Sep 17 00:00:00 2001
From: ceci3
Date: Wed, 13 May 2020 15:46:25 +0800
Subject: [PATCH] [Cherry pick] update err message (#24464)

* update err info, test=develop

* update, test=release/1.8
---
 paddle/fluid/operators/batch_norm_op.cc    | 40 +++++++++-----
 paddle/fluid/operators/batch_norm_op.cu    | 64 ++++++++++++++++++----
 paddle/fluid/operators/dropout_op.h        |  5 +-
 paddle/fluid/operators/instance_norm_op.cc | 12 ++--
 paddle/fluid/operators/instance_norm_op.cu | 48 ++++++++++----
 5 files changed, 128 insertions(+), 41 deletions(-)

diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc
index 684d2ef8628..d93f72b30a4 100644
--- a/paddle/fluid/operators/batch_norm_op.cc
+++ b/paddle/fluid/operators/batch_norm_op.cc
@@ -62,7 +62,9 @@ void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
     auto mom = ctx->Inputs("MomentumTensor");
     PADDLE_ENFORCE_EQ(mom.size(), 1,
                       platform::errors::InvalidArgument(
-                          "Input(MomentumTensor) size must be 1"));
+                          "The input tensor MomentumTensor's size must be 1. "
+                          "But received: MomentumTensor's size is [%d]",
+                          mom.size()));
   }
 
   PADDLE_ENFORCE_GE(
@@ -298,12 +300,18 @@ class BatchNormKernel
     const auto *x = ctx.Input<Tensor>("X");
     const auto &x_dims = x->dims();
-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      platform::errors::InvalidArgument(
-                          "The Input X dim size should be larger than 1."));
-    PADDLE_ENFORCE_LE(x_dims.size(), 5,
-                      platform::errors::InvalidArgument(
-                          "The Input X dim size should be less than 6."));
+    PADDLE_ENFORCE_GE(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "The size of input X's dimensions should be larger than 1. "
+            "But received: the size of input X's dimensions is [%d]",
+            x_dims.size()));
+    PADDLE_ENFORCE_LE(
+        x_dims.size(), 5,
+        platform::errors::InvalidArgument(
+            "The size of input X's dimensions should be less than 6. "
+            "But received: the size of input X's dimensions is [%d]",
+            x_dims.size()));
     const int N = x_dims[0];
     const int C = (data_layout == DataLayout::kNCHW ? x_dims[1]
@@ -606,12 +614,18 @@ class BatchNormGradKernel
     // Get the size for each dimension.
     // NCHW [batch_size, in_channels, in_height, in_width]
     const auto &x_dims = x->dims();
-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      platform::errors::InvalidArgument(
-                          "The Input X dim size should be larger than 1."));
-    PADDLE_ENFORCE_LE(x_dims.size(), 5,
-                      platform::errors::InvalidArgument(
-                          "The Input X dim size should be less than 6."));
+    PADDLE_ENFORCE_GE(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "The size of input X's dimensions should be larger than 1. "
+            "But received: the size of input X's dimensions is [%d]",
+            x_dims.size()));
+    PADDLE_ENFORCE_LE(
+        x_dims.size(), 5,
+        platform::errors::InvalidArgument(
+            "The size of input X's dimensions should be less than 6. "
+            "But received: the size of input X's dimensions is [%d]",
+            x_dims.size()));
     const int N = x_dims[0];
     const int C = (data_layout == DataLayout::kNCHW ? x_dims[1]
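A note on the new message strings above: C++ merges adjacent string literals at compile time, so each fragment must end with a space or punctuation before the line break, or the pieces run together in the final message. A minimal standalone sketch of the pitfall (plain printf stands in for the enforce formatting; this is not Paddle code):

#include <cstdio>

int main() {
  // The compiler concatenates the two literals into one format string, so
  // the trailing ". " on the first fragment is what separates the clauses:
  // "...must be 1. But received..." rather than "...must be 1But received...".
  const char *msg =
      "The input tensor MomentumTensor's size must be 1. "
      "But received: MomentumTensor's size is [%d]";
  std::printf(msg, 2);
  std::printf("\n");
  return 0;
}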
diff --git a/paddle/fluid/operators/batch_norm_op.cu b/paddle/fluid/operators/batch_norm_op.cu
index e40049a51d1..be834772679 100644
--- a/paddle/fluid/operators/batch_norm_op.cu
+++ b/paddle/fluid/operators/batch_norm_op.cu
@@ -58,8 +58,12 @@ class BatchNormKernel
     // NCHW [batch_size, in_channels, in_height, in_width]
     const auto *x = ctx.Input<Tensor>("X");
     const auto &x_dims = x->dims();
-    PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
-                   "The Input dim size should be between 2 and 5");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size() >= 2 && x_dims.size() <= 5, true,
+        platform::errors::InvalidArgument(
+            "The size of input's dimensions should be between 2 and 5. "
+            "But received: the size of input's dimensions is [%d]",
+            x_dims.size()));
     auto *y = ctx.Output<Tensor>("Y");
     y->mutable_data<T>(ctx.GetPlace());
@@ -151,10 +155,34 @@ class BatchNormKernel
       const auto *est_mean = ctx.Input<Tensor>("Mean");
       const auto *est_var = ctx.Input<Tensor>("Variance");
       // Run inference mode.
-      PADDLE_ENFORCE_EQ(est_mean->dims().size(), 1UL);
-      PADDLE_ENFORCE_EQ(est_var->dims().size(), 1UL);
-      PADDLE_ENFORCE_EQ(est_mean->dims()[0], C);
-      PADDLE_ENFORCE_EQ(est_var->dims()[0], C);
+      PADDLE_ENFORCE_EQ(
+          est_mean->dims().size(), 1UL,
+          platform::errors::InvalidArgument(
+              "The size of mean's dimensions must equal to 1. "
+              "But received: the size of mean's dimensions is [%d], "
+              "the dimensions of mean is [%s].",
+              est_mean->dims().size(), est_mean->dims()));
+      PADDLE_ENFORCE_EQ(
+          est_var->dims().size(), 1UL,
+          platform::errors::InvalidArgument(
+              "The size of variance's dimensions must equal to 1. "
+              "But received: the size of variance's dimensions is [%d], "
+              "the dimensions of variance is [%s].",
+              est_var->dims().size(), est_var->dims()));
+      PADDLE_ENFORCE_EQ(
+          est_mean->dims()[0], C,
+          platform::errors::InvalidArgument(
+              "The first dimension of mean must equal to the number of "
+              "Channels, which is [%d]. But received: the first dimension "
+              "of mean is [%d], the dimensions of mean is [%s].",
+              C, est_mean->dims()[0], est_mean->dims()));
+      PADDLE_ENFORCE_EQ(
+          est_var->dims()[0], C,
+          platform::errors::InvalidArgument(
+              "The first dimension of variance must equal to the number "
+              "of Channels, which is [%d]. But received: the first dimension "
+              "of variance is [%d], the dimensions of variance is [%s].",
+              C, est_var->dims()[0], est_var->dims()));
 
       PADDLE_ENFORCE_CUDA_SUCCESS(
           platform::dynload::cudnnBatchNormalizationForwardInference(
@@ -503,8 +531,13 @@ class BatchNormGradKernel
 
     const auto &x_dims = x->dims();
 
-    PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
-                   "The Input dim size should be between 2 and 5");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size() >= 2 && x_dims.size() <= 5, true,
+        platform::errors::InvalidArgument(
+            "The size of input's dimensions should be between 2 and 5. "
+            "But received: the size of input's dimensions is [%d], "
+            "the dimensions of input is [%s].",
+            x_dims.size(), x_dims));
     int N, C, H, W, D;
     ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
@@ -515,8 +548,19 @@ class BatchNormGradKernel
       d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
       d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
     }
-    PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL);
-    PADDLE_ENFORCE_EQ(scale->dims()[0], C);
+    PADDLE_ENFORCE_EQ(
+        scale->dims().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "The size of scale's dimensions must equal to 1. But received: "
+            "the size of scale's dimensions is [%d], the dimensions of scale "
+            "is [%s].",
+            scale->dims().size(), scale->dims()));
+    PADDLE_ENFORCE_EQ(
+        scale->dims()[0], C,
+        platform::errors::InvalidArgument(
+            "The first dimension of scale must equal to Channels[%d]. But "
+            "received: the first dimension of scale is [%d].",
+            C, scale->dims()[0]));
 
     auto dtype = platform::CudnnDataType<T>::type;
     const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace");
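These .cu hunks also migrate the old two-argument PADDLE_ENFORCE(cond, msg) form to PADDLE_ENFORCE_EQ(cond, true, error), folding the compound range check into one boolean compared against `true`. A simplified mock of that calling pattern (the macro below is an illustrative stand-in, not the real one in paddle/fluid/platform/enforce.h, which throws an EnforceNotMet exception instead of aborting):

#include <cstdio>
#include <cstdlib>

// Illustrative stand-in: print the formatted message and abort when the
// two operands differ.
#define DEMO_ENFORCE_EQ(lhs, rhs, ...)      \
  do {                                      \
    if ((lhs) != (rhs)) {                   \
      std::fprintf(stderr, __VA_ARGS__);    \
      std::fputc('\n', stderr);             \
      std::abort();                         \
    }                                       \
  } while (0)

int main() {
  int dims = 7;
  // The compound condition is compared against `true`, mirroring the hunks:
  DEMO_ENFORCE_EQ(dims >= 2 && dims <= 5, true,
                  "The size of input's dimensions should be between 2 and 5. "
                  "But received: the size of input's dimensions is [%d].",
                  dims);
  return 0;
}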
diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h
index b2bfbc1f826..676361289e8 100644
--- a/paddle/fluid/operators/dropout_op.h
+++ b/paddle/fluid/operators/dropout_op.h
@@ -108,8 +108,9 @@ template <typename DeviceContext, typename T>
 class DropoutGradKernel : public framework::OpKernel<T> {
  public:
  void Compute(const framework::ExecutionContext& context) const override {
-    PADDLE_ENFORCE(!context.Attr<bool>("is_test"),
-                   "GradOp is only callable when is_test is false");
+    PADDLE_ENFORCE_EQ(!context.Attr<bool>("is_test"), true,
+                      platform::errors::PreconditionNotMet(
+                          "GradOp is only callable when is_test is false"));
     auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
     auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
diff --git a/paddle/fluid/operators/instance_norm_op.cc b/paddle/fluid/operators/instance_norm_op.cc
index 127987a1789..2609d243705 100644
--- a/paddle/fluid/operators/instance_norm_op.cc
+++ b/paddle/fluid/operators/instance_norm_op.cc
@@ -247,7 +247,8 @@ framework::OpKernelType InstanceNormGradOp::GetExpectedKernelType(
     const framework::ExecutionContext &ctx) const {
   const auto *var = ctx.InputVar(framework::GradVarName("Y"));
   if (var == nullptr) {
-    PADDLE_THROW("cannot find Y@GRAD");
+    PADDLE_THROW(
+        platform::errors::NotFound("cannot find gradient variable of Y"));
   }
   const Tensor *t = nullptr;
   if (var->IsType<Tensor>()) {
@@ -256,7 +257,8 @@ framework::OpKernelType InstanceNormGradOp::GetExpectedKernelType(
     t = &var->Get<LoDTensor>();
   }
   if (t == nullptr) {
-    PADDLE_THROW("cannot find Y@GRAD");
+    PADDLE_THROW(
+        platform::errors::InvalidArgument("gradient variable of Y is empty"));
   }
   return framework::OpKernelType(
       OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
@@ -387,7 +389,8 @@ framework::OpKernelType InstanceNormDoubleGradOp::GetExpectedKernelType(
     const framework::ExecutionContext &ctx) const {
   const auto *var = ctx.InputVar("DY");
   if (var == nullptr) {
-    PADDLE_THROW("cannot find Y@GRAD");
+    PADDLE_THROW(
+        platform::errors::NotFound("cannot find gradient variable of Y"));
   }
   const Tensor *t = nullptr;
   if (var->IsType<Tensor>()) {
@@ -396,7 +399,8 @@ framework::OpKernelType InstanceNormDoubleGradOp::GetExpectedKernelType(
     t = &var->Get<LoDTensor>();
   }
   if (t == nullptr) {
-    PADDLE_THROW("cannot find Y@GRAD");
+    PADDLE_THROW(
+        platform::errors::InvalidArgument("gradient variable of Y is empty"));
   }
   return framework::OpKernelType(
       OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
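The THROW sites above draw a line between two failure modes: platform::errors::NotFound when the gradient variable is missing from the scope entirely, and platform::errors::InvalidArgument when the variable exists but holds no tensor. A standalone sketch of that two-stage check (the types and names below are invented for illustration; only the control flow mirrors the hunks):

#include <stdexcept>

struct GradVar {
  bool holds_tensor = false;
};

void CheckGradVar(const GradVar *var) {
  // Stage 1: the variable itself is absent -> "not found".
  if (var == nullptr) {
    throw std::runtime_error("NotFound: cannot find gradient variable of Y");
  }
  // Stage 2: the variable exists but carries no tensor -> "invalid argument".
  if (!var->holds_tensor) {
    throw std::invalid_argument("gradient variable of Y is empty");
  }
}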
diff --git a/paddle/fluid/operators/instance_norm_op.cu b/paddle/fluid/operators/instance_norm_op.cu
index 86b1cef5e37..1567c229cdc 100644
--- a/paddle/fluid/operators/instance_norm_op.cu
+++ b/paddle/fluid/operators/instance_norm_op.cu
@@ -70,18 +70,27 @@ class InstanceNormKernel : public framework::OpKernel<T> {
  public:
  void Compute(const framework::ExecutionContext &ctx) const override {
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must be CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        platform::errors::PreconditionNotMet("It must be CUDAPlace."));
     double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
 
     auto *x = ctx.Input<Tensor>("X");
     auto &x_dims = x->dims();
-    PADDLE_ENFORCE_GE(
-        x_dims.size(), 2,
-        "the dimension of input X must greater than or equal to 2");
-    PADDLE_ENFORCE_LE(
-        x_dims.size(), 5,
-        "the dimension of input X must smaller than or equal to 5");
+    PADDLE_ENFORCE_GE(x_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "The `shape` in InstanceNormOp is invalid: "
+                          "the size of X's dimensions must be greater than "
+                          "or equal to 2. But received: "
+                          "the size of X's dimensions is [%d]",
+                          x_dims.size()));
+    PADDLE_ENFORCE_LE(x_dims.size(), 5,
+                      platform::errors::InvalidArgument(
+                          "The `shape` in InstanceNormOp is invalid: "
+                          "the size of X's dimensions must be smaller than "
+                          "or equal to 5. But received: "
+                          "the size of X's dimensions is [%d]",
+                          x_dims.size()));
     int N, C, H, W, D;
     ExtractNCWHD(x_dims, DataLayout::kNCHW, &N, &C, &H, &W, &D);
     int NxC = N * C;
@@ -231,8 +240,9 @@ class InstanceNormGradKernel
  public:
  void Compute(const framework::ExecutionContext &ctx) const override {
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
     const auto *scale = ctx.Input<Tensor>("Scale");
     const auto *x = ctx.Input<Tensor>("X");
@@ -257,8 +267,22 @@ class InstanceNormGradKernel
       d_scale->mutable_data<T>(ctx.GetPlace());
       d_bias->mutable_data<T>(ctx.GetPlace());
     }
-    PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL);
-    PADDLE_ENFORCE_EQ(scale->dims()[0], C);
+    PADDLE_ENFORCE_EQ(
+        scale->dims().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "The `shape` in InstanceNormOp is invalid: "
+            "the size of scale's dimensions must be equal to 1. "
+            "But received: the size of scale's dimensions is [%d].",
+            scale->dims().size()));
+    PADDLE_ENFORCE_EQ(scale->dims()[0], C,
+                      platform::errors::InvalidArgument(
+                          "The `shape` in InstanceNormOp is invalid: "
+                          "the first dimension of scale must be equal to "
+                          "Channels([%d]). But received: "
+                          "the first dimension of scale is [%d], "
+                          "the dimensions of scale is [%s].",
+                          C, scale->dims()[0], scale->dims()));
 
     auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
-- 
GitLab
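Taken together, the hunks follow a single convention for choosing an error class. A rough summary, inferred from this patch alone (the helper below is hypothetical and exists only to make the mapping explicit):

#include <string>

enum class ErrKind { kInvalidArgument, kPreconditionNotMet, kNotFound };

// Hypothetical classifier mirroring the choices made in the patch.
ErrKind ClassifyFailure(const std::string &mode) {
  if (mode == "variable missing from scope")  // e.g. Y@GRAD cannot be found
    return ErrKind::kNotFound;
  if (mode == "wrong place or mode")  // e.g. not CUDAPlace, or is_test is set
    return ErrKind::kPreconditionNotMet;
  // Everything else in the patch is a bad shape, size, or empty value.
  return ErrKind::kInvalidArgument;
}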