From 9f83f0fe69e622925417b0200cd4ae6d5fdcb62d Mon Sep 17 00:00:00 2001
From: lijianshe02 <48898730+lijianshe02@users.noreply.github.com>
Date: Thu, 14 May 2020 10:31:56 +0800
Subject: [PATCH] API/OP (group_norm, layer_norm, random_crop, unpool) error
 message enhancement (#24413)

* API/OP (group_norm, layer_norm, unpool) error message enhancement test=develop
---
 paddle/fluid/operators/group_norm_op.cc | 14 +++++++++++---
 paddle/fluid/operators/layer_norm_op.cc | 10 ++++------
 paddle/fluid/operators/layer_norm_op.cu |  4 ++--
 paddle/fluid/operators/random_crop_op.h | 25 ++++++++++++++++++++++---
 paddle/fluid/operators/unpool_op.cc     | 12 +++++++++---
 5 files changed, 48 insertions(+), 17 deletions(-)

diff --git a/paddle/fluid/operators/group_norm_op.cc b/paddle/fluid/operators/group_norm_op.cc
index 0659f51e973..793b6079372 100644
--- a/paddle/fluid/operators/group_norm_op.cc
+++ b/paddle/fluid/operators/group_norm_op.cc
@@ -122,12 +122,20 @@ class GroupNormOpMaker : public framework::OpProtoAndCheckerMaker {
                    "Constant for numerical stability [default 1e-5].")
         .SetDefault(1e-5)
         .AddCustomChecker([](const float &epsilon) {
-          PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 1.0f,
-                         "'epsilon' should be between 0.0 and 1.0.");
+          PADDLE_ENFORCE_EQ(epsilon >= 0.0f && epsilon <= 1.0f, true,
+                            platform::errors::InvalidArgument(
+                                "'epsilon' in Op(GroupNorm) should be between "
+                                "0.0 and 1.0, but received [%f].",
+                                epsilon));
         });
     AddAttr<int>("groups", "The number of groups that divided from channels.")
         .AddCustomChecker([](const int &groups) {
-          PADDLE_ENFORCE_GT(groups, 0, "'groups' should be greater than zero.");
+          PADDLE_ENFORCE_GT(
+              groups, 0,
+              platform::errors::InvalidArgument(
+                  "'groups' in Op(GroupNorm) should be greater than zero, "
+                  "but received [%d].",
+                  groups));
         });
     AddAttr<std::string>("data_layout",
                          "An optional string from: \"NHWC\", \"NCHW\". ")
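The group_norm hunks above illustrate the pattern the whole patch applies: a bare PADDLE_ENFORCE becomes a typed PADDLE_ENFORCE_* check that attaches an error category from platform::errors plus a formatted message carrying the offending value. Below is a minimal, self-contained sketch of that shape; the ENFORCE_GT macro is a hypothetical stand-in for Paddle's PADDLE_ENFORCE_GT, which additionally records file and line and wraps the text in platform::errors::InvalidArgument.

    #include <cstdio>
    #include <stdexcept>

    // Hypothetical stand-in for PADDLE_ENFORCE_GT (not Paddle's real macro):
    // check a > b, else throw with a printf-style formatted message.
    #define ENFORCE_GT(a, b, ...)                        \
      do {                                               \
        if (!((a) > (b))) {                              \
          char buf[256];                                 \
          std::snprintf(buf, sizeof(buf), __VA_ARGS__);  \
          throw std::invalid_argument(buf);              \
        }                                                \
      } while (0)

    int main() {
      int groups = 0;
      try {
        ENFORCE_GT(groups, 0,
                   "'groups' in Op(GroupNorm) should be greater than zero, "
                   "but received [%d].",
                   groups);
      } catch (const std::invalid_argument &e) {
        std::printf("%s\n", e.what());  // message now names the op and the value
      }
      return 0;
    }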
") diff --git a/paddle/fluid/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc index b738a80a0d2..3c308ecd510 100644 --- a/paddle/fluid/operators/layer_norm_op.cc +++ b/paddle/fluid/operators/layer_norm_op.cc @@ -179,18 +179,16 @@ class LayerNormGradOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { const auto *var = ctx.InputVar(framework::GradVarName("Y")); - if (var == nullptr) { - PADDLE_THROW("can't find Y@GRAD"); - } + PADDLE_ENFORCE_NOT_NULL(var, platform::errors::NotFound( + "Y@GRAD of LayerNorm Op is not found.")); const Tensor *t = nullptr; if (var->IsType()) { t = &var->Get(); } else if (var->IsType()) { t = &var->Get(); } - if (t == nullptr) { - PADDLE_THROW("can't find Y@GRAD"); - } + PADDLE_ENFORCE_NOT_NULL( + t, platform::errors::NotFound("Y@GRAD of LayerNorm Op is not found.")); return framework::OpKernelType(t->type(), ctx.GetPlace()); } }; diff --git a/paddle/fluid/operators/layer_norm_op.cu b/paddle/fluid/operators/layer_norm_op.cu index 10a732e86ea..30bafb5c13e 100644 --- a/paddle/fluid/operators/layer_norm_op.cu +++ b/paddle/fluid/operators/layer_norm_op.cu @@ -528,8 +528,8 @@ class LayerNormKernel x_data, scale_data, bias_data, y_data, mean_data, var_data, epsilon, feature_size)); default: - PADDLE_THROW( - "Product from begin_norm_axis to end must be larger than 1"); + PADDLE_THROW(platform::errors::InvalidArgument( + "Product from begin_norm_axis to end must be larger than 1")); break; } } diff --git a/paddle/fluid/operators/random_crop_op.h b/paddle/fluid/operators/random_crop_op.h index c84f886c80f..62edb298d1a 100644 --- a/paddle/fluid/operators/random_crop_op.h +++ b/paddle/fluid/operators/random_crop_op.h @@ -106,8 +106,21 @@ struct RandomCropFunctor { num_batchsize_dims_(num_batchsize_dims), rank_(x_dims.size()), seed_(seed) { - PADDLE_ENFORCE_EQ(x_dims.size(), out_dims.size()); - PADDLE_ENFORCE_GT(rank_, num_batchsize_dims_); + PADDLE_ENFORCE_EQ( + x_dims.size(), out_dims.size(), + platform::errors::InvalidArgument( + "The dimensions of Input(X) must equal to be the dimensions" + "of Output(Out), but received dimensions of Input(X) is [%d]," + "received dimensions of Output(Out) is [%d].", + x_dims.size(), out_dims.size())); + PADDLE_ENFORCE_GT( + rank_, num_batchsize_dims_, + platform::errors::InvalidArgument( + "The dimensions of Input(X) must be greater than the diff" + "value of Input(X)'s dimensions minus Atrr(shape)'s dimensions," + "But received Input(X)'s dimensions is [%d], received value of" + "Input(X)'s dimensions minus Attr(shape)'s dimensions is [%d].", + rank_, num_batchsize_dims_)); prod_batchsize_dims_ = 1; prod_x_ins_dims_ = 1; prod_out_ins_dims_ = 1; @@ -117,7 +130,13 @@ struct RandomCropFunctor { x_dims_[i] = x_dim_i; out_dims_[i] = out_dim_i; if (i < static_cast(num_batchsize_dims_)) { - PADDLE_ENFORCE_EQ(x_dim_i, out_dim_i); + PADDLE_ENFORCE_EQ( + x_dim_i, out_dim_i, + platform::errors::InvalidArgument( + "The first [%d] dimension value of Input(X) and Output(Out)" + "must be equal, but received the [%d] dimension value of" + "Input(X) and Output(Out) respectively are [%d] and [%d].", + num_batchsize_dims_, i, x_dim_i, out_dim_i)); prod_batchsize_dims_ *= x_dim_i; } else { prod_x_ins_dims_ *= x_dim_i; diff --git a/paddle/fluid/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc index 2da248aaea4..ad50d92c727 100644 --- a/paddle/fluid/operators/unpool_op.cc +++ b/paddle/fluid/operators/unpool_op.cc @@ -95,10 +95,16 
diff --git a/paddle/fluid/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc
index 2da248aaea4..ad50d92c727 100644
--- a/paddle/fluid/operators/unpool_op.cc
+++ b/paddle/fluid/operators/unpool_op.cc
@@ -95,10 +95,16 @@ class UnpoolOp : public framework::OperatorWithKernel {
     std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
     PADDLE_ENFORCE_EQ(in_x_dims.size() == 4, true,
                       platform::errors::InvalidArgument(
-                          "Unpooling Intput(X) must be of 4-dimensional, but "
-                          "received Input(X)'s dimension is %d.",
+                          "Unpool Input(X) must be 4-dimensional, but "
+                          "received Input(X)'s dimension is %d.",
                           in_x_dims.size()));
-    PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims);
+    PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims,
+                      platform::errors::InvalidArgument(
+                          "The dimensions of Input(X) must be equal to the "
+                          "dimensions of Input(Indices), but received the "
+                          "dimensions of Input(X) as [%s] and of "
+                          "Input(Indices) as [%s].",
+                          in_x_dims, in_y_dims));
     std::vector<int> output_shape({in_x_dims[0], in_x_dims[1]});
     for (size_t i = 0; i < ksize.size(); ++i) {
-- 
GitLab
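As a closing illustration of the layer_norm change, the two if/PADDLE_THROW blocks collapse into single PADDLE_ENFORCE_NOT_NULL statements that both test the pointer and attach a categorized message. A minimal standalone sketch, with ENFORCE_NOT_NULL as a hypothetical stand-in for Paddle's PADDLE_ENFORCE_NOT_NULL (which wraps the message in platform::errors::NotFound and records file/line):

    #include <stdexcept>

    // Hypothetical stand-in for PADDLE_ENFORCE_NOT_NULL (not Paddle's real
    // macro): throw if the pointer is null, carrying a descriptive message.
    #define ENFORCE_NOT_NULL(ptr, msg)    \
      do {                                \
        if ((ptr) == nullptr) {           \
          throw std::runtime_error(msg);  \
        }                                 \
      } while (0)

    int main() {
      const float *t = nullptr;
      try {
        ENFORCE_NOT_NULL(t, "Y@GRAD of LayerNorm Op is not found.");
      } catch (const std::runtime_error &) {
        return 0;  // the check fired, as expected for a null pointer
      }
      return 1;
    }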