Unverified commit 101fcf81, authored by lijianshe02, committed by GitHub

API/OP (group_norm, layer_norm, random_crop, unpool) error message enhancement (#24413) (#24525)

* API/OP (group_norm, layer_norm, unpool) error message enhancement test=develop
Parent d49c3061
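Every hunk in this commit applies the same pattern: a bare PADDLE_ENFORCE(cond, msg) or PADDLE_THROW(msg) is replaced by a typed macro (PADDLE_ENFORCE_EQ, PADDLE_ENFORCE_GT, PADDLE_ENFORCE_NOT_NULL) plus an error class from platform::errors that names the error category and reports the offending value. A minimal before/after sketch of the pattern, distilled from the GroupNorm checker in the first hunk below:

// Before: boolean condition with a bare string, no error category.
PADDLE_ENFORCE_GT(groups, 0, "'groups' should be greater than zero.");

// After: comparison macro plus a typed error that carries the bad value.
PADDLE_ENFORCE_GT(
    groups, 0,
    platform::errors::InvalidArgument(
        "'groups' in Op(GroupNorm) should be greater than zero, "
        "but received [%d].",
        groups));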
......
@@ -122,12 +122,20 @@ class GroupNormOpMaker : public framework::OpProtoAndCheckerMaker {
"Constant for numerical stability [default 1e-5].")
.SetDefault(1e-5)
.AddCustomChecker([](const float &epsilon) {
PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 1.0f,
"'epsilon' should be between 0.0 and 1.0.");
PADDLE_ENFORCE_EQ(epsilon >= 0.0f && epsilon <= 1.0f, true,
platform::errors::InvalidArgument(
"'epsilon' in Op(GroupNorm) should be between"
"0.0 and 1.0f, But received [%s].",
epsilon));
});
AddAttr<int>("groups", "The number of groups that divided from channels.")
.AddCustomChecker([](const int &groups) {
PADDLE_ENFORCE_GT(groups, 0, "'groups' should be greater than zero.");
PADDLE_ENFORCE_GT(
groups, 0,
platform::errors::InvalidArgument(
"'groups' in Op(GroupNorm) should be greater than zero,"
"But received [%s].",
groups));
});
AddAttr<std::string>("data_layout",
"An optional string from: \"NHWC\", \"NCHW\". ")
......
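The two AddCustomChecker lambdas above run when the GroupNorm op is constructed, so malformed attributes are rejected before any kernel executes. As a self-contained illustration of the same range checks (plain C++ with no Paddle dependency; the function name is made up for this sketch):

#include <stdexcept>
#include <string>

// Illustrative stand-in for the GroupNorm attribute checkers above.
void CheckGroupNormAttrs(float epsilon, int groups) {
  if (!(epsilon >= 0.0f && epsilon <= 1.0f)) {
    throw std::invalid_argument(
        "'epsilon' in Op(GroupNorm) should be between 0.0 and 1.0, "
        "but received " + std::to_string(epsilon) + ".");
  }
  if (groups <= 0) {
    throw std::invalid_argument(
        "'groups' in Op(GroupNorm) should be greater than zero, "
        "but received " + std::to_string(groups) + ".");
  }
}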
......
@@ -179,18 +179,16 @@ class LayerNormGradOp : public framework::OperatorWithKernel {
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
const auto *var = ctx.InputVar(framework::GradVarName("Y"));
if (var == nullptr) {
PADDLE_THROW("can't find Y@GRAD");
}
PADDLE_ENFORCE_NOT_NULL(var, platform::errors::NotFound(
"Y@GRAD of LayerNorm Op is not found."));
const Tensor *t = nullptr;
if (var->IsType<Tensor>()) {
t = &var->Get<Tensor>();
} else if (var->IsType<LoDTensor>()) {
t = &var->Get<LoDTensor>();
}
if (t == nullptr) {
PADDLE_THROW("can't find Y@GRAD");
}
PADDLE_ENFORCE_NOT_NULL(
t, platform::errors::NotFound("Y@GRAD of LayerNorm Op is not found."));
return framework::OpKernelType(t->type(), ctx.GetPlace());
}
};
......
......
@@ -528,8 +528,8 @@ class LayerNormKernel<platform::CUDADeviceContext, T>
x_data, scale_data, bias_data, y_data, mean_data, var_data,
epsilon, feature_size));
default:
PADDLE_THROW(
"Product from begin_norm_axis to end must be larger than 1");
PADDLE_THROW(platform::errors::InvalidArgument(
"Product from begin_norm_axis to end must be larger than 1"));
break;
}
}
......
......
@@ -106,8 +106,21 @@ struct RandomCropFunctor {
num_batchsize_dims_(num_batchsize_dims),
rank_(x_dims.size()),
seed_(seed) {
PADDLE_ENFORCE_EQ(x_dims.size(), out_dims.size());
PADDLE_ENFORCE_GT(rank_, num_batchsize_dims_);
PADDLE_ENFORCE_EQ(
x_dims.size(), out_dims.size(),
platform::errors::InvalidArgument(
"The dimensions of Input(X) must equal to be the dimensions"
"of Output(Out), but received dimensions of Input(X) is [%d],"
"received dimensions of Output(Out) is [%d].",
x_dims.size(), out_dims.size()));
PADDLE_ENFORCE_GT(
rank_, num_batchsize_dims_,
platform::errors::InvalidArgument(
"The dimensions of Input(X) must be greater than the diff"
"value of Input(X)'s dimensions minus Atrr(shape)'s dimensions,"
"But received Input(X)'s dimensions is [%d], received value of"
"Input(X)'s dimensions minus Attr(shape)'s dimensions is [%d].",
rank_, num_batchsize_dims_));
prod_batchsize_dims_ = 1;
prod_x_ins_dims_ = 1;
prod_out_ins_dims_ = 1;
......
@@ -117,7 +130,13 @@ struct RandomCropFunctor {
x_dims_[i] = x_dim_i;
out_dims_[i] = out_dim_i;
if (i < static_cast<size_t>(num_batchsize_dims_)) {
PADDLE_ENFORCE_EQ(x_dim_i, out_dim_i);
PADDLE_ENFORCE_EQ(
x_dim_i, out_dim_i,
platform::errors::InvalidArgument(
"The first [%d] dimension value of Input(X) and Output(Out)"
"must be equal, but received the [%d] dimension value of"
"Input(X) and Output(Out) respectively are [%d] and [%d].",
num_batchsize_dims_, i, x_dim_i, out_dim_i));
prod_batchsize_dims_ *= x_dim_i;
} else {
prod_x_ins_dims_ *= x_dim_i;
......
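The RandomCropFunctor checks above encode three invariants: Input(X) and Output(Out) share a rank, that rank is greater than the number of leading batch dimensions (num_batchsize_dims_ is Input(X)'s rank minus the size of Attr(shape)), and the leading batch dimensions of X and Out match element-wise. A compact restatement of the same invariants as a standalone predicate (illustrative only, not Paddle API):

#include <cstdint>
#include <vector>

// Illustrative predicate mirroring the RandomCrop shape checks above.
bool RandomCropDimsValid(const std::vector<int64_t>& x_dims,
                         const std::vector<int64_t>& out_dims,
                         int num_batchsize_dims) {
  if (x_dims.size() != out_dims.size()) return false;  // equal rank
  if (static_cast<int>(x_dims.size()) <= num_batchsize_dims) return false;  // rank > batch dims
  for (int i = 0; i < num_batchsize_dims; ++i) {
    if (x_dims[i] != out_dims[i]) return false;  // matching batch dims
  }
  return true;
}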
......
@@ -95,10 +95,16 @@ class UnpoolOp : public framework::OperatorWithKernel {
std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
PADDLE_ENFORCE_EQ(in_x_dims.size() == 4, true,
platform::errors::InvalidArgument(
"Unpooling Intput(X) must be of 4-dimensional, but "
"received Input(X)'s dimension is %d.",
"Unpool Intput(X) must be of 4-dimensional, but "
"received Input(X)'s dimensions is %d.",
in_x_dims.size()));
PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims);
PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims,
platform::errors::InvalidArgument(
"The dimensions of Input(X) must equal to be"
"the dimensions of Input(Indices), but received"
"dimensions of Input(X) is [%d], received dimensions"
"of Input(Indices) is [%d]",
in_x_dims, in_y_dims));
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
for (size_t i = 0; i < ksize.size(); ++i) {
......
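The loop truncated above appends the spatial entries of output_shape from ksize, strides, and paddings. For orientation, a sketch of the conventional max-unpool output-size formula for one spatial dimension (the inverse of the floor-division pooling formula; the helper name is illustrative and not the operator's actual code):

// Illustrative helper: spatial output size of max-unpool along one axis,
// assuming output_size = (input_size - 1) * stride - 2 * padding + ksize.
inline int UnpoolOutputSizeSketch(int input_size, int ksize, int padding,
                                  int stride) {
  return (input_size - 1) * stride - 2 * padding + ksize;
}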