Unverified commit 8c25dfaa, authored by T Thunderbrook, committed by GitHub

op error info (#27856)

* op error info

* style

* code format
Parent 79b5db13
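This commit applies one mechanical change across the slice, space_to_depth, and split operators: every bare message string passed to a `PADDLE_ENFORCE_*` check is wrapped in `platform::errors::InvalidArgument(...)`, so the failure carries a typed error class instead of a raw string. Below is a minimal sketch of the before/after shape of such a check, assuming Paddle's enforce and ddim headers; the helper function and its placement are illustrative, not part of the commit.

```cpp
// Sketch only: mirrors the wrapping pattern applied throughout this commit.
// Assumes the PaddlePaddle source tree; enforce.h provides the macros and
// the platform::errors types. The helper name is hypothetical.
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace operators {

void CheckInputRank(const framework::DDim &in_dims) {
  // Before: the comparison macro took a bare message string.
  //   PADDLE_ENFORCE_LT(in_dims.size(), 7,
  //                     "The rank of input should be less than 7.");
  // After: the message is wrapped in a typed error object, so an
  // InvalidArgument failure can be told apart from other error kinds.
  PADDLE_ENFORCE_LT(in_dims.size(), 7,
                    platform::errors::InvalidArgument(
                        "The rank of input should be less than 7."));
}

}  // namespace operators
}  // namespace paddle
```

The same wrapping is applied verbatim to every check in the hunks below.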
@@ -29,10 +29,12 @@ class SliceOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true,
-                      "Input (Input) of slice op should not be null.");
+                      platform::errors::InvalidArgument(
+                          "Input (Input) of slice op should not be null."));
     PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output (Out) of slice op should not be null.");
+                      platform::errors::InvalidArgument(
+                          "Output (Out) of slice op should not be null."));
     auto x_var_type = ctx->GetInputsVarType("Input")[0];
     auto axes = ctx->Attrs().Get<std::vector<int>>("axes");
     if (x_var_type == framework::proto::VarType::LOD_TENSOR_ARRAY) {
@@ -57,7 +59,8 @@ class SliceOp : public framework::OperatorWithKernel {
     }
     auto in_dims = ctx->GetInputDim("Input");
     PADDLE_ENFORCE_LT(in_dims.size(), 7,
-                      "The rank of input should be less than 7.");
+                      platform::errors::InvalidArgument(
+                          "The rank of input should be less than 7."));
     framework::DDim out_dims(in_dims);
     auto starts = ctx->Attrs().Get<std::vector<int>>("starts");
@@ -76,31 +79,37 @@ class SliceOp : public framework::OperatorWithKernel {
     if (ctx->HasInputs("StartsTensorList")) {
       auto StartsTensorList = ctx->Inputs("StartsTensorList");
       PADDLE_ENFORCE_GT(StartsTensorList.size(), 0,
-                        "StartsTensorList size can't be zero");
+                        platform::errors::InvalidArgument(
+                            "StartsTensorList size can't be zero"));
       starts_size = StartsTensorList.size();
     }
     if (ctx->HasInputs("EndsTensorList")) {
       auto EndsTensorList = ctx->Inputs("EndsTensorList");
       PADDLE_ENFORCE_GT(EndsTensorList.size(), 0,
-                        "EndsTensorList size can't be zero");
+                        platform::errors::InvalidArgument(
+                            "EndsTensorList size can't be zero"));
       ends_size = EndsTensorList.size();
     }
     if (ctx->HasInput("StartsTensor") == false) {
       PADDLE_ENFORCE_EQ(
           starts_size, axes.size(),
-          "The size of starts must be equal to the size of axes.");
+          platform::errors::InvalidArgument(
+              "The size of starts must be equal to the size of axes."));
     }
     if (ctx->HasInput("EndsTensor") == false) {
-      PADDLE_ENFORCE_EQ(ends_size, axes.size(),
-                        "The size of ends must be equal to the size of axes.");
+      PADDLE_ENFORCE_EQ(
+          ends_size, axes.size(),
+          platform::errors::InvalidArgument(
+              "The size of ends must be equal to the size of axes."));
     }
     int dim_value, start, end;
     for (size_t i = 0; i < axes.size(); ++i) {
       PADDLE_ENFORCE_LT(static_cast<int>(axes[i]), in_dims.size(),
-                        "The index of dimension in axes must be less "
-                        "than the size of input shape.");
+                        platform::errors::InvalidArgument(
+                            "The index of dimension in axes must be less "
+                            "than the size of input shape."));
       if (infer_flags[i] == -1) {
         out_dims[axes[i]] = -1;
       } else {
@@ -112,7 +121,8 @@ class SliceOp : public framework::OperatorWithKernel {
         start = std::max(start, 0);
         end = std::max(end, 0);
         end = std::min(end, dim_value);
-        PADDLE_ENFORCE_GT(end, start, "end should greater than start");
+        PADDLE_ENFORCE_GT(end, start, platform::errors::InvalidArgument(
+                                          "end should greater than start"));
         out_dims[axes[i]] = end - start;
       }
     }
@@ -122,8 +132,9 @@ class SliceOp : public framework::OperatorWithKernel {
       std::vector<int> new_out_shape;
       for (size_t i = 0; i < decrease_axis.size(); ++i) {
         if (ctx->IsRuntime() && infer_flags[i] != -1) {
-          PADDLE_ENFORCE_EQ(out_dims[decrease_axis[i]], 1,
-                            "decrease dim should be 1");
+          PADDLE_ENFORCE_EQ(
+              out_dims[decrease_axis[i]], 1,
+              platform::errors::InvalidArgument("decrease dim should be 1"));
         }
         out_dims[decrease_axis[i]] = 0;
       }
@@ -284,9 +295,12 @@ class SliceOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true, "Input should not be null");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Input"), true,
+        platform::errors::InvalidArgument("Input should not be null"));
     PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) should not be null");
+                      platform::errors::InvalidArgument(
+                          "Input(Out@GRAD) should not be null"));
     auto x_var_type = ctx->GetInputsVarType("Input")[0];
     if (x_var_type == framework::proto::VarType::LOD_TENSOR_ARRAY) {
       // If the var type of input is LOD_TENSOR_ARRAY,
......
@@ -191,8 +191,9 @@ class SliceKernel : public framework::OpKernel<T> {
     if (decrease_axis.size() > 0) {
       std::vector<int64_t> new_out_shape;
       for (size_t i = 0; i < decrease_axis.size(); ++i) {
-        PADDLE_ENFORCE_EQ(out_dims[decrease_axis[i]], 1,
-                          "decrease dim should be 1");
+        PADDLE_ENFORCE_EQ(
+            out_dims[decrease_axis[i]], 1,
+            platform::errors::InvalidArgument("decrease dim should be 1"));
         out_dims[decrease_axis[i]] = 0;
       }
......
@@ -31,51 +31,76 @@ class SpaceToDepthOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SpaceToDepthOp should not be null.");
+                   platform::errors::InvalidArgument(
+                       "Input(X) of SpaceToDepthOp should not be null."));
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SpaceToDepthOp should not be null.");
+                   platform::errors::InvalidArgument(
+                       "Output(Out) of SpaceToDepthOp should not be null."));
     auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_EQ(x_dims.size(), 4, "input should be a 4D tensor");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 4, platform::errors::InvalidArgument(
+                                             "input should be a 4D tensor"));
     auto blocksize = ctx->Attrs().Get<int64_t>("blocksize");
-    PADDLE_ENFORCE_GT(blocksize, 1, "The blocksize should be Greater than 1");
+    PADDLE_ENFORCE_GT(blocksize, 1,
+                      platform::errors::InvalidArgument(
+                          "The blocksize should be Greater than 1"));
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be Greater than 0");
-      PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be Greater than 0");
-      PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
-      PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
-                        "input channel should be divisible of the square of "
-                        "SpaceToDepthOp blocksize");
+      PADDLE_ENFORCE_GT(x_dims[1], 0,
+                        platform::errors::InvalidArgument(
+                            "input channel should be Greater than 0"));
+      PADDLE_ENFORCE_GT(x_dims[2], 0,
+                        platform::errors::InvalidArgument(
+                            "input Height should be Greater than 0"));
+      PADDLE_ENFORCE_GT(x_dims[3], 0,
+                        platform::errors::InvalidArgument(
+                            "input Width should be Greater than 0"));
+      PADDLE_ENFORCE_EQ(
+          x_dims[1] % (blocksize * blocksize), 0,
+          platform::errors::InvalidArgument(
+              "input channel should be divisible of the square of "
+              "SpaceToDepthOp blocksize"));
       PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
-                        "input Height should be divisible of the square of "
-                        "SpaceToDepthOp blocksize");
+                        platform::errors::InvalidArgument(
+                            "input Height should be divisible of the square of "
+                            "SpaceToDepthOp blocksize"));
       PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
-                        "input Width should be divisible of the square of "
-                        "SpaceToDepthOp blocksize");
+                        platform::errors::InvalidArgument(
+                            "input Width should be divisible of the square of "
+                            "SpaceToDepthOp blocksize"));
     } else {
       if (x_dims[1] != -1) {
         PADDLE_ENFORCE_GT(x_dims[1], 0,
-                          "input channel should be Greater than 0");
-        PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
-                          "input channel should be divisible of the square of "
-                          "SpaceToDepthOp blocksize");
+                          platform::errors::InvalidArgument(
+                              "input channel should be Greater than 0"));
+        PADDLE_ENFORCE_EQ(
+            x_dims[1] % (blocksize * blocksize), 0,
+            platform::errors::InvalidArgument(
+                "input channel should be divisible of the square of "
+                "SpaceToDepthOp blocksize"));
       }
       if (x_dims[2] != -1) {
         PADDLE_ENFORCE_GT(x_dims[2], 0,
-                          "input Height should be Greater than 0");
-        PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
-                          "input Height should be divisible of the square of "
-                          "SpaceToDepthOp blocksize");
+                          platform::errors::InvalidArgument(
+                              "input Height should be Greater than 0"));
+        PADDLE_ENFORCE_EQ(
+            x_dims[2] % (blocksize), 0,
+            platform::errors::InvalidArgument(
+                "input Height should be divisible of the square of "
+                "SpaceToDepthOp blocksize"));
      }
      if (x_dims[3] != -1) {
-        PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
-        PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
-                          "input Width should be divisible of the square of "
-                          "SpaceToDepthOp blocksize");
+        PADDLE_ENFORCE_GT(x_dims[3], 0,
+                          platform::errors::InvalidArgument(
+                              "input Width should be Greater than 0"));
+        PADDLE_ENFORCE_EQ(
+            x_dims[3] % (blocksize), 0,
+            platform::errors::InvalidArgument(
+                "input Width should be divisible of the square of "
+                "SpaceToDepthOp blocksize"));
       }
     }
@@ -156,9 +181,11 @@ class SpaceToDepthGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X"), platform::errors::InvalidArgument(
+                                           "Input(X) shouldn't be null."));
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) shouldn't be null.");
+                   platform::errors::InvalidArgument(
+                       "Input(Out@GRAD) shouldn't be null."));
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
......
@@ -25,9 +25,11 @@ class SplitOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SplitOp should not be null.");
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SplitOp should not be null."));
     PADDLE_ENFORCE_GE(ctx->Outputs("Out").size(), 1UL,
-                      "Outputs(Out) of SplitOp should not be empty.");
+                      platform::errors::InvalidArgument(
+                          "Outputs(Out) of SplitOp should not be empty."));
     auto in_dims = ctx->GetInputDim("X");
     auto outs_names = ctx->Outputs("Out");
     size_t axis = static_cast<size_t>(ctx->Attrs().Get<int>("axis"));
@@ -37,9 +39,10 @@ class SplitOp : public framework::OperatorWithKernel {
     const size_t outs_number = outs_names.size();
     if (sections.size() > 0) {
-      PADDLE_ENFORCE_EQ(sections.size(), outs_number,
-                        "tensor split sections size "
-                        "should be equal to output size.");
+      PADDLE_ENFORCE_EQ(
+          sections.size(), outs_number,
+          platform::errors::InvalidArgument("tensor split sections size "
+                                            "should be equal to output size."));
     }
     if (ctx->HasInput("AxisTensor")) {
......
@@ -33,12 +33,14 @@ static inline std::vector<framework::DDim> UpdateOutsDims(
     int64_t input_axis_dim = in_dims[axis];
     if (num > 0) {
       if (is_runtime || input_axis_dim > 0) {
-        PADDLE_ENFORCE_EQ(input_axis_dim % num, 0,
-                          "The input's size along the split dimension "
-                          "must be evenly divisible by Attr(num_or_sections). "
-                          "But received Attr(num_or_sections) "
-                          "= %d, input(X)'s shape = [%s], Attr(dim) = %d.",
-                          num, in_dims, axis);
+        PADDLE_ENFORCE_EQ(
+            input_axis_dim % num, 0,
+            platform::errors::InvalidArgument(
+                "The input's size along the split dimension "
+                "must be evenly divisible by Attr(num_or_sections). "
+                "But received Attr(num_or_sections) "
+                "= %d, input(X)'s shape = [%s], Attr(dim) = %d.",
+                num, in_dims, axis));
         size_t out_axis_dim = input_axis_dim / num;
         for (auto& out_dim : outs_dims) {
@@ -64,11 +66,13 @@ static inline std::vector<framework::DDim> UpdateOutsDims(
     }
     if (each_section_is_known) {
-      PADDLE_ENFORCE_LE(num_of_unk, 1,
-                        "Only one dimension value of Attr(num_or_sections) "
-                        "in SplitOp can be -1. "
-                        "But received Attr(num_or_sections) = [%s].",
-                        framework::make_ddim(sections));
+      PADDLE_ENFORCE_LE(
+          num_of_unk, 1,
+          platform::errors::InvalidArgument(
+              "Only one dimension value of Attr(num_or_sections) "
+              "in SplitOp can be -1. "
+              "But received Attr(num_or_sections) = [%s].",
+              framework::make_ddim(sections)));
     }
     if (unk_dim_idx != -1) {
@@ -77,21 +81,25 @@ static inline std::vector<framework::DDim> UpdateOutsDims(
       // the following check will fail.
       PADDLE_ENFORCE_LT(
           sum_of_section, input_axis_dim,
-          "Sum of Attr(num_or_sections) other than unknown section "
-          "must be less than the input's size "
-          "along the split dimension. But received Attr(num_or_sections) "
-          "= [%s], input(X)'s shape = [%s], Attr(dim) = %d.",
-          framework::make_ddim(sections), in_dims, axis);
+          platform::errors::InvalidArgument(
+              "Sum of Attr(num_or_sections) other than unknown section "
+              "must be less than the input's "
+              "size "
+              "along the split dimension. But received Attr(num_or_sections) "
+              "= [%s], input(X)'s shape = [%s], Attr(dim) = %d.",
+              framework::make_ddim(sections), in_dims, axis));
       if (each_section_is_known) {
        sections[unk_dim_idx] = input_axis_dim - sum_of_section;
       }
     } else {
       PADDLE_ENFORCE_EQ(
           sum_of_section, input_axis_dim,
-          "Sum of Attr(num_or_sections) must be equal to the input's size "
-          "along the split dimension. But received Attr(num_or_sections)"
-          " = [%s], input(X)'s shape = [%s], Attr(dim) = %d.",
-          framework::make_ddim(sections), in_dims, axis);
+          platform::errors::InvalidArgument(
+              "Sum of Attr(num_or_sections) must be equal to the input's "
+              "size "
+              "along the split dimension. But received Attr(num_or_sections)"
+              " = [%s], input(X)'s shape = [%s], Attr(dim) = %d.",
+              framework::make_ddim(sections), in_dims, axis));
     }
   }
   for (int i = 0; i < outs_number; ++i) {
......
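Several of the split_op.h checks above also carry printf-style placeholders and trailing arguments; those move inside the `platform::errors::InvalidArgument(...)` call unchanged. A hedged sketch of that formatted form in isolation, again assuming Paddle's enforce and ddim headers; the helper function name is illustrative and not part of the commit:

```cpp
// Sketch only: shows the formatted-message form used in the split_op.h hunks.
// Assumes the PaddlePaddle source tree; the wrapper function is hypothetical.
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace operators {

void CheckEvenSplit(const framework::DDim &in_dims, int axis, int num) {
  const int64_t input_axis_dim = in_dims[axis];
  // The %d and %s placeholders are filled from the trailing arguments,
  // exactly as they were with the old bare-string form of the macro.
  PADDLE_ENFORCE_EQ(
      input_axis_dim % num, 0,
      platform::errors::InvalidArgument(
          "The input's size along the split dimension "
          "must be evenly divisible by Attr(num_or_sections). "
          "But received Attr(num_or_sections) "
          "= %d, input(X)'s shape = [%s], Attr(dim) = %d.",
          num, in_dims, axis));
}

}  // namespace operators
}  // namespace paddle
```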