Unverified commit 55330131, authored by liu zhengxi, committed by GitHub

OP(pad, pad2d, pad_constant_like) error message enhancement (#23882) (#23994)

* enhance pad.* error message, test=develop
Parent 10daf977
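For orientation, a minimal sketch (not part of the patch; it assumes a fluid 1.x build that includes this change) of the user-facing effect: passing a raw numpy array where a `Variable` is required now fails fast with a `TypeError` from the new input checks, instead of a vaguer failure inside the operator. This mirrors the `test_Variable` cases added in the tests below.

```python
import numpy as np
import paddle.fluid as fluid

# Sketch: pad expects a fluid Variable; a numpy array now trips the
# new check_variable_and_dtype guard with a TypeError.
x = np.random.random((2, 2)).astype("float32")
try:
    fluid.layers.pad(x=x, paddings=[1, 1, 1, 1])
except TypeError as e:
    print(e)
```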
@@ -466,34 +466,43 @@ class Pad2dOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of Pad2dOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of Pad2dOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Pad2d");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Pad2d");
 
     auto x_dim = ctx->GetInputDim("X");
     PADDLE_ENFORCE_EQ(x_dim.size(), 4,
-                      "The size of input(X)'s dimension should be equal to 4.");
+                      platform::errors::InvalidArgument(
+                          "The size of Input(X)'s dimension should be equal to "
+                          "4, but received %d. ",
+                          x_dim.size()));
 
     std::vector<int64_t> out_dims(x_dim.size());
     auto data_format = ctx->Attrs().Get<std::string>("data_format");
     out_dims[0] = x_dim[0];
     if (ctx->HasInput("Paddings")) {
       auto paddings_dim = ctx->GetInputDim("Paddings");
-      PADDLE_ENFORCE_EQ(
-          paddings_dim.size(), 1,
-          "Size of Input(Paddings)'s dimension should be equal to 1.");
+      PADDLE_ENFORCE_EQ(paddings_dim.size(), 1,
+                        platform::errors::InvalidArgument(
+                            "Size of Input(Paddings)'s dimension should be "
+                            "equal to 1, but received %d.",
+                            paddings_dim.size()));
       if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(paddings_dim[0], 4,
-                          "Shape of Input(Paddings) should be equal to [4].");
+                          platform::errors::InvalidArgument(
+                              "Shape of Input(Paddings) should be equal to "
+                              "[4], but received [%d].",
+                              paddings_dim[0]));
       }
       out_dims[1] = x_dim[1];
       out_dims[2] = x_dim[2];
       out_dims[3] = x_dim[3];
     } else {
       auto paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
-      PADDLE_ENFORCE_EQ(paddings.size(), 4,
-                        "Size of paddings should be equal to 4.");
+      PADDLE_ENFORCE_EQ(
+          paddings.size(), 4,
+          platform::errors::InvalidArgument(
+              "Size of paddings should be equal to 4, but received %d.",
+              static_cast<int>(paddings.size())));
       if (data_format == "NCHW") {
         out_dims[1] = x_dim[1];  // channel
         out_dims[2] = ((!ctx->IsRuntime()) && (x_dim[2] < 0))
@@ -608,9 +617,10 @@ class Pad2dOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Pad2d@Grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "Pad2d@Grad");
+
     auto x_dims = ctx->GetInputDim("X");
     auto x_grad_name = framework::GradVarName("X");
     if (ctx->HasOutput(x_grad_name)) {
......
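As a rough illustration of the reworked `PADDLE_ENFORCE_EQ` above (a sketch, not part of the patch; it assumes the same fluid 1.x build and that the attribute check fires when the op is appended in static-graph mode), a `paddings` attribute of the wrong length now reports the received size in the exception message:

```python
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

with program_guard(Program(), Program()):
    data = fluid.data(name='x', shape=[None, 3, 32, 32], dtype='float32')
    try:
        # Only 3 pad values instead of the 4 that pad2d requires.
        fluid.layers.pad2d(input=data, paddings=[1, 1, 1])
    except Exception as e:
        # Message now reads "... should be equal to 4, but received 3."
        print(type(e).__name__, e)
```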
@@ -25,18 +25,19 @@ class PadConstantLikeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of PadConstantLikeOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"),
-                   "Input(Y) of PadConstantLikeOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of PadConstantLikeOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "PadConstantLike");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "PadConstantLike");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "PadConstantLike");
 
     auto x_dim = ctx->GetInputDim("X");
     auto y_dim = ctx->GetInputDim("Y");
 
     PADDLE_ENFORCE_EQ(x_dim.size(), y_dim.size(),
-                      "The dimension of X and Y should be the same.");
+                      platform::errors::InvalidArgument(
+                          "The size of Input(X)'s dimension and the size of "
+                          "Input(Y)'s dimension should be the same, but "
+                          "received %d for Input(X) vs %d for Input(Y).",
+                          x_dim.size(), y_dim.size()));
 
     for (int i = 0; i < x_dim.size(); ++i) {
       if ((!ctx->IsRuntime()) && ((x_dim[i] == -1) || (y_dim[i] == -1))) {
@@ -44,8 +45,11 @@ class PadConstantLikeOp : public framework::OperatorWithKernel {
       } else {
         PADDLE_ENFORCE_GE(
             x_dim[i], y_dim[i],
-            "expected X_dim[i] >= Y_dim[i], but received %d < %d for dim %d",
-            x_dim[i], y_dim[i], i);
+            platform::errors::InvalidArgument(
+                "The size of each dimension of Input(X) expected to be greater "
+                "than or equal to size of corresponding dimension of Input(Y) "
+                "(X_dim[i] >= Y_dim[i]), but received %d < %d for dimension %d",
+                x_dim[i], y_dim[i], i));
       }
     }
@@ -157,14 +161,20 @@ class PadConstantLikeOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "PadConstantLike@Grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "PadConstantLike@Grad");
+
     auto y_dim = ctx->GetInputDim("Y");
     auto dout_dim = ctx->GetInputDim(framework::GradVarName("Out"));
 
-    PADDLE_ENFORCE_EQ(dout_dim.size(), y_dim.size(),
-                      "The dimension of X and Y should be the same.");
+    PADDLE_ENFORCE_EQ(
+        dout_dim.size(), y_dim.size(),
+        platform::errors::InvalidArgument(
+            "Op(PadConstantLike@Grad) the size of Input(Out@Grad)'s dimension "
+            "and the size of Input(Y)'s dimension should be the same, but "
+            "received %d for Input(Out@Grad) vs %d for Input(Y).",
+            dout_dim.size(), y_dim.size()));
 
     auto y_grad_name = framework::GradVarName("Y");
     if (ctx->HasOutput(y_grad_name)) {
@@ -175,10 +185,14 @@ class PadConstantLikeOpGrad : public framework::OperatorWithKernel {
       if ((!ctx->IsRuntime()) && ((dout_dim[i] == -1) || (y_dim[i] == -1))) {
         continue;
       } else {
-        PADDLE_ENFORCE_GE(dout_dim[i], y_dim[i],
-                          "expected Out_dim[i] >= Y_dim[i], but received %d "
-                          "< %d for dim %d",
-                          dout_dim[i], y_dim[i], i);
+        PADDLE_ENFORCE_GE(
+            dout_dim[i], y_dim[i],
+            platform::errors::InvalidArgument(
+                "The size of each dimension of Input(Out@Grad) expected to "
+                "be greater than or equal to size of corresponding dimension "
+                "of Input(Y) (Out_dim[i] >= Y_dim[i]), but received %d < %d "
+                "for dimension %d",
+                dout_dim[i], y_dim[i], i));
       }
     }
   }
......
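To see the new InvalidArgument wording from PadConstantLikeOp in action, a sketch (same fluid 1.x assumptions as above) where one dimension of Y exceeds the corresponding dimension of X; with static shapes the `X_dim[i] >= Y_dim[i]` check fires at graph-construction time:

```python
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

with program_guard(Program(), Program()):
    x = fluid.data(name='x', shape=[2, 3], dtype='float32')
    y = fluid.data(name='y', shape=[4, 3], dtype='float32')  # Y_dim[0] > X_dim[0]
    try:
        fluid.layers.pad_constant_like(x=x, y=y)
    except Exception as e:
        # Now reports "... (X_dim[i] >= Y_dim[i]), but received 2 < 4 for dimension 0"
        print(e)
```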
@@ -25,17 +25,24 @@ class PadOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of PadOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of PadOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Pad");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Pad");
 
     auto x_dim = ctx->GetInputDim("X");
     auto& paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
-    PADDLE_ENFORCE_EQ(x_dim.size() * 2, int64_t(paddings.size()),
-                      "Size of paddings should be equal to 2 * dimension size "
-                      "of input tensor.");
+    PADDLE_ENFORCE_EQ(
+        static_cast<int>(paddings.size()), x_dim.size() * 2,
+        platform::errors::InvalidArgument(
+            "Size of 'paddings' dimension should be equal to 2 * size of "
+            "Input(X)'s dimension, but received (size of 'paddings' dimension "
+            "is) %d vs (2 * size of Input(X)'s dimension is) %d.",
+            static_cast<int>(paddings.size()), x_dim.size() * 2));
     for (size_t i = 0; i < paddings.size(); ++i) {
-      PADDLE_ENFORCE_GE(paddings[i], 0, "paddings should >= 0.");
+      PADDLE_ENFORCE_GE(paddings[i], 0,
+                        platform::errors::InvalidArgument(
+                            "The element of 'paddings' should >= 0, but "
+                            "received %d for index %d.",
+                            paddings[i], static_cast<int>(i)));
     }
     std::vector<int64_t> out_dims(x_dim.size());
     for (int i = 0; i < x_dim.size(); ++i) {
......
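Similarly for PadOp, a sketch (same assumptions) of the 2 * rank check: a rank-2 input needs four pad values, so two are rejected with both sizes reported in the message:

```python
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

with program_guard(Program(), Program()):
    x = fluid.data(name='x', shape=[4, 4], dtype='float32')
    try:
        # 2 pad values supplied, 2 * rank = 4 required.
        fluid.layers.pad(x=x, paddings=[0, 1])
    except Exception as e:
        # "... received (size of 'paddings' dimension is) 2 vs
        #  (2 * size of Input(X)'s dimension is) 4."
        print(e)
```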
@@ -6432,6 +6432,9 @@ def pad(x, paddings, pad_value=0., name=None):
             x = fluid.data(name='data', shape=[300, 300], dtype='float32')
             out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.)
     """
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], "pad")
+
     helper = LayerHelper('pad', input=x, **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -6523,6 +6526,10 @@ def pad_constant_like(x, y, pad_value=0., name=None):
             out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.)
             # out is a rank 4 tensor variable, and out.shape = [2, 3 ,2 , 3]
     """
+    check_type(x, 'x', (Variable), 'pad_constant_like')
+    check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
+                             "pad_constant_like")
+
     helper = LayerHelper('pad_constant_like', input=x, **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -8802,6 +8809,9 @@ def pad2d(input,
             data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
             result = fluid.layers.pad2d(input=data, paddings=[0, 1, 2, 3], mode='reflect')
     """
+    check_variable_and_dtype(
+        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
+        "pad2d")
     if in_dygraph_mode():
         _paddings = paddings.numpy().tolist() if isinstance(
......
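The Python-side guards added above reject unsupported dtypes before the op is ever appended to the program. A sketch (same assumptions; note that 'float16' is deliberately absent from the allowed dtype list for pad_constant_like's `y`):

```python
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

with program_guard(Program(), Program()):
    x = fluid.data(name='x', shape=[2, 4], dtype='float32')
    y = fluid.data(name='y', shape=[2, 2], dtype='float16')
    try:
        # y's dtype is not in ['float32', 'float64', 'int32', 'int64'].
        fluid.layers.pad_constant_like(x=x, y=y)
    except TypeError as e:
        print(e)
```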
@@ -15,6 +15,8 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import Program, program_guard
 
 
 class TestPad2dOp(OpTest):
@@ -124,5 +126,20 @@ class TestCase7(TestPad2dOp):
         self.variable_paddings = True
 
 
+class TestPad2dOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            input_data = np.random.random((2, 2, 2, 2)).astype("float32")
+
+            def test_Variable():
+                fluid.layers.pad2d(input=input_data, paddings=[1, 1, 1, 1])
+
+            self.assertRaises(TypeError, test_Variable)
+
+            data = fluid.data(
+                name='data', shape=[None, 3, 20, 20], dtype='float16')
+            fluid.layers.pad2d(input=data, paddings=[1, 1, 1, 1])
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -17,9 +17,11 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import Program, program_guard
 
 
-class TestPadOp(OpTest):
+class TestPadConstantLikeOp(OpTest):
     def setUp(self):
         self.initTestCase()
         self.op_type = "pad_constant_like"
@@ -49,7 +51,7 @@ class TestPadOp(OpTest):
         self.paddings = [(0, 13), (0, 0)]
 
 
-class TestCase1(TestPadOp):
+class TestCase1(TestPadConstantLikeOp):
     def initTestCase(self):
         self.x_shape = (4, 3, 4, 5)
         self.y_shape = (2, 3, 4, 5)
@@ -57,7 +59,7 @@ class TestCase1(TestPadOp):
         self.pad_value = 0.5
 
 
-class TestCase2(TestPadOp):
+class TestCase2(TestPadConstantLikeOp):
     def initTestCase(self):
         self.x_shape = (4, 3, 4, 10)
         self.y_shape = (2, 3, 2, 10)
@@ -65,5 +67,26 @@ class TestCase2(TestPadOp):
         self.pad_value = 0.5
 
 
+class TestPadConstantLikeOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            x_data = np.random.random((2, 2, 2, 2)).astype("float32")
+            y_data = np.random.random((2, 2, 2, 2)).astype("float32")
+
+            def test_Variable_x():
+                var_y = fluid.data(
+                    name="data_y", shape=[2, 2, 2, 2], dtype="float32")
+                fluid.layers.pad_constant_like(x=x_data, y=var_y)
+
+            self.assertRaises(TypeError, test_Variable_x)
+
+            def test_Variable_y():
+                var_x = fluid.data(
+                    name="data_x", shape=[2, 2, 2, 2], dtype="float32")
+                fluid.layers.pad_constant_like(x=var_x, y=y_data)
+
+            self.assertRaises(TypeError, test_Variable_y)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -18,6 +18,8 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid.core as core
+import paddle.fluid as fluid
+from paddle.fluid import Program, program_guard
 
 
 class TestPadOp(OpTest):
@@ -95,5 +97,20 @@ create_test_fp16(TestCase1)
 create_test_fp16(TestCase2)
 create_test_fp16(TestCase3)
 
+
+class TestPadOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            input_data = np.random.random((2, 2)).astype("float32")
+
+            def test_Variable():
+                fluid.layers.pad(x=input_data, paddings=[1, 1, 1, 1])
+
+            self.assertRaises(TypeError, test_Variable)
+
+            data = fluid.data(name='data', shape=[4], dtype='float16')
+            fluid.layers.pad(x=data, paddings=[0, 1])
+
+
 if __name__ == '__main__':
     unittest.main()