Unverified commit 248e27b7, authored by littletomatodonkey, committed by GitHub

fix pad outliers err (#34979)

* fix pad outliers err

* fix pad api input type and doc

* fix example of pad

* add unittest for pad3d

* fix unittest

* fix error format

* fix pad doc
Parent 40d4d834
@@ -567,6 +567,13 @@ class Pad3dCPUKernel : public framework::OpKernel<T> {
                             in_width, pads[1]));
     }
+    if (mode == "circular") {
+      PADDLE_ENFORCE_NE(
+          in_depth * in_height * in_width, 0,
+          platform::errors::InvalidArgument(
+              "The input tensor size can not be 0 for circular padding mode."));
+    }
     const int pad_left = pads[0];
     const int pad_top = pads[2];
     const int pad_front = pads[4];
......
@@ -620,6 +620,13 @@ class Pad3dCUDAKernel : public framework::OpKernel<T> {
                             in_width, pads[1]));
     }
+    if (mode == "circular") {
+      PADDLE_ENFORCE_NE(
+          in_depth * in_height * in_width, 0,
+          platform::errors::InvalidArgument(
+              "The input tensor size can not be 0 for circular padding mode."));
+    }
     const int pad_left = pads[0];
     const int pad_top = pads[2];
     const int pad_front = pads[4];
......
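The two hunks above add the same guard to the CPU and CUDA kernels: circular padding reads each output element from a source index taken modulo the corresponding input extent, so a zero-sized input dimension leaves nothing to wrap around to. Below is a minimal NumPy sketch of that failure mode; it is an illustration only, not the kernel code, and the helper name circular_pad_1d is made up for this sketch.

import numpy as np

def circular_pad_1d(x, pad_left, pad_right):
    # Circular padding maps every output position back into the input by
    # taking the source index modulo the input length. If len(x) == 0 the
    # modulo below divides by zero, which is exactly the case the new
    # PADDLE_ENFORCE_NE check rejects.
    n = len(x)
    out = np.empty(pad_left + n + pad_right, dtype=x.dtype)
    for i in range(out.size):
        out[i] = x[(i - pad_left) % n]
    return out

print(circular_pad_1d(np.array([1., 2., 3.]), 2, 2))
# [2. 3. 1. 2. 3. 1. 2.]
# circular_pad_1d(np.array([], dtype=np.float32), 1, 1)  -> ZeroDivisionError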
@@ -682,46 +682,64 @@ class TestPad3dAPI(unittest.TestCase):
 class TestPad3dOpError(unittest.TestCase):
+    def setUp(self):
+        self.places = [paddle.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            self.places.append(paddle.CUDAPlace(0))
+
     def test_errors(self):
         def test_variable():
             input_shape = (1, 2, 3, 4, 5)
             data = np.random.rand(*input_shape).astype(np.float32)
-            F.pad(x=data, paddings=[1, 1, 1, 1, 1, 1])
+            y = F.pad(x=data, pad=[1, 1, 1, 1, 1, 1], data_format="NCDHW")

         def test_reflect_1():
             input_shape = (1, 2, 3, 4, 5)
             data = np.random.rand(*input_shape).astype(np.float32)
-            x = paddle.fluid.data(name="x", shape=input_shape)
-            y = F.pad(x, pad=[5, 6, 1, 1, 1, 1], value=1, mode='reflect')
-            place = paddle.CPUPlace()
-            exe = Executor(place)
-            outputs = exe.run(feed={'x': data}, fetch_list=[y.name])
+            x = paddle.to_tensor(data)
+            y = F.pad(x,
+                      pad=[5, 6, 1, 1, 1, 1],
+                      value=1,
+                      mode='reflect',
+                      data_format="NCDHW")

         def test_reflect_2():
             input_shape = (1, 2, 3, 4, 5)
             data = np.random.rand(*input_shape).astype(np.float32)
-            x = paddle.fluid.data(name="x", shape=input_shape)
-            y = F.pad(x, pad=[1, 1, 4, 3, 1, 1], value=1, mode='reflect')
-            place = paddle.CPUPlace()
-            exe = Executor(place)
-            outputs = exe.run(feed={'x': data}, fetch_list=[y.name])
+            x = paddle.to_tensor(data)
+            y = F.pad(x,
+                      pad=[1, 1, 4, 3, 1, 1],
+                      value=1,
+                      mode='reflect',
+                      data_format="NCDHW")

         def test_reflect_3():
             input_shape = (1, 2, 3, 4, 5)
             data = np.random.rand(*input_shape).astype(np.float32)
-            x = paddle.fluid.data(name="x", shape=input_shape)
-            y = F.pad(x, pad=[1, 1, 1, 1, 2, 3], value=1, mode='reflect')
-            place = paddle.CPUPlace()
-            exe = Executor(place)
-            outputs = exe.run(feed={'x': data}, fetch_list=[y.name])
+            x = paddle.to_tensor(data)
+            y = F.pad(x,
+                      pad=[1, 1, 1, 1, 2, 3],
+                      value=1,
+                      mode='reflect',
+                      data_format="NCDHW")

+        def test_circular_1():
+            input_shape = (1, 2, 0, 4, 5)
+            data = np.random.rand(*input_shape).astype(np.float32)
+            x = paddle.to_tensor(data)
+            y = F.pad(x,
+                      pad=[1, 1, 1, 1, 2, 3],
+                      mode='circular',
+                      data_format="NCDHW")

-        self.assertRaises(TypeError, test_variable)
-        self.assertRaises(Exception, test_reflect_1)
-        self.assertRaises(Exception, test_reflect_2)
-        self.assertRaises(Exception, test_reflect_3)
+        paddle.disable_static()
+        for place in self.places:
+            self.assertRaises(ValueError, test_variable)
+            self.assertRaises(Exception, test_reflect_1)
+            self.assertRaises(Exception, test_reflect_2)
+            self.assertRaises(Exception, test_reflect_3)
+            self.assertRaises(Exception, test_circular_1)
+        paddle.enable_static()


 class TestPadDataformatError(unittest.TestCase):
......
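At the Python API level, the new test_circular_1 case exercises exactly the path guarded by the kernel change. The following is a hedged usage sketch, assuming a Paddle build that includes this commit; the exact exception type and message may differ by version.

import numpy as np
import paddle
import paddle.nn.functional as F

# A (1, 2, 0, 4, 5) tensor has a zero-sized spatial dimension.
x = paddle.to_tensor(np.random.rand(1, 2, 0, 4, 5).astype("float32"))
try:
    F.pad(x, pad=[1, 1, 1, 1, 2, 3], mode="circular", data_format="NCDHW")
except Exception as e:
    # With this fix the kernel rejects the empty input instead of wrapping
    # indices around a zero-sized dimension.
    print(type(e).__name__, e)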
@@ -1160,12 +1160,13 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
     Parameters:
         x (Tensor): The input tensor with data type float32/double/int32/int64_t.
-        pad (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
-            of input will be padded. 1. If input dimension is 3, then the pad has the form (pad_left,
+        pad (Tensor | List[int] | Tuple[int]): The padding size with data type int.
+            If mode is 'constant' and length of pad is twice as length of x dimension, then x will
+            be padded from the first dimension to the last dimension.
+            Else: 1. If input dimension is 3, then the pad has the form (pad_left,
             pad_right). 2. If the input dimension is 4, then the pad has the form (pad_left, pad_right,
             pad_top, pad_bottom). 3. If the input dimension is 5, then the pad has the form
             (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
         mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
             When in 'constant' mode, this op uses a constant value to pad the input tensor.
             When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
@@ -1189,6 +1190,15 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
                   [4., 5., 6.]]]]]
     Case 0:
+        pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
+        mode = 'constant'
+        value = 0
+        Out = [[[[[0., 0., 0.],
+                  [1., 2., 3.],
+                  [4., 5., 6.],
+                  [0., 0., 0.]]]]]
+
+    Case 1:
         pad = [2, 2, 1, 1, 0, 0],
         mode = 'constant'
         value = 0
@@ -1197,7 +1207,7 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
                   [0. 0. 4. 5. 6. 0. 0.]
                   [0. 0. 0. 0. 0. 0. 0.]]]]]
-    Case 1:
+    Case 2:
         pad = [2, 2, 1, 1, 0, 0],
         mode = 'reflect'
         Out = [[[[[6. 5. 4. 5. 6. 5. 4.]
@@ -1205,7 +1215,7 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
                   [6. 5. 4. 5. 6. 5. 4.]
                   [3. 2. 1. 2. 3. 2. 1.]]]]]
-    Case 2:
+    Case 3:
         pad = [2, 2, 1, 1, 0, 0],
         mode = 'replicate'
         Out = [[[[[1. 1. 1. 2. 3. 3. 3.]
@@ -1213,7 +1223,7 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
                   [4. 4. 4. 5. 6. 6. 6.]
                   [4. 4. 4. 5. 6. 6. 6.]]]]]
-    Case 3:
+    Case 4:
         pad = [2, 2, 1, 1, 0, 0],
         mode = 'circular'
         Out = [[[[[5. 6. 4. 5. 6. 4. 5.]
@@ -1231,11 +1241,18 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
             # example 1
             x_shape = (1, 1, 3)
             x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
-            y = F.pad(x, [2, 3], value=1, mode='constant', data_format="NCL")
+            y = F.pad(x, [0, 0, 0, 0, 2, 3], value=1, mode='constant', data_format="NCL")
             print(y)
             # [[[1. 1. 1. 2. 3. 1. 1. 1.]]]

             # example 2
+            x_shape = (1, 1, 3)
+            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
+            y = F.pad(x, [2, 3], value=1, mode='constant', data_format="NCL")
+            print(y)
+            # [[[1. 1. 1. 2. 3. 1. 1. 1.]]]
+
+            # example 3
             x_shape = (1, 1, 2, 3)
             x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
             y = F.pad(x, [1, 2, 1, 1], value=1, mode='circular')
@@ -1295,6 +1312,7 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
                 unsqueezed_dim = [1]
                 x = unsqueeze(x, axis=unsqueezed_dim)
     else:
+        pad = list(pad)
         if data_format in ["NCL", "NCHW", "NCDHW"]:
             data_format = "NCDHW"
             if x_dim == 3:
......
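The docstring now distinguishes two calling conventions for 'constant' mode: a pad list whose length is twice the input rank pads every dimension from first to last, while the shorter form pads only the trailing spatial dimensions. A small runnable sketch, based directly on the updated docstring examples above:

import paddle
import paddle.nn.functional as F

x = paddle.arange(3, dtype="float32").reshape((1, 1, 3)) + 1  # [[[1., 2., 3.]]]

# pad length == 2 * x.ndim with mode='constant': every dimension is padded,
# ordered from the first dimension to the last.
print(F.pad(x, [0, 0, 0, 0, 2, 3], value=1, mode="constant", data_format="NCL"))
# [[[1. 1. 1. 2. 3. 1. 1. 1.]]]

# pad length == 2 with a 3-D input: only the last spatial dimension is padded.
print(F.pad(x, [2, 3], value=1, mode="constant", data_format="NCL"))
# [[[1. 1. 1. 2. 3. 1. 1. 1.]]]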
@@ -24,7 +24,7 @@ __all__ = []
 def _npairs(x, n):
-    if isinstance(x, (paddle.Tensor, list)):
+    if isinstance(x, (paddle.Tensor, list, tuple)):
         return x
     x = [x] * (n * 2)
     return x
......
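The _npairs change lets a tuple pass through the same way a list or Tensor already did, instead of being broadcast like a scalar. A simplified sketch of the helper's behavior after this change; the real helper also passes paddle.Tensor through, which is omitted here to keep the sketch dependency-free.

def _npairs(x, n):
    # Sequences (and, after this commit, tuples) are returned unchanged;
    # a scalar is expanded into n pairs of identical values.
    if isinstance(x, (list, tuple)):
        return x
    return [x] * (n * 2)

print(_npairs((1, 2, 3, 4), 2))  # (1, 2, 3, 4) - tuple passes through
print(_npairs(1, 2))             # [1, 1, 1, 1] - scalar expands to 2*n values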