From 2ed84a679d031abd65ac020d2c6e0e0d818def46 Mon Sep 17 00:00:00 2001 From: littletomatodonkey <2120160898@bit.edu.cn> Date: Sat, 17 Oct 2020 15:54:10 +0800 Subject: [PATCH] Add API for pad op. (#27943) * add pad apis * rm pad2d test_layer * fix code example --- .../unittests/test_bilinear_interp_v2_op.py | 14 - .../fluid/tests/unittests/test_layers.py | 17 - .../unittests/test_nearest_interp_v2_op.py | 14 - .../fluid/tests/unittests/test_pad3d_op.py | 88 +-- python/paddle/nn/__init__.py | 16 +- python/paddle/nn/layer/__init__.py | 13 +- python/paddle/nn/layer/common.py | 733 ++---------------- 7 files changed, 119 insertions(+), 776 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py index 9fc4971fec..58312979c5 100755 --- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py @@ -606,20 +606,6 @@ class TestBilinearInterpOpAPI(unittest.TestCase): self.assertTrue(np.allclose(res, expect_res)) -class TestUpsampleBilinear2dInterpOpAPI2_0(unittest.TestCase): - def test_case(self): - - # dygraph - x_data = np.random.random((1, 3, 6, 6)).astype("float32") - upsample = paddle.nn.UpsamplingBilinear2d(scale_factor=[2, 2]) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(x_data) - interp = upsample(x) - expect = bilinear_interp_np( - x_data, out_h=12, out_w=12, align_corners=True) - self.assertTrue(np.allclose(interp.numpy(), expect)) - - class TestBilinearInterpOpAPI_dy(unittest.TestCase): def test_case(self): import paddle diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index ce9cc33cf9..e3f477c1d9 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -316,23 +316,6 @@ class TestLayer(LayerTest): self.assertTrue(np.allclose(static_ret, dy_ret_value)) - def test_pad2d(self): - with self.static_graph(): - t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32') - ret = layers.pad2d(t, paddings=[1, 1, 1, 1]) - static_ret = self.get_static_graph_result( - feed={'t': np.ones( - [3, 3, 5, 5], dtype='float32')}, - fetch_list=[ret])[0] - - with self.dynamic_graph(): - t = np.ones([3, 3, 5, 5], dtype='float32') - my_pad2d = paddle.nn.layer.Pad2D(paddings=1) - dy_ret = my_pad2d(base.to_variable(t)) - dy_ret_value = dy_ret.numpy() - - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - def test_matmul(self): with self.static_graph(): t = layers.data(name='t', shape=[3, 3], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py index 2feca1c306..1f88568b5b 100755 --- a/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py @@ -526,20 +526,6 @@ class TestNearestAPI(unittest.TestCase): self.assertTrue(np.allclose(results[i + 1], expect_res)) -class TestUpsampleNearest2dInterpOpAPI2_0(unittest.TestCase): - def test_case(self): - - # dygraph - x_data = np.random.random((1, 3, 6, 6)).astype("float32") - upsample = paddle.nn.UpsamplingNearest2d(scale_factor=[2, 2]) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(x_data) - interp = upsample(x) - expect = nearest_neighbor_interp_np( - x_data, out_h=12, out_w=12, align_corners=False) - 
self.assertTrue(np.allclose(interp.numpy(), expect)) - - class TestNearestInterpException(unittest.TestCase): def test_exception(self): input = fluid.data(name="input", shape=[1, 3, 6, 6], dtype="float32") diff --git a/python/paddle/fluid/tests/unittests/test_pad3d_op.py b/python/paddle/fluid/tests/unittests/test_pad3d_op.py index aa75ee9c7c..c29352bb51 100644 --- a/python/paddle/fluid/tests/unittests/test_pad3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pad3d_op.py @@ -314,7 +314,6 @@ class TestPadAPI(unittest.TestCase): def test_dygraph_1(self): paddle.disable_static() - input_shape = (1, 2, 3, 4, 5) pad = [1, 2, 1, 1, 3, 4] mode = "constant" @@ -342,7 +341,6 @@ class TestPadAPI(unittest.TestCase): def test_dygraph_2(self): paddle.disable_static() - input_shape = (2, 3, 4, 5) pad = [1, 1, 3, 4] mode = "constant" @@ -370,38 +368,8 @@ class TestPadAPI(unittest.TestCase): self.assertTrue(np.allclose(y1.numpy(), np_out1)) self.assertTrue(np.allclose(y2.numpy(), np_out2)) - def test_dygraph_2(self): - paddle.disable_static() - - input_shape = (2, 3, 4, 5) - pad = [1, 1, 3, 4] - mode = "constant" - value = 100 - input_data = np.random.rand(*input_shape).astype(np.float32) - np_out1 = self._get_numpy_out( - input_data, pad, mode, value, data_format="NCHW") - np_out2 = self._get_numpy_out( - input_data, pad, mode, value, data_format="NHWC") - tensor_data = paddle.to_tensor(input_data) - tensor_pad = paddle.to_tensor(pad, dtype="int32") - - y1 = F.pad(tensor_data, - pad=tensor_pad, - mode=mode, - value=value, - data_format="NCHW") - y2 = F.pad(tensor_data, - pad=tensor_pad, - mode=mode, - value=value, - data_format="NHWC") - - self.assertTrue(np.allclose(y1.numpy(), np_out1)) - self.assertTrue(np.allclose(y2.numpy(), np_out2)) - def test_dygraph_3(self): paddle.disable_static() - input_shape = (3, 4, 5) pad = [3, 4] mode = "constant" @@ -455,6 +423,8 @@ class TestPad1dAPI(unittest.TestCase): out = np.pad(input_data, pad, mode=mode) elif mode == "replicate": out = np.pad(input_data, pad, mode="edge") + elif mode == "circular": + out = np.pad(input_data, pad, mode="wrap") return out @@ -471,9 +441,10 @@ class TestPad1dAPI(unittest.TestCase): value = 100 input_data = np.random.rand(*input_shape).astype(np.float32) - pad_reflection = nn.ReflectionPad1d(padding=pad) - pad_replication = nn.ReplicationPad1d(padding=pad) - pad_constant = nn.ConstantPad1d(padding=pad, value=value) + pad_reflection = nn.Pad1D(padding=pad, mode="reflect") + pad_replication = nn.Pad1D(padding=pad, mode="replicate") + pad_constant = nn.Pad1D(padding=pad, mode="constant", value=value) + pad_circular = nn.Pad1D(padding=pad, mode="circular") data = paddle.to_tensor(input_data) @@ -492,6 +463,11 @@ class TestPad1dAPI(unittest.TestCase): input_data, pad, "constant", value=value, data_format="NCL") self.assertTrue(np.allclose(output.numpy(), np_out)) + output = pad_circular(data) + np_out = self._get_numpy_out( + input_data, pad, "circular", value=value, data_format="NCL") + self.assertTrue(np.allclose(output.numpy(), np_out)) + class TestPad2dAPI(unittest.TestCase): def _get_numpy_out(self, @@ -521,6 +497,8 @@ class TestPad2dAPI(unittest.TestCase): out = np.pad(input_data, pad, mode=mode) elif mode == "replicate": out = np.pad(input_data, pad, mode="edge") + elif mode == "circular": + out = np.pad(input_data, pad, mode="wrap") return out @@ -537,10 +515,10 @@ class TestPad2dAPI(unittest.TestCase): value = 100 input_data = np.random.rand(*input_shape).astype(np.float32) - pad_reflection = nn.ReflectionPad2d(padding=pad) - 
pad_replication = nn.ReplicationPad2d(padding=pad) - pad_constant = nn.ConstantPad2d(padding=pad, value=value) - pad_zero = nn.ZeroPad2d(padding=pad) + pad_reflection = nn.Pad2D(padding=pad, mode="reflect") + pad_replication = nn.Pad2D(padding=pad, mode="replicate") + pad_constant = nn.Pad2D(padding=pad, mode="constant", value=value) + pad_circular = nn.Pad2D(padding=pad, mode="circular") data = paddle.to_tensor(input_data) @@ -559,9 +537,9 @@ class TestPad2dAPI(unittest.TestCase): input_data, pad, "constant", value=value, data_format="NCHW") self.assertTrue(np.allclose(output.numpy(), np_out)) - output = pad_zero(data) + output = pad_circular(data) np_out = self._get_numpy_out( - input_data, pad, "constant", value=0, data_format="NCHW") + input_data, pad, "circular", data_format="NCHW") self.assertTrue(np.allclose(output.numpy(), np_out)) @@ -595,6 +573,8 @@ class TestPad3dAPI(unittest.TestCase): out = np.pad(input_data, pad, mode=mode) elif mode == "replicate": out = np.pad(input_data, pad, mode="edge") + elif mode == "circular": + out = np.pad(input_data, pad, mode="wrap") return out @@ -611,11 +591,18 @@ class TestPad3dAPI(unittest.TestCase): value = 100 input_data = np.random.rand(*input_shape).astype(np.float32) - pad_replication = nn.ReplicationPad3d(padding=pad) - pad_constant = nn.ConstantPad3d(padding=pad, value=value) + pad_reflection = nn.Pad3D(padding=pad, mode="reflect") + pad_replication = nn.Pad3D(padding=pad, mode="replicate") + pad_constant = nn.Pad3D(padding=pad, mode="constant", value=value) + pad_circular = nn.Pad3D(padding=pad, mode="circular") data = paddle.to_tensor(input_data) + output = pad_reflection(data) + np_out = self._get_numpy_out( + input_data, pad, "reflect", data_format="NCDHW") + self.assertTrue(np.allclose(output.numpy(), np_out)) + output = pad_replication(data) np_out = self._get_numpy_out( input_data, pad, "replicate", data_format="NCDHW") @@ -626,6 +613,11 @@ class TestPad3dAPI(unittest.TestCase): input_data, pad, "constant", value=value, data_format="NCDHW") self.assertTrue(np.allclose(output.numpy(), np_out)) + output = pad_circular(data) + np_out = self._get_numpy_out( + input_data, pad, "circular", data_format="NCDHW") + self.assertTrue(np.allclose(output.numpy(), np_out)) + class TestPad3dOpError(unittest.TestCase): def test_errors(self): @@ -673,32 +665,30 @@ class TestPad3dOpError(unittest.TestCase): class TestPadDataformatError(unittest.TestCase): def test_errors(self): def test_ncl(): - paddle.disable_static(paddle.CPUPlace()) input_shape = (1, 2, 3, 4) pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32')) data = np.arange( np.prod(input_shape), dtype=np.float64).reshape(input_shape) + 1 - my_pad = nn.ReplicationPad1d(padding=pad, data_format="NCL") + my_pad = nn.Pad1D(padding=pad, mode="replicate", data_format="NCL") data = paddle.to_tensor(data) result = my_pad(data) def test_nchw(): - paddle.disable_static(paddle.CPUPlace()) input_shape = (1, 2, 4) pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32')) data = np.arange( np.prod(input_shape), dtype=np.float64).reshape(input_shape) + 1 - my_pad = nn.ReplicationPad1d(padding=pad, data_format="NCHW") + my_pad = nn.Pad1D(padding=pad, mode="replicate", data_format="NCHW") data = paddle.to_tensor(data) result = my_pad(data) def test_ncdhw(): - paddle.disable_static(paddle.CPUPlace()) input_shape = (1, 2, 3, 4) pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32')) data = np.arange( np.prod(input_shape), dtype=np.float64).reshape(input_shape) + 1 - my_pad = 
nn.ReplicationPad1d(padding=pad, data_format="NCDHW") + my_pad = nn.Pad1D( + padding=pad, mode="replicate", data_format="NCDHW") data = paddle.to_tensor(data) result = my_pad(data) diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index 6af59465be..1d626c38c2 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -71,22 +71,16 @@ from .layer.activation import Tanhshrink #DEFINE_ALIAS from .layer.activation import ThresholdedReLU #DEFINE_ALIAS from .layer.activation import LogSoftmax #DEFINE_ALIAS from .layer.activation import Maxout #DEFINE_ALIAS -from .layer.common import ReflectionPad1d #DEFINE_ALIAS -from .layer.common import ReplicationPad1d #DEFINE_ALIAS -from .layer.common import ConstantPad1d #DEFINE_ALIAS -from .layer.common import ReflectionPad2d #DEFINE_ALIAS -from .layer.common import ReplicationPad2d #DEFINE_ALIAS -from .layer.common import ConstantPad2d #DEFINE_ALIAS -from .layer.common import ZeroPad2d #DEFINE_ALIAS -from .layer.common import ReplicationPad3d #DEFINE_ALIAS -from .layer.common import ConstantPad3d #DEFINE_ALIAS +from .layer.common import BilinearTensorProduct #DEFINE_ALIAS +from .layer.common import Pool2D #DEFINE_ALIAS +from .layer.common import Pad1D #DEFINE_ALIAS +from .layer.common import Pad2D #DEFINE_ALIAS +from .layer.common import Pad3D #DEFINE_ALIAS from .layer.common import CosineSimilarity #DEFINE_ALIAS from .layer.common import Embedding #DEFINE_ALIAS from .layer.common import Linear #DEFINE_ALIAS from .layer.common import Flatten #DEFINE_ALIAS from .layer.common import Upsample #DEFINE_ALIAS -from .layer.common import UpsamplingNearest2d #DEFINE_ALIAS -from .layer.common import UpsamplingBilinear2d #DEFINE_ALIAS from .layer.common import Bilinear #DEFINE_ALIAS from .layer.common import Dropout #DEFINE_ALIAS from .layer.common import Dropout2d #DEFINE_ALIAS diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py index afd2cc3a23..1defed3362 100644 --- a/python/paddle/nn/layer/__init__.py +++ b/python/paddle/nn/layer/__init__.py @@ -44,23 +44,14 @@ from .activation import LogSoftmax #DEFINE_ALIAS from .common import BilinearTensorProduct #DEFINE_ALIAS from .common import Bilinear #DEFINE_ALIAS from .common import Pool2D #DEFINE_ALIAS +from .common import Pad1D #DEFINE_ALIAS from .common import Pad2D #DEFINE_ALIAS -from .common import ReflectionPad1d #DEFINE_ALIAS -from .common import ReplicationPad1d #DEFINE_ALIAS -from .common import ConstantPad1d #DEFINE_ALIAS -from .common import ReflectionPad2d #DEFINE_ALIAS -from .common import ReplicationPad2d #DEFINE_ALIAS -from .common import ConstantPad2d #DEFINE_ALIAS -from .common import ZeroPad2d #DEFINE_ALIAS -from .common import ReplicationPad3d #DEFINE_ALIAS -from .common import ConstantPad3d #DEFINE_ALIAS +from .common import Pad3D #DEFINE_ALIAS from .common import CosineSimilarity #DEFINE_ALIAS from .common import Embedding #DEFINE_ALIAS from .common import Linear #DEFINE_ALIAS from .common import Flatten #DEFINE_ALIAS from .common import Upsample #DEFINE_ALIAS -from .common import UpsamplingNearest2d #DEFINE_ALIAS -from .common import UpsamplingBilinear2d #DEFINE_ALIAS from .common import Dropout #DEFINE_ALIAS from .common import Dropout2d #DEFINE_ALIAS from .common import Dropout3d #DEFINE_ALIAS diff --git a/python/paddle/nn/layer/common.py b/python/paddle/nn/layer/common.py index bf2c58d45c..71bddefdb1 100644 --- a/python/paddle/nn/layer/common.py +++ b/python/paddle/nn/layer/common.py @@ -27,18 +27,9 @@ __all__ = [ 
'Embedding', 'Linear', 'Upsample', + 'Pad1D', 'Pad2D', - 'UpsamplingNearest2d', - 'UpsamplingBilinear2d', - 'ReflectionPad1d', - 'ReplicationPad1d', - 'ConstantPad1d', - 'ReflectionPad2d', - 'ReplicationPad2d', - 'ConstantPad2d', - 'ZeroPad2d', - 'ConstantPad3d', - 'ReplicationPad3d', + 'Pad3D', 'CosineSimilarity', 'Dropout', 'Dropout2d', @@ -389,254 +380,6 @@ class Upsample(layers.Layer): return out -class UpsamplingNearest2d(layers.Layer): - """ - This op upsamples a batch of images, using nearest neighbours' pixel values. - The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w), - where in_w is width of the input tensor, in_h is the height of the input tensor. - And the upsampling only applies on the two dimensions(height and width). - - Nearest neighbor interpolation is to perform nearest neighbor interpolation - in both the 3rd dimension(in height direction) and the 4th dimension(in width - direction) on input tensor. - - For details of nearest neighbor interpolation, please refer to Wikipedia: - https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation. - - x (Tensor): 4-D Tensor, its data type is float32, float64, or uint8, - its data format is specified by :attr:`data_format`. - size (list|tuple|Tensor|None): Output shape of image resize - layer, the shape is (out_h, out_w) when input is a 4-D Tensor. - Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1]. - If a Tensor Variable, its dimensions size should be a 1. - scale_factor (float|int|list|tuple|Tensor|None): The multiplier for the input height or width. At - least one of :attr:`size` or :attr:`scale_factor` must be set. - And :attr:`size` has a higher priority than :attr:`scale_factor`. - Has to match input size if it is either a list or a tuple or a Tensor. - Default: None. - data_format (str, optional): Specify the data format of the input, and the data format of the output - will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`, - `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: - `[batch_size, input_channels, input_height, input_width]`. When it is `"NCHW"`, the data is stored - in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. - name(str, optional): The default value is None. - Normally there is no need for user to set this property. - For more information, please refer to :ref:`api_guide_Name` - Returns: - A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels), - Raises: - TypeError: size should be a list or tuple or Tensor. - ValueError: 'nearest' only support 4-D tensor. - ValueError: One of size and scale_factor must not be None. - ValueError: size length should be 2 for input 4-D tensor. - ValueError: scale_factor should be greater than zero. - ValueError: data_format can only be 'NCHW', 'NHWC'. - Examples: - .. 
code-block:: python - - import paddle - import paddle.nn as nn - import numpy as np - paddle.disable_static() - - input_data = np.random.rand(2,3,6,10).astype("float32") - upsample_out = paddle.nn.UpsamplingNearest2d(size=[12,12]) - - input = paddle.to_tensor(input_data) - output = upsample_out(x=input) - print(output.shape) - # [2L, 3L, 12L, 12L] - - """ - - def __init__(self, - size=None, - scale_factor=None, - data_format='NCHW', - name=None): - super(UpsamplingNearest2d, self).__init__() - self.size = size - self.scale_factor = scale_factor - self.data_format = data_format - self.name = name - - def forward(self, x): - out = F.interpolate( - x, - size=self.size, - scale_factor=self.scale_factor, - mode='nearest', - align_corners=False, - align_mode=0, - data_format=self.data_format, - name=self.name) - - return out - - -class UpsamplingBilinear2d(layers.Layer): - """ - This op upsamples a batch of images, using bilinear' pixel values. - The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w), - where in_w is width of the input tensor, in_h is the height of the input tensor. - And the upsampling only applies on the two dimensions(height and width). - - Bilinear interpolation is an extension of linear interpolation for - interpolating functions of two variables (e.g. H-direction and - W-direction in this op) on a rectilinear 2D grid. The key idea is - to perform linear interpolation first in one direction, and then - again in the other direction. - - For details of bilinear interpolation, please refer to Wikipedia: - https://en.wikipedia.org/wiki/Bilinear_interpolation. - - x (Tensor): 4-D Tensor, its data type is float32, float64, or uint8, - its data format is specified by :attr:`data_format`. - size (list|tuple|Tensor|None): Output shape of image resize - layer, the shape is (out_h, out_w) when input is a 4-D Tensor. - Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1]. - If a Tensor Variable, its dimensions size should be a 1. - scale_factor (float|int|list|tuple|Tensor|None): The multiplier for the input height or width. At - least one of :attr:`size` or :attr:`scale_factor` must be set. - And :attr:`size` has a higher priority than :attr:`scale_factor`. - Has to match input size if it is either a list or a tuple or a Tensor. - Default: None. - data_format (str, optional): Specify the data format of the input, and the data format of the output - will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`, - `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: - `[batch_size, input_channels, input_height, input_width]`. When it is `"NCHW"`, the data is stored - in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. - name(str, optional): The default value is None. - Normally there is no need for user to set this property. - For more information, please refer to :ref:`api_guide_Name` - Returns: - A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels), - Raises: - TypeError: size should be a list or tuple or Tensor. - ValueError: 'bilinear' only support 4-D tensor. - ValueError: One of size and scale_factor must not be None. - ValueError: size length should be 2 for input 4-D tensor. - ValueError: scale_factor should be greater than zero. - ValueError: data_format can only be 'NCHW', 'NHWC'. - Examples: - .. 
code-block:: python - import paddle - import paddle.nn as nn - import numpy as np - paddle.disable_static() - - input_data = np.random.rand(2,3,6,10).astype("float32") - upsample_out = paddle.nn.UpsamplingBilinear2d(size=[12,12]) - - input = paddle.to_tensor(input_data) - output = upsample_out(x=input) - print(output.shape) - # [2L, 3L, 12L, 12L] - """ - - def __init__(self, - size=None, - scale_factor=None, - data_format='NCHW', - name=None): - super(UpsamplingBilinear2d, self).__init__() - self.size = size - self.scale_factor = scale_factor - self.data_format = data_format - self.name = name - - def forward(self, x): - out = F.interpolate( - x, - size=self.size, - scale_factor=self.scale_factor, - mode='bilinear', - align_corners=True, - align_mode=0, - data_format=self.data_format, - name=self.name) - - return out - - -class Pad2D(layers.Layer): - """ - This interface is used to construct a callable object of the ``Pad2D`` class. - The Pad2D layer pads the input tensor boundaries according to 'paddings' and 'mode'. - If mode is 'reflect', paddings[0] and paddings[1] must be no greater - than height-1. And the width dimension has the same condition. - Parameters: - paddings (int | List[int32]): The padding size. If padding is a int, uses the same - padding in all boundaries, if padding is a List, it must contain four integers, - (padding_top, padding_bottom, padding_left, padding_right). - Default is [0, 0, 0, 0]. - mode (str): Three modes: 'constant' (default), 'reflect', 'edge' . - When in 'constant' mode, this op uses a constant value to pad the input tensor. - When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor. - When in 'edge' mode, uses input boundaries to pad the input tensor. - Default is 'constant' - pad_value (float32): The value to fill the padded areas in 'constant' mode . Default is 0.0 - data_format (str): An string from: "NHWC", "NCHW". Specify the data format of - the input data. - Default is "NCHW" - Returns: - None - Examples: - .. code-block:: text - Input = [[[[1., 2., 3.], - [4., 5., 6.]]]] - Case 0: - paddings = [0, 1, 2, 3], - mode = 'constant' - pad_value = 0 - Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.], - [0., 0., 4., 5., 6., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0.]]]] - Case 1: - paddings = [0, 1, 2, 1], - mode = 'reflect' - Out = [[[[3., 2., 1., 2., 3., 2.], - [6., 5., 4., 5., 6., 5.], - [3., 2., 1., 2., 3., 2.]]]] - Case 2: - paddings = [0, 1, 2, 1], - mode = 'edge' - Out = [[[[1., 1., 1., 2., 3., 3.], - [4., 4., 4., 5., 6., 6.], - [4., 4., 4., 5., 6., 6.]]]] - Code Examples: - .. 
code-block:: python - import paddle.fluid as fluid - import paddle.nn as nn - import numpy as np - data = np.ones((2, 2, 2, 2)).astype('float32') - my_pad = nn.layer.Pad2D(paddings=[1, 1, 1, 1]) - with fluid.dygraph.guard(): - data = fluid.dygraph.to_variable(data) - result = my_pad(data) - """ - - def __init__(self, - paddings=0, - mode='constant', - pad_value=0.0, - data_format="NCHW"): - super(Pad2D, self).__init__() - self._mode = mode - self._pad_value = pad_value - self._data_format = data_format - self._paddings = [paddings] * 4 if isinstance(paddings, - int) else paddings - - def forward(self, input): - return paddle.fluid.layers.pad2d( - input, - paddings=self._paddings, - mode=self._mode, - pad_value=self._pad_value, - data_format=self._data_format) - - class Bilinear(layers.Layer): """ @@ -960,132 +703,21 @@ class AlphaDropout(layers.Layer): return out -class ReflectionPad1d(layers.Layer): +class Pad1D(layers.Layer): """ - This interface is used to construct a callable object of the ``ReflectionPad1d`` class. - Uses reflection of the input boundaries to pad the input tensor. - - Parameters: - padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions - of input will be padded. The pad has the form (pad_left, pad_right). - data_format (str): An string from: "NCL", "NLC". Specify the data format of the input data. - Default is "NCL" - name (str, optional) : The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - None - - Examples: - .. code-block:: text - - x = [[[1., 2., 3.], - [4., 5., 6.]]] - padding = [1, 2], - Out = [[[2. 1. 2. 3. 2. 1.] - [5. 4. 5. 6. 5. 4.]]] - - Code Examples: - .. code-block:: python - - import paddle - import paddle.nn as nn - import numpy as np - paddle.disable_static() - - input_shape = (1, 2, 3) - pad = [1, 2] - data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 - my_pad = nn.ReflectionPad1d(padding=pad) - data = paddle.to_tensor(data) - result = my_pad(data) - print(result.numpy()) - # [[[2. 1. 2. 3. 2. 1.] - # [5. 4. 5. 6. 5. 4.]]] - """ - - def __init__(self, padding, data_format="NCL", name=None): - super(ReflectionPad1d, self).__init__() - self._mode = "reflect" - self._data_format = data_format - self._pad = padding - self._name = name - - def forward(self, x): - return F.pad(x, - pad=self._pad, - mode=self._mode, - data_format=self._data_format, - name=self._name) - - -class ReplicationPad1d(layers.Layer): - """ - This interface is used to construct a callable object of the ``ReplicationPad1d`` class. - Uses input boundaries to pad the input tensor. - - Parameters: - padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions - of input will be padded. The pad has the form (pad_left, pad_right). - data_format (str): An string from: "NCL", "NLC". Specify the data format of the input data. - Default is "NCL" - name (str, optional) : The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - None - - Examples: - .. code-block:: text - - x = [[[1., 2., 3.], - [4., 5., 6.]]] - padding = [1, 2], - Out = [[[2. 1. 2. 3. 2. 1.] - [5. 4. 5. 6. 5. 4.]]] - - Code Examples: - .. 
code-block:: python - - import paddle - import paddle.nn as nn - import numpy as np - paddle.disable_static() - - input_shape = (1, 2, 3) - pad = [1, 2] - data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 - my_pad = nn.ReplicationPad1d(padding=pad) - data = paddle.to_tensor(data) - result = my_pad(data) - print(result.numpy()) - # [[[1. 1. 2. 3. 3. 3.] - # [1. 4. 5. 6. 6. 6.]]] - """ - - def __init__(self, padding, data_format="NCL", name=None): - super(ReplicationPad1d, self).__init__() - self._mode = "replicate" - self._data_format = data_format - self._pad = padding - self._name = name - - def forward(self, x): - return F.pad(x, - pad=self._pad, - mode=self._mode, - data_format=self._data_format, - name=self._name) - - -class ConstantPad1d(layers.Layer): - """ - This interface is used to construct a callable object of the ``ConstantPad1d`` class. - Uses a constant value to pad the input tensor. + This interface is used to construct a callable object of the ``Pad1D`` class. + Pad tensor according to 'pad', 'mode' and 'value'. + If mode is 'reflect', pad[0] and pad[1] must be no greater than width-1. Parameters: padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions of input will be padded. The pad has the form (pad_left, pad_right). + mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'. + When in 'constant' mode, this op uses a constant value to pad the input tensor. + When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor. + When in 'replicate' mode, uses input boundaries to pad the input tensor. + When in 'circular' mode, uses circular input to pad the input tensor. + Default is 'constant'. value (float32): The value to fill the padded areas. Default is 0.0 data_format (str): An string from: "NCL", "NLC". Specify the data format of the input data. Default is "NCL" @@ -1101,6 +733,7 @@ class ConstantPad1d(layers.Layer): x = [[[1., 2., 3.], [4., 5., 6.]]] padding = [1, 2], + mode = "constant" value = 0.0 Out = [[[0. 1. 2. 3. 0. 0.] [0. 4. 5. 6. 0. 0.]]] @@ -1115,21 +748,26 @@ class ConstantPad1d(layers.Layer): input_shape = (1, 2, 3) pad = [1, 2] - data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 - my_pad = nn.ConstantPad1d(padding=pad) - data = paddle.to_tensor(data) + mode = "constant" + data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1 + my_pad = nn.Pad1D(padding=pad, mode=mode) result = my_pad(data) print(result.numpy()) # [[[0. 1. 2. 3. 0. 0.] # [0. 4. 5. 6. 0. 0.]]] """ - def __init__(self, padding, value=0.0, data_format="NCL", name=None): - super(ConstantPad1d, self).__init__() - self._mode = "constant" - self._data_format = data_format + def __init__(self, + padding, + mode='constant', + value=0.0, + data_format="NCL", + name=None): + super(Pad1D, self).__init__() self._pad = padding + self._mode = mode self._value = value + self._data_format = data_format self._name = name def forward(self, x): @@ -1141,14 +779,22 @@ class ConstantPad1d(layers.Layer): name=self._name) -class ConstantPad2d(layers.Layer): +class Pad2D(layers.Layer): """ - This interface is used to construct a callable object of the ``ConstantPad2d`` class. - Uses a constant value to pad the input tensor. + This interface is used to construct a callable object of the ``Pad2D`` class. + Pad tensor according to 'pad', 'mode' and 'value'. + If mode is 'reflect', pad[0] and pad[1] must be no greater + than width-1. 
The height dimension has the same condition. Parameters: padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom). + mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'. + When in 'constant' mode, this op uses a constant value to pad the input tensor. + When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor. + When in 'replicate' mode, uses input boundaries to pad the input tensor. + When in 'circular' mode, uses circular input to pad the input tensor. + Default is 'constant'. value (float32): The value to fill the padded areas. Default is 0.0 data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data. Default is "NCHW" @@ -1164,6 +810,7 @@ class ConstantPad2d(layers.Layer): x = [[[[1., 2., 3.], [4., 5., 6.]]]] padding = [1, 1, 0, 0] + mode = "constant" value = 0.0 Out = [[[[0. 1. 2. 3. 0.] [0. 4. 5. 6. 0.]]]] @@ -1175,12 +822,11 @@ class ConstantPad2d(layers.Layer): import paddle.nn as nn import numpy as np paddle.disable_static() - input_shape = (1, 1, 2, 3) pad = [1, 0, 1, 2] - data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 - my_pad = nn.ConstantPad2d(padding=pad) - data = paddle.to_tensor(data) + mode = "constant" + data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1 + my_pad = nn.Pad2D(padding=pad, mode=mode) result = my_pad(data) print(result.numpy()) # [[[[0. 0. 0. 0.] @@ -1190,219 +836,44 @@ class ConstantPad2d(layers.Layer): # [0. 0. 0. 0.]]]] """ - def __init__(self, padding, value=0.0, data_format="NCHW", name=None): - super(ConstantPad2d, self).__init__() - self._mode = "constant" - self._data_format = data_format + def __init__(self, + padding, + mode='constant', + value=0.0, + data_format="NCHW", + name=None): + super(Pad2D, self).__init__() self._pad = padding + self._mode = mode self._value = value - self._name = name - - def forward(self, x): - return F.pad(x, - pad=self._pad, - mode=self._mode, - value=self._value, - data_format=self._data_format, - name=self._name) - - -class ZeroPad2d(layers.Layer): - """ - This interface is used to construct a callable object of the ``ZeroPad2d`` class. - Uses 0 to pad the input tensor. - - Parameters: - padding (Variable | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions - of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom). - data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data. - Default is "NCHW" - name (str, optional) : The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - None - - Examples: - .. code-block:: text - - x = [[[[1., 2., 3.], - [4., 5., 6.]]]] - padding = [1, 1, 0, 0] - Out = [[[[0. 1. 2. 3. 0.] - [0. 4. 5. 6. 0.]]]] - - Code Examples: - .. code-block:: python - - import paddle - import paddle.nn as nn - import numpy as np - paddle.disable_static() - - input_shape = (1, 1, 2, 3) - pad = [1, 0, 1, 2] - data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 - my_pad = nn.ZeroPad2d(padding=pad) - data = paddle.to_tensor(data) - result = my_pad(data) - print(result.numpy()) - # [[[[0. 0. 0. 0.] - # [0. 1. 2. 3.] - # [0. 4. 5. 6.] - # [0. 0. 0. 0.] - # [0. 0. 0. 
0.]]]] - """ - - def __init__(self, padding, data_format="NCHW", name=None): - super(ZeroPad2d, self).__init__() - self._mode = "constant" self._data_format = data_format - self._pad = padding - self._name = name - - def forward(self, x): - return F.pad(x, - pad=self._pad, - mode=self._mode, - data_format=self._data_format, - name=self._name) - - -class ReplicationPad2d(layers.Layer): - """ - This interface is used to construct a callable object of the ``ReplicationPad2d`` class. - Uses input boundaries to pad the input tensor. - - Parameters: - padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions - of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom). - data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data. - Default is "NCHW" - name (str, optional) : The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - None - - Examples: - .. code-block:: text - - x = [[[[1., 2., 3.], - [4., 5., 6.]]]] - padding = [1, 1, 0, 0] - Out = [[[[1. 1. 2. 3. 3.] - [4. 4. 5. 6. 6.]]]] - - Code Examples: - .. code-block:: python - - import paddle - import paddle.nn as nn - import numpy as np - paddle.disable_static() - - input_shape = (1, 1, 2, 3) - pad = [1, 0, 1, 2] - data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 - my_pad = nn.ReplicationPad2d(padding=pad) - data = paddle.to_tensor(data) - result = my_pad(data) - print(result.numpy()) - # [[[[1. 1. 2. 3.] - # [1. 1. 2. 3.] - # [4. 4. 5. 6.] - # [4. 4. 5. 6.] - # [4. 4. 5. 6.]]]] - """ - - def __init__(self, padding, data_format="NCHW", name=None): - super(ReplicationPad2d, self).__init__() - self._mode = "replicate" - self._data_format = data_format - self._pad = padding - self._name = name - - def forward(self, x): - return F.pad(x, - pad=self._pad, - mode=self._mode, - data_format=self._data_format, - name=self._name) - - -class ReflectionPad2d(layers.Layer): - """ - This interface is used to construct a callable object of the ``ReflectionPad2d`` class. - Uses reflection of the input boundaries to pad the input tensor. - - Parameters: - padding (Variable | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions - of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom). - data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data. - Default is "NCHW" - name (str, optional) : The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - None - - Examples: - .. code-block:: text - - x = [[[[1., 2., 3.], - [4., 5., 6.]]]] - padding = [1, 1, 0, 0] - Out = [[[[2. 1. 2. 3. 2.] - [5. 4. 5. 6. 5.]]]] - - Code Examples: - .. code-block:: python - - import paddle - import paddle.nn as nn - import numpy as np - paddle.disable_static() - - input_shape = (1, 1, 4, 3) - pad = [1, 0, 1, 2] - data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 - my_pad = nn.ReflectionPad2d(padding=pad) - data = paddle.to_tensor(data) - result = my_pad(data) - print(result.numpy()) - # [[[[ 5. 4. 5. 6.] - # [ 2. 1. 2. 3.] - # [ 5. 4. 5. 6.] - # [ 8. 7. 8. 9.] - # [11. 10. 11. 12.] - # [ 8. 7. 8. 9.] - # [ 5. 4. 5. 
6.]]]] - """ - - def __init__(self, padding, data_format="NCHW", name=None): - super(ReflectionPad2d, self).__init__() - self._mode = "reflect" - self._data_format = data_format - self._pad = padding self._name = name def forward(self, x): return F.pad(x, pad=self._pad, mode=self._mode, + value=self._value, data_format=self._data_format, name=self._name) -class ConstantPad3d(layers.Layer): +class Pad3D(layers.Layer): """ - This interface is used to construct a callable object of the ``ConstantPad3d`` class. - Uses a constant value to pad the input tensor. + This interface is used to construct a callable object of the ``Pad3D`` class. + Pad tensor according to 'pad', 'mode' and 'value'. + If mode is 'reflect', pad[0] and pad[1] must be no greater + than width-1. The height and depth dimension has the same condition. Parameters: padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back). + mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'. + When in 'constant' mode, this op uses a constant value to pad the input tensor. + When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor. + When in 'replicate' mode, uses input boundaries to pad the input tensor. + When in 'circular' mode, uses circular input to pad the input tensor. + Default is 'constant'. value (float32): The value to fill the padded areas. Default is 0.0 data_format (str): An string from: "NCDHW", "NDHWC". Specify the data format of the input data. Default is "NCDHW" @@ -1418,6 +889,7 @@ class ConstantPad3d(layers.Layer): x = [[[[[1., 2., 3.], [4., 5., 6.]]]]] padding = [1, 2, 0, 0, 0, 0] + mode = "constant" value = 0.0 Out = [[[[[0. 1. 2. 3. 0. 0.] [0. 4. 5. 6. 0. 0.]]]]] @@ -1428,13 +900,11 @@ class ConstantPad3d(layers.Layer): import paddle import paddle.nn as nn import numpy as np - paddle.disable_static() - input_shape = (1, 1, 1, 2, 3) pad = [1, 0, 1, 2, 0, 0] - data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 - my_pad = nn.ConstantPad3d(padding=pad) - data = paddle.to_tensor(data) + mode = "constant" + data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1 + my_pad = nn.Pad3D(padding=pad, mode=mode) result = my_pad(data) print(result.numpy()) # [[[[[0. 0. 0. 0.] @@ -1444,81 +914,24 @@ class ConstantPad3d(layers.Layer): # [0. 0. 0. 0.]]]]] """ - def __init__(self, padding, value=0.0, data_format="NCDHW", name=None): - super(ConstantPad3d, self).__init__() - self._mode = "constant" - self._data_format = data_format + def __init__(self, + padding, + mode='constant', + value=0.0, + data_format="NCDHW", + name=None): + super(Pad3D, self).__init__() self._pad = padding + self._mode = mode self._value = value - self._name = name - - def forward(self, x): - return F.pad(x, - pad=self._pad, - mode=self._mode, - value=self._value, - data_format=self._data_format, - name=self._name) - - -class ReplicationPad3d(layers.Layer): - """ - This interface is used to construct a callable object of the ``ReplicationPad3d`` class. - Uses input boundaries to pad the input tensor. - - Parameters: - padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions - of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back). - data_format (str): An string from: "NCDHW", "NDHWC". 
Specify the data format of the input data. - Default is "NCDHW" - name (str, optional) : The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - None - - Examples: - .. code-block:: text - - x = [[[[[1., 2., 3.], - [4., 5., 6.]]]]] - padding = [1, 2, 0, 0, 0, 0] - Out = [[[[[1. 1. 2. 3. 3. 3.] - [4. 4. 5. 6. 6. 6.]]]]] - - Code Examples: - .. code-block:: python - - import paddle - import paddle.nn as nn - import numpy as np - paddle.disable_static() - - input_shape = (1, 1, 1, 2, 3) - pad = [1, 0, 1, 2, 0, 0] - data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 - my_pad = nn.ReplicationPad3d(padding=pad) - data = paddle.to_tensor(data) - result = my_pad(data) - print(result.numpy()) - # [[[[[1. 1. 2. 3.] - # [1. 1. 2. 3.] - # [4. 4. 5. 6.] - # [4. 4. 5. 6.] - # [4. 4. 5. 6.]]]]] - """ - - def __init__(self, padding, data_format="NCDHW", name=None): - super(ReplicationPad3d, self).__init__() - self._mode = "replicate" self._data_format = data_format - self._pad = padding self._name = name def forward(self, x): return F.pad(x, pad=self._pad, mode=self._mode, + value=self._value, data_format=self._data_format, name=self._name) -- GitLab
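
Usage sketch (not part of the patch itself): a minimal example of the unified padding layers this change introduces, paddle.nn.Pad1D / Pad2D / Pad3D, which replace the per-mode classes (ReflectionPad*, ReplicationPad*, ConstantPad*, ZeroPad2d). The expected values follow the docstring and test examples above; exact print formatting may vary with the Paddle version.

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn as nn

    paddle.disable_static()

    # 1-D padding on an NCL tensor; pad has the form (pad_left, pad_right).
    x = paddle.to_tensor(np.arange(6, dtype="float32").reshape((1, 2, 3)) + 1)
    pad1d = nn.Pad1D(padding=[1, 2], mode="constant", value=0.0)
    print(pad1d(x).numpy())
    # [[[0. 1. 2. 3. 0. 0.]
    #   [0. 4. 5. 6. 0. 0.]]]

    # 2-D padding on an NCHW tensor; pad has the form
    # (pad_left, pad_right, pad_top, pad_bottom).
    y = paddle.to_tensor(np.arange(6, dtype="float32").reshape((1, 1, 2, 3)) + 1)
    pad2d = nn.Pad2D(padding=[1, 1, 0, 0], mode="replicate")
    print(pad2d(y).numpy())
    # [[[[1. 1. 2. 3. 3.]
    #    [4. 4. 5. 6. 6.]]]]

    # 3-D padding on an NCDHW tensor; pad has the form
    # (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
    z = paddle.to_tensor(np.arange(6, dtype="float32").reshape((1, 1, 1, 2, 3)) + 1)
    pad3d = nn.Pad3D(padding=[1, 2, 0, 0, 0, 0], mode="circular")
    print(pad3d(z).shape)  # [1, 1, 1, 2, 6]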