diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py
index 9fc4971fec23923a40080613612d3a1843a86d2e..58312979c523bdddaba9f3c4f612426aee65266f 100755
--- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py
@@ -606,20 +606,6 @@ class TestBilinearInterpOpAPI(unittest.TestCase):
             self.assertTrue(np.allclose(res, expect_res))
 
 
-class TestUpsampleBilinear2dInterpOpAPI2_0(unittest.TestCase):
-    def test_case(self):
-
-        # dygraph
-        x_data = np.random.random((1, 3, 6, 6)).astype("float32")
-        upsample = paddle.nn.UpsamplingBilinear2d(scale_factor=[2, 2])
-        with fluid.dygraph.guard():
-            x = fluid.dygraph.to_variable(x_data)
-            interp = upsample(x)
-            expect = bilinear_interp_np(
-                x_data, out_h=12, out_w=12, align_corners=True)
-            self.assertTrue(np.allclose(interp.numpy(), expect))
-
-
 class TestBilinearInterpOpAPI_dy(unittest.TestCase):
     def test_case(self):
         import paddle
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index ce9cc33cf900576c1b909318e1d831bb9ab1cc32..e3f477c1d9b5e9819ce6b60bc5b7b5a1eaf0ff5f 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -316,23 +316,6 @@ class TestLayer(LayerTest):
 
         self.assertTrue(np.allclose(static_ret, dy_ret_value))
 
-    def test_pad2d(self):
-        with self.static_graph():
-            t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32')
-            ret = layers.pad2d(t, paddings=[1, 1, 1, 1])
-            static_ret = self.get_static_graph_result(
-                feed={'t': np.ones(
-                    [3, 3, 5, 5], dtype='float32')},
-                fetch_list=[ret])[0]
-
-        with self.dynamic_graph():
-            t = np.ones([3, 3, 5, 5], dtype='float32')
-            my_pad2d = paddle.nn.layer.Pad2D(paddings=1)
-            dy_ret = my_pad2d(base.to_variable(t))
-            dy_ret_value = dy_ret.numpy()
-
-        self.assertTrue(np.allclose(static_ret, dy_ret_value))
-
     def test_matmul(self):
         with self.static_graph():
             t = layers.data(name='t', shape=[3, 3], dtype='float32')
diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py
index 2feca1c30689cec20e1d696cc672516414786038..1f88568b5bc8edbfdcfce27f3b6d67ed1e23dfdd 100755
--- a/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py
@@ -526,20 +526,6 @@ class TestNearestAPI(unittest.TestCase):
             self.assertTrue(np.allclose(results[i + 1], expect_res))
 
 
-class TestUpsampleNearest2dInterpOpAPI2_0(unittest.TestCase):
-    def test_case(self):
-
-        # dygraph
-        x_data = np.random.random((1, 3, 6, 6)).astype("float32")
-        upsample = paddle.nn.UpsamplingNearest2d(scale_factor=[2, 2])
-        with fluid.dygraph.guard():
-            x = fluid.dygraph.to_variable(x_data)
-            interp = upsample(x)
-            expect = nearest_neighbor_interp_np(
-                x_data, out_h=12, out_w=12, align_corners=False)
-            self.assertTrue(np.allclose(interp.numpy(), expect))
-
-
 class TestNearestInterpException(unittest.TestCase):
     def test_exception(self):
         input = fluid.data(name="input", shape=[1, 3, 6, 6], dtype="float32")
diff --git a/python/paddle/fluid/tests/unittests/test_pad3d_op.py b/python/paddle/fluid/tests/unittests/test_pad3d_op.py
index aa75ee9c7c18decd9c49be328f40eeee912c3658..c29352bb51af687063245d1cfc978f3550d39e53 100644
--- a/python/paddle/fluid/tests/unittests/test_pad3d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pad3d_op.py
@@ -314,7 +314,6 @@ class TestPadAPI(unittest.TestCase):
 
     def test_dygraph_1(self):
         paddle.disable_static()
-
         input_shape = (1, 2, 3, 4, 5)
         pad = [1, 2, 1, 1, 3, 4]
         mode = "constant"
@@ -342,7 +341,6 @@ class TestPadAPI(unittest.TestCase):
 
     def test_dygraph_2(self):
         paddle.disable_static()
-
         input_shape = (2, 3, 4, 5)
         pad = [1, 1, 3, 4]
         mode = "constant"
@@ -370,38 +368,8 @@ class TestPadAPI(unittest.TestCase):
         self.assertTrue(np.allclose(y1.numpy(), np_out1))
         self.assertTrue(np.allclose(y2.numpy(), np_out2))
 
-    def test_dygraph_2(self):
-        paddle.disable_static()
-
-        input_shape = (2, 3, 4, 5)
-        pad = [1, 1, 3, 4]
-        mode = "constant"
-        value = 100
-        input_data = np.random.rand(*input_shape).astype(np.float32)
-        np_out1 = self._get_numpy_out(
-            input_data, pad, mode, value, data_format="NCHW")
-        np_out2 = self._get_numpy_out(
-            input_data, pad, mode, value, data_format="NHWC")
-        tensor_data = paddle.to_tensor(input_data)
-        tensor_pad = paddle.to_tensor(pad, dtype="int32")
-
-        y1 = F.pad(tensor_data,
-                   pad=tensor_pad,
-                   mode=mode,
-                   value=value,
-                   data_format="NCHW")
-        y2 = F.pad(tensor_data,
-                   pad=tensor_pad,
-                   mode=mode,
-                   value=value,
-                   data_format="NHWC")
-
-        self.assertTrue(np.allclose(y1.numpy(), np_out1))
-        self.assertTrue(np.allclose(y2.numpy(), np_out2))
-
     def test_dygraph_3(self):
         paddle.disable_static()
-
         input_shape = (3, 4, 5)
         pad = [3, 4]
         mode = "constant"
@@ -455,6 +423,8 @@ class TestPad1dAPI(unittest.TestCase):
             out = np.pad(input_data, pad, mode=mode)
         elif mode == "replicate":
             out = np.pad(input_data, pad, mode="edge")
+        elif mode == "circular":
+            out = np.pad(input_data, pad, mode="wrap")
 
         return out
 
@@ -471,9 +441,10 @@ class TestPad1dAPI(unittest.TestCase):
         value = 100
         input_data = np.random.rand(*input_shape).astype(np.float32)
 
-        pad_reflection = nn.ReflectionPad1d(padding=pad)
-        pad_replication = nn.ReplicationPad1d(padding=pad)
-        pad_constant = nn.ConstantPad1d(padding=pad, value=value)
+        pad_reflection = nn.Pad1D(padding=pad, mode="reflect")
+        pad_replication = nn.Pad1D(padding=pad, mode="replicate")
+        pad_constant = nn.Pad1D(padding=pad, mode="constant", value=value)
+        pad_circular = nn.Pad1D(padding=pad, mode="circular")
 
         data = paddle.to_tensor(input_data)
 
@@ -492,6 +463,11 @@ class TestPad1dAPI(unittest.TestCase):
             input_data, pad, "constant", value=value, data_format="NCL")
         self.assertTrue(np.allclose(output.numpy(), np_out))
 
+        output = pad_circular(data)
+        np_out = self._get_numpy_out(
+            input_data, pad, "circular", value=value, data_format="NCL")
+        self.assertTrue(np.allclose(output.numpy(), np_out))
+
 
 class TestPad2dAPI(unittest.TestCase):
     def _get_numpy_out(self,
@@ -521,6 +497,8 @@ class TestPad2dAPI(unittest.TestCase):
             out = np.pad(input_data, pad, mode=mode)
         elif mode == "replicate":
             out = np.pad(input_data, pad, mode="edge")
+        elif mode == "circular":
+            out = np.pad(input_data, pad, mode="wrap")
 
         return out
 
@@ -537,10 +515,10 @@ class TestPad2dAPI(unittest.TestCase):
         value = 100
         input_data = np.random.rand(*input_shape).astype(np.float32)
 
-        pad_reflection = nn.ReflectionPad2d(padding=pad)
-        pad_replication = nn.ReplicationPad2d(padding=pad)
-        pad_constant = nn.ConstantPad2d(padding=pad, value=value)
-        pad_zero = nn.ZeroPad2d(padding=pad)
+        pad_reflection = nn.Pad2D(padding=pad, mode="reflect")
+        pad_replication = nn.Pad2D(padding=pad, mode="replicate")
+        pad_constant = nn.Pad2D(padding=pad, mode="constant", value=value)
+        pad_circular = nn.Pad2D(padding=pad, mode="circular")
 
         data = paddle.to_tensor(input_data)
 
@@ -559,9 +537,9 @@ class TestPad2dAPI(unittest.TestCase):
             input_data, pad, "constant", value=value, data_format="NCHW")
         self.assertTrue(np.allclose(output.numpy(), np_out))
 
-        output = pad_zero(data)
+        output = pad_circular(data)
         np_out = self._get_numpy_out(
-            input_data, pad, "constant", value=0, data_format="NCHW")
+            input_data, pad, "circular", data_format="NCHW")
         self.assertTrue(np.allclose(output.numpy(), np_out))
 
 
@@ -595,6 +573,8 @@ class TestPad3dAPI(unittest.TestCase):
             out = np.pad(input_data, pad, mode=mode)
         elif mode == "replicate":
             out = np.pad(input_data, pad, mode="edge")
+        elif mode == "circular":
+            out = np.pad(input_data, pad, mode="wrap")
 
         return out
 
@@ -611,11 +591,18 @@ class TestPad3dAPI(unittest.TestCase):
         value = 100
         input_data = np.random.rand(*input_shape).astype(np.float32)
 
-        pad_replication = nn.ReplicationPad3d(padding=pad)
-        pad_constant = nn.ConstantPad3d(padding=pad, value=value)
+        pad_reflection = nn.Pad3D(padding=pad, mode="reflect")
+        pad_replication = nn.Pad3D(padding=pad, mode="replicate")
+        pad_constant = nn.Pad3D(padding=pad, mode="constant", value=value)
+        pad_circular = nn.Pad3D(padding=pad, mode="circular")
 
         data = paddle.to_tensor(input_data)
 
+        output = pad_reflection(data)
+        np_out = self._get_numpy_out(
+            input_data, pad, "reflect", data_format="NCDHW")
+        self.assertTrue(np.allclose(output.numpy(), np_out))
+
         output = pad_replication(data)
         np_out = self._get_numpy_out(
             input_data, pad, "replicate", data_format="NCDHW")
@@ -626,6 +613,11 @@ class TestPad3dAPI(unittest.TestCase):
             input_data, pad, "constant", value=value, data_format="NCDHW")
         self.assertTrue(np.allclose(output.numpy(), np_out))
 
+        output = pad_circular(data)
+        np_out = self._get_numpy_out(
+            input_data, pad, "circular", data_format="NCDHW")
+        self.assertTrue(np.allclose(output.numpy(), np_out))
+
 
 class TestPad3dOpError(unittest.TestCase):
     def test_errors(self):
@@ -673,32 +665,30 @@ class TestPad3dOpError(unittest.TestCase):
 class TestPadDataformatError(unittest.TestCase):
     def test_errors(self):
         def test_ncl():
-            paddle.disable_static(paddle.CPUPlace())
             input_shape = (1, 2, 3, 4)
             pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32'))
             data = np.arange(
                 np.prod(input_shape), dtype=np.float64).reshape(input_shape) + 1
-            my_pad = nn.ReplicationPad1d(padding=pad, data_format="NCL")
+            my_pad = nn.Pad1D(padding=pad, mode="replicate", data_format="NCL")
             data = paddle.to_tensor(data)
             result = my_pad(data)
 
         def test_nchw():
-            paddle.disable_static(paddle.CPUPlace())
             input_shape = (1, 2, 4)
             pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32'))
             data = np.arange(
                 np.prod(input_shape), dtype=np.float64).reshape(input_shape) + 1
-            my_pad = nn.ReplicationPad1d(padding=pad, data_format="NCHW")
+            my_pad = nn.Pad1D(padding=pad, mode="replicate", data_format="NCHW")
             data = paddle.to_tensor(data)
             result = my_pad(data)
 
         def test_ncdhw():
-            paddle.disable_static(paddle.CPUPlace())
             input_shape = (1, 2, 3, 4)
             pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32'))
             data = np.arange(
                 np.prod(input_shape), dtype=np.float64).reshape(input_shape) + 1
-            my_pad = nn.ReplicationPad1d(padding=pad, data_format="NCDHW")
+            my_pad = nn.Pad1D(
+                padding=pad, mode="replicate", data_format="NCDHW")
             data = paddle.to_tensor(data)
             result = my_pad(data)
 
diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py
index 6af59465be47ccf3cf2748e50e784fc5948211d6..edd33b5d6e786afc6d6afc1838293a233ec5be72 100644
--- a/python/paddle/nn/__init__.py
+++ b/python/paddle/nn/__init__.py
@@ -71,22 +71,14 @@ from .layer.activation import Tanhshrink  #DEFINE_ALIAS
 from .layer.activation import ThresholdedReLU  #DEFINE_ALIAS
 from .layer.activation import LogSoftmax  #DEFINE_ALIAS
 from .layer.activation import Maxout  #DEFINE_ALIAS
-from .layer.common import ReflectionPad1d  #DEFINE_ALIAS
-from .layer.common import ReplicationPad1d  #DEFINE_ALIAS
-from .layer.common import ConstantPad1d  #DEFINE_ALIAS
-from .layer.common import ReflectionPad2d  #DEFINE_ALIAS
-from .layer.common import ReplicationPad2d  #DEFINE_ALIAS
-from .layer.common import ConstantPad2d  #DEFINE_ALIAS
-from .layer.common import ZeroPad2d  #DEFINE_ALIAS
-from .layer.common import ReplicationPad3d  #DEFINE_ALIAS
-from .layer.common import ConstantPad3d  #DEFINE_ALIAS
+from .layer.common import Pad1D  #DEFINE_ALIAS
+from .layer.common import Pad2D  #DEFINE_ALIAS
+from .layer.common import Pad3D  #DEFINE_ALIAS
 from .layer.common import CosineSimilarity  #DEFINE_ALIAS
 from .layer.common import Embedding  #DEFINE_ALIAS
 from .layer.common import Linear  #DEFINE_ALIAS
 from .layer.common import Flatten  #DEFINE_ALIAS
 from .layer.common import Upsample  #DEFINE_ALIAS
-from .layer.common import UpsamplingNearest2d  #DEFINE_ALIAS
-from .layer.common import UpsamplingBilinear2d  #DEFINE_ALIAS
 from .layer.common import Bilinear  #DEFINE_ALIAS
 from .layer.common import Dropout  #DEFINE_ALIAS
 from .layer.common import Dropout2d  #DEFINE_ALIAS
diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py
index afd2cc3a2342ca68d50e7f826950878c9a90cc03..1defed3362c1c2ffaa6ff214d7095069fa0c988a 100644
--- a/python/paddle/nn/layer/__init__.py
+++ b/python/paddle/nn/layer/__init__.py
@@ -44,23 +44,14 @@ from .activation import LogSoftmax  #DEFINE_ALIAS
 from .common import BilinearTensorProduct  #DEFINE_ALIAS
 from .common import Bilinear  #DEFINE_ALIAS
 from .common import Pool2D  #DEFINE_ALIAS
+from .common import Pad1D  #DEFINE_ALIAS
 from .common import Pad2D  #DEFINE_ALIAS
-from .common import ReflectionPad1d  #DEFINE_ALIAS
-from .common import ReplicationPad1d  #DEFINE_ALIAS
-from .common import ConstantPad1d  #DEFINE_ALIAS
-from .common import ReflectionPad2d  #DEFINE_ALIAS
-from .common import ReplicationPad2d  #DEFINE_ALIAS
-from .common import ConstantPad2d  #DEFINE_ALIAS
-from .common import ZeroPad2d  #DEFINE_ALIAS
-from .common import ReplicationPad3d  #DEFINE_ALIAS
-from .common import ConstantPad3d  #DEFINE_ALIAS
+from .common import Pad3D  #DEFINE_ALIAS
 from .common import CosineSimilarity  #DEFINE_ALIAS
 from .common import Embedding  #DEFINE_ALIAS
 from .common import Linear  #DEFINE_ALIAS
 from .common import Flatten  #DEFINE_ALIAS
 from .common import Upsample  #DEFINE_ALIAS
-from .common import UpsamplingNearest2d  #DEFINE_ALIAS
-from .common import UpsamplingBilinear2d  #DEFINE_ALIAS
 from .common import Dropout  #DEFINE_ALIAS
 from .common import Dropout2d  #DEFINE_ALIAS
 from .common import Dropout3d  #DEFINE_ALIAS
diff --git a/python/paddle/nn/layer/common.py b/python/paddle/nn/layer/common.py
index bf2c58d45c17777aa7db326ae972e95fb91444c7..65575362ce65283a7a00ace05c85e29adce9a3a4 100644
--- a/python/paddle/nn/layer/common.py
+++ b/python/paddle/nn/layer/common.py
@@ -27,18 +27,9 @@ __all__ = [
     'Embedding',
     'Linear',
     'Upsample',
+    'Pad1D',
     'Pad2D',
-    'UpsamplingNearest2d',
-    'UpsamplingBilinear2d',
-    'ReflectionPad1d',
-    'ReplicationPad1d',
-    'ConstantPad1d',
-    'ReflectionPad2d',
-    'ReplicationPad2d',
-    'ConstantPad2d',
-    'ZeroPad2d',
-    'ConstantPad3d',
-    'ReplicationPad3d',
+    'Pad3D',
     'CosineSimilarity',
     'Dropout',
     'Dropout2d',
@@ -559,84 +550,6 @@ class UpsamplingBilinear2d(layers.Layer):
         return out
 
 
-class Pad2D(layers.Layer):
-    """
-    This interface is used to construct a callable object of the ``Pad2D`` class.
-    The Pad2D layer pads the input tensor boundaries according to 'paddings' and 'mode'.
-    If mode is 'reflect', paddings[0] and paddings[1] must be no greater
-    than height-1. And the width dimension has the same condition.
-    Parameters:
-        paddings (int | List[int32]): The padding size. If padding is a int, uses the same
-            padding in all boundaries, if padding is a List, it must contain four integers,
-            (padding_top, padding_bottom, padding_left, padding_right).
-            Default is [0, 0, 0, 0].
-        mode (str): Three modes: 'constant' (default), 'reflect', 'edge' .
-            When in 'constant' mode, this op uses a constant value to pad the input tensor.
-            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
-            When in 'edge' mode, uses input boundaries to pad the input tensor.
-            Default is 'constant'
-        pad_value (float32): The value to fill the padded areas in 'constant' mode . Default is 0.0
-        data_format (str): An string from: "NHWC", "NCHW". Specify the data format of
-            the input data.
-            Default is "NCHW"
-    Returns:
-        None
-    Examples:
-        .. code-block:: text
-            Input = [[[[1., 2., 3.],
-                       [4., 5., 6.]]]]
-            Case 0:
-                paddings = [0, 1, 2, 3],
-                mode = 'constant'
-                pad_value = 0
-                Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.],
-                         [0., 0., 4., 5., 6., 0., 0., 0.],
-                         [0., 0., 0., 0., 0., 0., 0., 0.]]]]
-            Case 1:
-                paddings = [0, 1, 2, 1],
-                mode = 'reflect'
-                Out = [[[[3., 2., 1., 2., 3., 2.],
-                         [6., 5., 4., 5., 6., 5.],
-                         [3., 2., 1., 2., 3., 2.]]]]
-            Case 2:
-                paddings = [0, 1, 2, 1],
-                mode = 'edge'
-                Out = [[[[1., 1., 1., 2., 3., 3.],
-                         [4., 4., 4., 5., 6., 6.],
-                         [4., 4., 4., 5., 6., 6.]]]]
-    Code Examples:
-        .. code-block:: python
-            import paddle.fluid as fluid
-            import paddle.nn as nn
-            import numpy as np
-            data = np.ones((2, 2, 2, 2)).astype('float32')
-            my_pad = nn.layer.Pad2D(paddings=[1, 1, 1, 1])
-            with fluid.dygraph.guard():
-                data = fluid.dygraph.to_variable(data)
-                result = my_pad(data)
-    """
-
-    def __init__(self,
-                 paddings=0,
-                 mode='constant',
-                 pad_value=0.0,
-                 data_format="NCHW"):
-        super(Pad2D, self).__init__()
-        self._mode = mode
-        self._pad_value = pad_value
-        self._data_format = data_format
-        self._paddings = [paddings] * 4 if isinstance(paddings,
-                                                      int) else paddings
-
-    def forward(self, input):
-        return paddle.fluid.layers.pad2d(
-            input,
-            paddings=self._paddings,
-            mode=self._mode,
-            pad_value=self._pad_value,
-            data_format=self._data_format)
-
-
 class Bilinear(layers.Layer):
     """
 
@@ -960,176 +873,64 @@ class AlphaDropout(layers.Layer):
         return out
 
 
-class ReflectionPad1d(layers.Layer):
+class Pad1D(layers.Layer):
     """
-    This interface is used to construct a callable object of the ``ReflectionPad1d`` class.
-    Uses reflection of the input boundaries to pad the input tensor.
-
-    Parameters:
-        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
-            of input will be padded. The pad has the form (pad_left, pad_right).
-        data_format (str): An string from: "NCL", "NLC". Specify the data format of the input data.
-            Default is "NCL"
-        name (str, optional) : The default value is None. Normally there is no need for
-            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        None
-
-    Examples:
-        .. code-block:: text
-
-            x = [[[1., 2., 3.],
-                  [4., 5., 6.]]]
-            padding = [1, 2],
-            Out = [[[2. 1. 2. 3. 2. 1.]
-                    [5. 4. 5. 6. 5. 4.]]]
-
-    Code Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.nn as nn
-            import numpy as np
-            paddle.disable_static()
-
-            input_shape = (1, 2, 3)
-            pad = [1, 2]
-            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
-            my_pad = nn.ReflectionPad1d(padding=pad)
-            data = paddle.to_tensor(data)
-            result = my_pad(data)
-            print(result.numpy())
-            # [[[2. 1. 2. 3. 2. 1.]
-            #   [5. 4. 5. 6. 5. 4.]]]
-    """
-
-    def __init__(self, padding, data_format="NCL", name=None):
-        super(ReflectionPad1d, self).__init__()
-        self._mode = "reflect"
-        self._data_format = data_format
-        self._pad = padding
-        self._name = name
-
-    def forward(self, x):
-        return F.pad(x,
-                     pad=self._pad,
-                     mode=self._mode,
-                     data_format=self._data_format,
-                     name=self._name)
-
-
-class ReplicationPad1d(layers.Layer):
-    """
-    This interface is used to construct a callable object of the ``ReplicationPad1d`` class.
-    Uses input boundaries to pad the input tensor.
-
-    Parameters:
-        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
-            of input will be padded. The pad has the form (pad_left, pad_right).
-        data_format (str): An string from: "NCL", "NLC". Specify the data format of the input data.
-            Default is "NCL"
-        name (str, optional) : The default value is None. Normally there is no need for
-            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        None
-
-    Examples:
-        .. code-block:: text
-
-            x = [[[1., 2., 3.],
-                  [4., 5., 6.]]]
-            padding = [1, 2],
-            Out = [[[2. 1. 2. 3. 2. 1.]
-                    [5. 4. 5. 6. 5. 4.]]]
-
-    Code Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.nn as nn
-            import numpy as np
-            paddle.disable_static()
-
-            input_shape = (1, 2, 3)
-            pad = [1, 2]
-            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
-            my_pad = nn.ReplicationPad1d(padding=pad)
-            data = paddle.to_tensor(data)
-            result = my_pad(data)
-            print(result.numpy())
-            # [[[1. 1. 2. 3. 3. 3.]
-            #   [1. 4. 5. 6. 6. 6.]]]
-    """
-
-    def __init__(self, padding, data_format="NCL", name=None):
-        super(ReplicationPad1d, self).__init__()
-        self._mode = "replicate"
-        self._data_format = data_format
-        self._pad = padding
-        self._name = name
-
-    def forward(self, x):
-        return F.pad(x,
-                     pad=self._pad,
-                     mode=self._mode,
-                     data_format=self._data_format,
-                     name=self._name)
-
-
-class ConstantPad1d(layers.Layer):
-    """
-    This interface is used to construct a callable object of the ``ConstantPad1d`` class.
-    Uses a constant value to pad the input tensor.
-
+    This interface is used to construct a callable object of the ``Pad1D`` class.
+    Pad tensor according to 'pad', 'mode' and 'value'.
+    If mode is 'reflect', pad[0] and pad[1] must be no greater than width-1.
     Parameters:
         padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
             of input will be padded. The pad has the form (pad_left, pad_right).
+        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
+            When in 'constant' mode, this op uses a constant value to pad the input tensor.
+            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
+            When in 'replicate' mode, uses input boundaries to pad the input tensor.
+            When in 'circular' mode, uses circular input to pad the input tensor.
+            Default is 'constant'.
         value (float32): The value to fill the padded areas. Default is 0.0
         data_format (str): An string from: "NCL", "NLC". Specify the data format of the input data.
            Default is "NCL"
         name (str, optional) : The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
     Returns:
         None
-
     Examples:
         .. code-block:: text
-
            x = [[[1., 2., 3.],
                  [4., 5., 6.]]]
            padding = [1, 2],
+            mode = "constant"
            value = 0.0
            Out = [[[0. 1. 2. 3. 0. 0.]
                    [0. 4. 5. 6. 0. 0.]]]
-
     Code Examples:
         .. code-block:: python
-
            import paddle
            import paddle.nn as nn
            import numpy as np
           paddle.disable_static()
-
           input_shape = (1, 2, 3)
           pad = [1, 2]
-            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
-            my_pad = nn.ConstantPad1d(padding=pad)
-            data = paddle.to_tensor(data)
+            mode = "constant"
+            data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1
+            my_pad = nn.Pad1D(padding=pad, mode=mode)
           result = my_pad(data)
           print(result.numpy())
           # [[[0. 1. 2. 3. 0. 0.]
           #   [0. 4. 5. 6. 0. 0.]]]
    """
 
-    def __init__(self, padding, value=0.0, data_format="NCL", name=None):
-        super(ConstantPad1d, self).__init__()
-        self._mode = "constant"
-        self._data_format = data_format
+    def __init__(self,
+                 padding,
+                 mode='constant',
+                 value=0.0,
+                 data_format="NCL",
+                 name=None):
+        super(Pad1D, self).__init__()
         self._pad = padding
+        self._mode = mode
         self._value = value
+        self._data_format = data_format
         self._name = name
 
     def forward(self, x):
@@ -1141,46 +942,48 @@ class ConstantPad1d(layers.Layer):
         return F.pad(x,
                      pad=self._pad,
                      mode=self._mode,
                      value=self._value,
                      data_format=self._data_format,
                      name=self._name)
 
 
-class ConstantPad2d(layers.Layer):
+class Pad2D(layers.Layer):
     """
-    This interface is used to construct a callable object of the ``ConstantPad2d`` class.
-    Uses a constant value to pad the input tensor.
-
+    This interface is used to construct a callable object of the ``Pad2D`` class.
+    Pad tensor according to 'pad', 'mode' and 'value'.
+    If mode is 'reflect', pad[0] and pad[1] must be no greater
+    than width-1. The height dimension has the same condition.
     Parameters:
         padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
             of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom).
+        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
+            When in 'constant' mode, this op uses a constant value to pad the input tensor.
+            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
+            When in 'replicate' mode, uses input boundaries to pad the input tensor.
+            When in 'circular' mode, uses circular input to pad the input tensor.
+            Default is 'constant'.
         value (float32): The value to fill the padded areas. Default is 0.0
         data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data.
            Default is "NCHW"
         name (str, optional) : The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
     Returns:
         None
-
     Examples:
         .. code-block:: text
-
           x = [[[[1., 2., 3.],
                  [4., 5., 6.]]]]
           padding = [1, 1, 0, 0]
+            mode = "constant"
           value = 0.0
           Out = [[[[0. 1. 2. 3. 0.]
                    [0. 4. 5. 6. 0.]]]]
-
     Code Examples:
         .. code-block:: python
-
           import paddle
           import paddle.nn as nn
           import numpy as np
           paddle.disable_static()
-
           input_shape = (1, 1, 2, 3)
           pad = [1, 0, 1, 2]
-            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
-            my_pad = nn.ConstantPad2d(padding=pad)
-            data = paddle.to_tensor(data)
+            mode = "constant"
+            data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1
+            my_pad = nn.Pad2D(padding=pad, mode=mode)
           result = my_pad(data)
           print(result.numpy())
           # [[[[0. 0. 0. 0.]
@@ -1190,251 +993,69 @@ class ConstantPad2d(layers.Layer):
           #   [0. 0. 0. 0.]]]]
    """
 
-    def __init__(self, padding, value=0.0, data_format="NCHW", name=None):
-        super(ConstantPad2d, self).__init__()
-        self._mode = "constant"
-        self._data_format = data_format
+    def __init__(self,
+                 padding,
+                 mode='constant',
+                 value=0.0,
+                 data_format="NCHW",
+                 name=None):
+        super(Pad2D, self).__init__()
         self._pad = padding
+        self._mode = mode
         self._value = value
-        self._name = name
-
-    def forward(self, x):
-        return F.pad(x,
-                     pad=self._pad,
-                     mode=self._mode,
-                     value=self._value,
-                     data_format=self._data_format,
-                     name=self._name)
-
-
-class ZeroPad2d(layers.Layer):
-    """
-    This interface is used to construct a callable object of the ``ZeroPad2d`` class.
-    Uses 0 to pad the input tensor.
-
-    Parameters:
-        padding (Variable | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
-            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom).
-        data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data.
-            Default is "NCHW"
-        name (str, optional) : The default value is None. Normally there is no need for
-            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        None
-
-    Examples:
-        .. code-block:: text
-
-            x = [[[[1., 2., 3.],
-                   [4., 5., 6.]]]]
-            padding = [1, 1, 0, 0]
-            Out = [[[[0. 1. 2. 3. 0.]
-                     [0. 4. 5. 6. 0.]]]]
-
-    Code Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.nn as nn
-            import numpy as np
-            paddle.disable_static()
-
-            input_shape = (1, 1, 2, 3)
-            pad = [1, 0, 1, 2]
-            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
-            my_pad = nn.ZeroPad2d(padding=pad)
-            data = paddle.to_tensor(data)
-            result = my_pad(data)
-            print(result.numpy())
-            # [[[[0. 0. 0. 0.]
-            #   [0. 1. 2. 3.]
-            #   [0. 4. 5. 6.]
-            #   [0. 0. 0. 0.]
-            #   [0. 0. 0. 0.]]]]
-    """
-
-    def __init__(self, padding, data_format="NCHW", name=None):
-        super(ZeroPad2d, self).__init__()
-        self._mode = "constant"
         self._data_format = data_format
-        self._pad = padding
-        self._name = name
-
-    def forward(self, x):
-        return F.pad(x,
-                     pad=self._pad,
-                     mode=self._mode,
-                     data_format=self._data_format,
-                     name=self._name)
-
-
-class ReplicationPad2d(layers.Layer):
-    """
-    This interface is used to construct a callable object of the ``ReplicationPad2d`` class.
-    Uses input boundaries to pad the input tensor.
-
-    Parameters:
-        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
-            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom).
-        data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data.
-            Default is "NCHW"
-        name (str, optional) : The default value is None. Normally there is no need for
-            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        None
-
-    Examples:
-        .. code-block:: text
-
-            x = [[[[1., 2., 3.],
-                   [4., 5., 6.]]]]
-            padding = [1, 1, 0, 0]
-            Out = [[[[1. 1. 2. 3. 3.]
-                     [4. 4. 5. 6. 6.]]]]
-
-    Code Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.nn as nn
-            import numpy as np
-            paddle.disable_static()
-
-            input_shape = (1, 1, 2, 3)
-            pad = [1, 0, 1, 2]
-            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
-            my_pad = nn.ReplicationPad2d(padding=pad)
-            data = paddle.to_tensor(data)
-            result = my_pad(data)
-            print(result.numpy())
-            # [[[[1. 1. 2. 3.]
-            #   [1. 1. 2. 3.]
-            #   [4. 4. 5. 6.]
-            #   [4. 4. 5. 6.]
-            #   [4. 4. 5. 6.]]]]
-    """
-
-    def __init__(self, padding, data_format="NCHW", name=None):
-        super(ReplicationPad2d, self).__init__()
-        self._mode = "replicate"
-        self._data_format = data_format
-        self._pad = padding
-        self._name = name
-
-    def forward(self, x):
-        return F.pad(x,
-                     pad=self._pad,
-                     mode=self._mode,
-                     data_format=self._data_format,
-                     name=self._name)
-
-
-class ReflectionPad2d(layers.Layer):
-    """
-    This interface is used to construct a callable object of the ``ReflectionPad2d`` class.
-    Uses reflection of the input boundaries to pad the input tensor.
-
-    Parameters:
-        padding (Variable | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
-            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom).
-        data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data.
-            Default is "NCHW"
-        name (str, optional) : The default value is None. Normally there is no need for
-            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        None
-
-    Examples:
-        .. code-block:: text
-
-            x = [[[[1., 2., 3.],
-                   [4., 5., 6.]]]]
-            padding = [1, 1, 0, 0]
-            Out = [[[[2. 1. 2. 3. 2.]
-                     [5. 4. 5. 6. 5.]]]]
-
-    Code Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.nn as nn
-            import numpy as np
-            paddle.disable_static()
-
-            input_shape = (1, 1, 4, 3)
-            pad = [1, 0, 1, 2]
-            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
-            my_pad = nn.ReflectionPad2d(padding=pad)
-            data = paddle.to_tensor(data)
-            result = my_pad(data)
-            print(result.numpy())
-            # [[[[ 5.  4.  5.  6.]
-            #   [ 2.  1.  2.  3.]
-            #   [ 5.  4.  5.  6.]
-            #   [ 8.  7.  8.  9.]
-            #   [11. 10. 11. 12.]
-            #   [ 8.  7.  8.  9.]
-            #   [ 5.  4.  5.  6.]]]]
-    """
-
-    def __init__(self, padding, data_format="NCHW", name=None):
-        super(ReflectionPad2d, self).__init__()
-        self._mode = "reflect"
-        self._data_format = data_format
-        self._pad = padding
         self._name = name
 
     def forward(self, x):
         return F.pad(x,
                      pad=self._pad,
                      mode=self._mode,
+                     value=self._value,
                      data_format=self._data_format,
                      name=self._name)
 
 
-class ConstantPad3d(layers.Layer):
+class Pad3D(layers.Layer):
     """
-    This interface is used to construct a callable object of the ``ConstantPad3d`` class.
-    Uses a constant value to pad the input tensor.
-
+    This interface is used to construct a callable object of the ``Pad3D`` class.
+    Pad tensor according to 'pad', 'mode' and 'value'.
+    If mode is 'reflect', pad[0] and pad[1] must be no greater
+    than width-1. The height and depth dimension has the same condition.
     Parameters:
         padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
             of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
+        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
+            When in 'constant' mode, this op uses a constant value to pad the input tensor.
+            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
+            When in 'replicate' mode, uses input boundaries to pad the input tensor.
+            When in 'circular' mode, uses circular input to pad the input tensor.
+            Default is 'constant'.
         value (float32): The value to fill the padded areas. Default is 0.0
         data_format (str): An string from: "NCDHW", "NDHWC". Specify the data format of the input data.
            Default is "NCDHW"
         name (str, optional) : The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
     Returns:
         None
-
     Examples:
         .. code-block:: text
-
           x = [[[[[1., 2., 3.],
                   [4., 5., 6.]]]]]
           padding = [1, 2, 0, 0, 0, 0]
+            mode = "constant"
           value = 0.0
           Out = [[[[[0. 1. 2. 3. 0. 0.]
                     [0. 4. 5. 6. 0. 0.]]]]]
-
     Code Examples:
         .. code-block:: python
-
           import paddle
           import paddle.nn as nn
           import numpy as np
-            paddle.disable_static()
-
           input_shape = (1, 1, 1, 2, 3)
           pad = [1, 0, 1, 2, 0, 0]
-            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
-            my_pad = nn.ConstantPad3d(padding=pad)
-            data = paddle.to_tensor(data)
+            mode = "constant"
+            data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1
+            my_pad = nn.Pad3D(padding=pad, mode=mode)
           result = my_pad(data)
           print(result.numpy())
           # [[[[[0. 0. 0. 0.]
@@ -1444,81 +1065,24 @@ class ConstantPad3d(layers.Layer):
           #    [0. 0. 0. 0.]]]]]
    """
 
-    def __init__(self, padding, value=0.0, data_format="NCDHW", name=None):
-        super(ConstantPad3d, self).__init__()
-        self._mode = "constant"
-        self._data_format = data_format
+    def __init__(self,
+                 padding,
+                 mode='constant',
+                 value=0.0,
+                 data_format="NCDHW",
+                 name=None):
+        super(Pad3D, self).__init__()
         self._pad = padding
+        self._mode = mode
         self._value = value
-        self._name = name
-
-    def forward(self, x):
-        return F.pad(x,
-                     pad=self._pad,
-                     mode=self._mode,
-                     value=self._value,
-                     data_format=self._data_format,
-                     name=self._name)
-
-
-class ReplicationPad3d(layers.Layer):
-    """
-    This interface is used to construct a callable object of the ``ReplicationPad3d`` class.
-    Uses input boundaries to pad the input tensor.
-
-    Parameters:
-        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
-            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
-        data_format (str): An string from: "NCDHW", "NDHWC". Specify the data format of the input data.
-            Default is "NCDHW"
-        name (str, optional) : The default value is None. Normally there is no need for
-            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        None
-
-    Examples:
-        .. code-block:: text
-
-            x = [[[[[1., 2., 3.],
-                    [4., 5., 6.]]]]]
-            padding = [1, 2, 0, 0, 0, 0]
-            Out = [[[[[1. 1. 2. 3. 3. 3.]
-                      [4. 4. 5. 6. 6. 6.]]]]]
-
-    Code Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.nn as nn
-            import numpy as np
-            paddle.disable_static()
-
-            input_shape = (1, 1, 1, 2, 3)
-            pad = [1, 0, 1, 2, 0, 0]
-            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
-            my_pad = nn.ReplicationPad3d(padding=pad)
-            data = paddle.to_tensor(data)
-            result = my_pad(data)
-            print(result.numpy())
-            # [[[[[1. 1. 2. 3.]
-            #     [1. 1. 2. 3.]
-            #     [4. 4. 5. 6.]
-            #     [4. 4. 5. 6.]
-            #     [4. 4. 5. 6.]]]]]
-    """
-
-    def __init__(self, padding, data_format="NCDHW", name=None):
-        super(ReplicationPad3d, self).__init__()
-        self._mode = "replicate"
         self._data_format = data_format
-        self._pad = padding
         self._name = name
 
     def forward(self, x):
         return F.pad(x,
                      pad=self._pad,
                      mode=self._mode,
+                     value=self._value,
                      data_format=self._data_format,
                      name=self._name)
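
Not part of the patch above: the following is a minimal, hypothetical usage sketch of the unified padding layers this diff introduces (it assumes a Paddle build in which `paddle.nn` exposes `Pad1D`/`Pad2D`/`Pad3D` with the `mode` argument shown in the patch). The shapes and padding values are illustrative only; the check against `np.pad(..., mode="wrap")` mirrors the "circular" assertions added to `test_pad3d_op.py`.

```python
import numpy as np
import paddle
import paddle.nn as nn

# Random NCHW input; the old per-mode classes (ReflectionPad2d, ZeroPad2d, ...)
# are replaced by one class per dimensionality plus a `mode` argument.
x_np = np.random.rand(1, 2, 4, 4).astype("float32")
x = paddle.to_tensor(x_np)

# Pad width by (1, 1) and height by (2, 2): padding = [left, right, top, bottom].
pad2d = nn.Pad2D(padding=[1, 1, 2, 2], mode="circular")
y = pad2d(x)

# "circular" padding corresponds to numpy's "wrap" mode, as the updated tests assume.
ref = np.pad(x_np, ((0, 0), (0, 0), (2, 2), (1, 1)), mode="wrap")
assert np.allclose(y.numpy(), ref)
print(y.shape)  # [1, 2, 8, 6]
```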