From 920d44dfe1b0e9954e1c06b110b792f5eba21f94 Mon Sep 17 00:00:00 2001 From: Asthestarsfalll <72954905+Asthestarsfalll@users.noreply.github.com> Date: Thu, 21 Apr 2022 16:52:25 +0800 Subject: [PATCH] =?UTF-8?q?=E3=80=90PaddlePaddle=20Hackathon=202=E3=80=912?= =?UTF-8?q?3=E3=80=81=E4=B8=BA=20Paddle=20=E6=96=B0=E5=A2=9E=20Softmax2D?= =?UTF-8?q?=20=E7=BB=84=E7=BD=91API=20(#40910)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Hackathon 23 * fix bug * fix pylint error * try * fix CI-Coverage * update and add more unittest * update --- .../fluid/tests/unittests/test_softmax2d.py | 111 ++++++++++++++++++ python/paddle/nn/__init__.py | 2 + python/paddle/nn/layer/__init__.py | 1 + python/paddle/nn/layer/activation.py | 52 ++++++++ 4 files changed, 166 insertions(+) create mode 100644 python/paddle/fluid/tests/unittests/test_softmax2d.py diff --git a/python/paddle/fluid/tests/unittests/test_softmax2d.py b/python/paddle/fluid/tests/unittests/test_softmax2d.py new file mode 100644 index 0000000000..4879e9a0ef --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_softmax2d.py @@ -0,0 +1,111 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +from test_softmax_op import ref_softmax + + +class TestSoftmax2DAPI(unittest.TestCase): + def setUp(self): + self.shape = [2, 6, 5, 4] + self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64') + self.axis = -3 + self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + else paddle.CPUPlace() + + def test_static_api(self): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program()): + x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + m = paddle.nn.Softmax2D() + out = m(x) + exe = paddle.static.Executor(self.place) + res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) + out_ref = ref_softmax(self.x_np, self.axis) + self.assertTrue(np.allclose(out_ref, res)) + + def test_dygraph_api(self): + paddle.disable_static(self.place) + x = paddle.to_tensor(self.x_np) + m = paddle.nn.Softmax2D() + out = m(x) + out_ref = ref_softmax(self.x_np, self.axis) + self.assertTrue(np.allclose(out_ref, out.numpy())) + paddle.enable_static() + + +class TestSoftmax2DShape(TestSoftmax2DAPI): + def setUp(self): + self.shape = [2, 6, 4] + self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64') + self.axis = -3 + self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + else paddle.CPUPlace() + + +class TestSoftmax2DFloat32(TestSoftmax2DAPI): + def setUp(self): + self.shape = [2, 3, 4] + self.x_np = np.random.uniform(-1, 1, self.shape).astype('float32') + self.axis = -3 + self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + else paddle.CPUPlace() + + +class TestSoftmax2DCPU(TestSoftmax2DAPI): + def setUp(self): + self.shape = [2, 6, 4] + self.x_np = 
np.random.uniform(-1, 1, self.shape).astype('float64')
+        self.axis = -3
+        self.place = paddle.CPUPlace()
+
+
+class TestSoftmax2DRepr(unittest.TestCase):
+    def setUp(self):
+        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
+            else paddle.CPUPlace()
+
+    def test_extra_repr(self):
+        paddle.disable_static(self.place)
+        m = paddle.nn.Softmax2D(name='test')
+        self.assertTrue(m.extra_repr() == 'name=test')
+        paddle.enable_static()
+
+
+class TestSoftmax2DError(unittest.TestCase):
+    def setUp(self):
+        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
+            else paddle.CPUPlace()
+
+    def test_static_error(self):
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.fluid.data('X', [5, 5], 'float32')
+            m = paddle.nn.Softmax2D()
+            self.assertRaises(AssertionError, m, x)
+
+    def test_dygraph_error(self):
+        paddle.disable_static(self.place)
+        x_np = np.random.randn(2, 3, 4, 2, 3)
+        x = paddle.to_tensor(x_np, dtype='float64')
+        m = paddle.nn.Softmax2D()
+        self.assertRaises(AssertionError, m, x)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py
index b83a900059..b4824eff00 100644
--- a/python/paddle/nn/__init__.py
+++ b/python/paddle/nn/__init__.py
@@ -41,6 +41,7 @@ from .layer.activation import Sigmoid  # noqa: F401
 from .layer.activation import Hardsigmoid  # noqa: F401
 from .layer.activation import LogSigmoid  # noqa: F401
 from .layer.activation import Softmax  # noqa: F401
+from .layer.activation import Softmax2D  # noqa: F401
 from .layer.activation import Softplus  # noqa: F401
 from .layer.activation import Softshrink  # noqa: F401
 from .layer.activation import Softsign  # noqa: F401
@@ -260,6 +261,7 @@ __all__ = [     #noqa
     'AdaptiveMaxPool1D',
     'TransformerEncoder',
     'Softmax',
+    'Softmax2D',
     'ParameterList',
     'Conv2D',
     'Softshrink',
diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py
index 2b50508065..7dd18f1fef 100644
--- a/python/paddle/nn/layer/__init__.py
+++ b/python/paddle/nn/layer/__init__.py
@@ -26,6 +26,7 @@ from .activation import LeakyReLU  # noqa: F401
 from .activation import Sigmoid  # noqa: F401
 from .activation import Softmax  # noqa: F401
 from .activation import LogSoftmax  # noqa: F401
+from .activation import Softmax2D  # noqa: F401
 from .common import Bilinear  # noqa: F401
 from .common import Pad1D  # noqa: F401
 from .common import Pad2D  # noqa: F401
diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py
index 400585c431..cd82fe12ff 100644
--- a/python/paddle/nn/layer/activation.py
+++ b/python/paddle/nn/layer/activation.py
@@ -1338,3 +1338,55 @@ class Maxout(Layer):
     def extra_repr(self):
         name_str = ', name={}'.format(self._name) if self._name else ''
         return 'groups={}, axis={}{}'.format(self._groups, self._axis, name_str)
+
+
+class Softmax2D(Layer):
+    r"""
+    Softmax2D Activation.
+    Given a Tensor with shape (B, C, H, W) or (C, H, W), it applies Softmax along the channel
+    dimension, so the values across the C channels at each spatial location (h_i, w_j) sum to one.
+
+    Shape:
+        - Input: :math:`(B, C, H, W)` or :math:`(C, H, W)`
+        - Output: :math:`(B, C, H, W)` or :math:`(C, H, W)` (same as input)
+
+    Returns:
+        A Tensor of the same shape and dtype as the input, with values in the range [0, 1].
+
+    Examples:
+        ..
code-block:: python + + import paddle + + x = paddle.rand([1, 2, 3, 4]) + # [[[[0.42496058 0.1172187 0.14664008 0.8151267 ] + # [0.24430142 0.42052492 0.60372984 0.79307914] + # [0.4539401 0.90458065 0.10235776 0.62009853]] + + # [[0.11731581 0.16053623 0.05667042 0.91876775] + # [0.9413854 0.30770817 0.6788164 0.9543593 ] + # [0.4145064 0.75909156 0.11598814 0.73599935]]]] + m = paddle.nn.Softmax2D() + out = m(x) + # [[[[0.5763103 0.48917228 0.5224772 0.4741129 ] + # [0.3324591 0.5281743 0.48123717 0.45976716] + # [0.5098571 0.5363083 0.49659243 0.4710572 ]] + + # [[0.42368975 0.51082766 0.47752273 0.5258871 ] + # [0.66754097 0.47182566 0.5187628 0.5402329 ] + # [0.49014282 0.46369177 0.50340754 0.5289428 ]]]] + """ + + def __init__(self, name=None): + super(Softmax2D, self).__init__() + self._dtype = None + self._name = name + + def forward(self, x): + assert x.ndim == 3 or x.ndim == 4, "Softmax2D requires a 3D or 4D tensor as input. Received: {}D.".format( + x.ndim) + return F.softmax(x, axis=-3, dtype=self._dtype, name=self._name) + + def extra_repr(self): + name_str = 'name={}'.format(self._name) if self._name else '' + return name_str -- GitLab
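A minimal usage sketch of the new layer (not part of the patch; the names `x`, `m`, `out` and `ref` are illustrative, and Paddle 2.x dygraph mode is assumed): since `Softmax2D.forward` simply calls `F.softmax(x, axis=-3)`, the layer should agree with the functional softmax over the channel axis, and the channel values at every spatial location should sum to one.

import numpy as np
import paddle

# Compare the new layer against the functional softmax it wraps.
x = paddle.rand([2, 6, 5, 4])                       # (B, C, H, W)
m = paddle.nn.Softmax2D()
out = m(x)

# Equivalent functional call: softmax over the channel axis (-3).
ref = paddle.nn.functional.softmax(x, axis=-3)
print(np.allclose(out.numpy(), ref.numpy()))        # expected: True
print(np.allclose(out.sum(axis=-3).numpy(), 1.0))   # expected: True, channels sum to 1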