From b2ee8380182dd8e32e7b628d06747ebed1948569 Mon Sep 17 00:00:00 2001
From: Feiyu Chan
Date: Thu, 22 Apr 2021 13:41:08 +0800
Subject: [PATCH] add glu in nn.functional (#32096)

add glu in nn.functional
---
 python/paddle/fluid/nets.py                   |  2 +
 .../paddle/fluid/tests/unittests/test_glu.py  | 23 +++++++++
 python/paddle/nn/functional/__init__.py       |  1 +
 python/paddle/nn/functional/activation.py     | 50 +++++++++++++++++++
 4 files changed, 76 insertions(+)

diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py
index c47cce76f89..e8f8bdd3f9a 100644
--- a/python/paddle/fluid/nets.py
+++ b/python/paddle/fluid/nets.py
@@ -16,6 +16,7 @@ from __future__ import print_function
 import six
 from . import layers
 from .data_feeder import check_variable_and_dtype, convert_dtype
+from ..utils import deprecated
 
 __all__ = [
     "simple_img_conv_pool",
@@ -332,6 +333,7 @@ def sequence_conv_pool(input,
     return pool_out
 
 
+@deprecated(since="2.0.0", update_to="paddle.nn.functional.glu")
 def glu(input, dim=-1):
     r"""
     :api_attr: Static Graph
diff --git a/python/paddle/fluid/tests/unittests/test_glu.py b/python/paddle/fluid/tests/unittests/test_glu.py
index 63818d8ac50..25f1975db0c 100644
--- a/python/paddle/fluid/tests/unittests/test_glu.py
+++ b/python/paddle/fluid/tests/unittests/test_glu.py
@@ -17,6 +17,9 @@ from paddle import fluid
 import paddle.fluid.dygraph as dg
 import unittest
 
+import paddle
+from paddle.nn import functional as F
+
 
 def sigmoid(x):
     return 1.0 / (1.0 + np.exp(-x))
@@ -48,5 +51,25 @@ class TestGLUCase(unittest.TestCase):
         self.check_identity(fluid.CUDAPlace(0))
 
 
+class TestGLUV2(unittest.TestCase):
+    def setUp(self):
+        self.x = np.random.randn(5, 20)
+        self.dim = -1
+        self.out = glu(self.x, self.dim)
+
+    def check_identity(self, place):
+        with dg.guard(place):
+            x_var = paddle.to_tensor(self.x)
+            y_var = F.glu(x_var, self.dim)
+            y_np = y_var.numpy()
+
+        np.testing.assert_allclose(y_np, self.out)
+
+    def test_case(self):
+        self.check_identity(fluid.CPUPlace())
+        if fluid.is_compiled_with_cuda():
+            self.check_identity(fluid.CUDAPlace(0))
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py
index 36f39a5056e..f1f9913e558 100644
--- a/python/paddle/nn/functional/__init__.py
+++ b/python/paddle/nn/functional/__init__.py
@@ -58,6 +58,7 @@ from .activation import tanh_ #DEFINE_ALIAS
 from .activation import tanhshrink #DEFINE_ALIAS
 from .activation import thresholded_relu #DEFINE_ALIAS
 from .activation import log_softmax #DEFINE_ALIAS
+from .activation import glu #DEFINE_ALIAS
 from .common import dropout #DEFINE_ALIAS
 from .common import dropout2d #DEFINE_ALIAS
 from .common import dropout3d #DEFINE_ALIAS
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index 3553a93dfab..8119b0f45d9 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -23,6 +23,8 @@ from ...tensor.math import tanh #DEFINE_ALIAS
 from ...tensor.math import tanh_ #DEFINE_ALIAS
 
 from ...tensor.manipulation import _print_warning_in_static_mode
+from ...tensor.manipulation import chunk
+from ...tensor.math import multiply
 
 __all__ = [
     'brelu',
@@ -53,6 +55,7 @@
     'tanhshrink',
     'thresholded_relu',
     'log_softmax',
+    'glu',
 ]
 
 import warnings
@@ -1276,3 +1279,50 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
         attrs={'axis': axis})
 
     return out
+
+
+def glu(x, axis=-1, name=None):
+    r"""
+    The gated linear unit. The input is evenly split into 2 parts along a
+    given axis. The first part is used as the content, and the second part is
+    passed through a sigmoid function then used as the gate. The output is an
+    elementwise multiplication of the content and the gate.
+
+    .. math::
+
+        \mathrm{GLU}(a, b) = a \otimes \sigma(b)
+
+    Parameters:
+        x (Tensor): The input Tensor with data type float32, float64.
+        axis (int, optional): The axis along which to split the input tensor.
+            It should be in range [-D, D), where D is the number of
+            dimensions of ``x`` . If ``axis`` < 0, it works the same way as
+            :math:`axis + D` . Default is -1.
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        A Tensor with the same data type as x. The size along the given axis
+        is halved.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            from paddle.nn import functional as F
+
+            x = paddle.to_tensor(
+                [[-0.22014759, -1.76358426, 0.80566144, 0.04241343],
+                 [-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
+            )
+            print(F.glu(x).numpy())
+            # array([[-0.15216254, -0.9004892 ],
+            #        [-1.0577879 , -0.46985325]], dtype=float32)
+
+    """
+    check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
+                             "glu")
+    a, b = chunk(x, 2, axis=axis, name=name)
+    gate = sigmoid(b, name=name)
+    out = multiply(a, gate, name=name)
+    return out
-- 
GitLab
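
A quick way for reviewers to sanity-check the new API against the formula
\mathrm{GLU}(a, b) = a \otimes \sigma(b): the sketch below mirrors the NumPy
reference used in test_glu.py. It assumes a Paddle build with this patch
applied; glu_ref is a hypothetical helper name, not part of the patch.

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    def glu_ref(x, axis=-1):
        # Reference: split the input in half along `axis`, then gate the
        # first half with the sigmoid of the second half (a * sigmoid(b)).
        a, b = np.split(x, 2, axis=axis)
        return a * (1.0 / (1.0 + np.exp(-b)))

    x_np = np.random.randn(5, 20).astype("float32")
    y = F.glu(paddle.to_tensor(x_np), axis=-1)

    # glu halves the size of the input along the chosen axis.
    assert tuple(y.shape) == (5, 10)
    np.testing.assert_allclose(y.numpy(), glu_ref(x_np), rtol=1e-5, atol=1e-6)

Because glu is composed entirely of the existing chunk, sigmoid, and multiply
ops, the patch needs no new C++ operator, and the function should work
anywhere those building blocks are supported.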