Unverified commit b2ee8380, authored by Feiyu Chan, committed by GitHub

add glu in nn.functional (#32096)

add glu in nn.functional
Parent e727820d
@@ -16,6 +16,7 @@ from __future__ import print_function
import six
from . import layers
from .data_feeder import check_variable_and_dtype, convert_dtype
from ..utils import deprecated
__all__ = [
"simple_img_conv_pool",
@@ -332,6 +333,7 @@ def sequence_conv_pool(input,
    return pool_out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.glu")
def glu(input, dim=-1):
    r"""
    :api_attr: Static Graph
......
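The decorated fluid.nets.glu keeps working in static-graph mode but now points users to paddle.nn.functional.glu. A minimal sketch (not part of the diff) of exercising the deprecated entry point, assuming paddle.utils.deprecated reports through Python's warnings module; the exact warning category and message may vary between Paddle releases:

import warnings

import paddle
from paddle import fluid

paddle.enable_static()
x = fluid.data(name="x", shape=[-1, 8], dtype="float32")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    y = fluid.nets.glu(x, dim=-1)  # deprecated since 2.0.0

print(y.shape)                           # (-1, 4): the split axis is halved
print([str(w.message) for w in caught])  # deprecation notice, if emitted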
@@ -17,6 +17,9 @@ from paddle import fluid
import paddle.fluid.dygraph as dg
import unittest
import paddle
from paddle.nn import functional as F
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))
@@ -48,5 +51,25 @@ class TestGLUCase(unittest.TestCase):
            self.check_identity(fluid.CUDAPlace(0))
class TestGLUV2(unittest.TestCase):
    def setUp(self):
        self.x = np.random.randn(5, 20)
        self.dim = -1
        self.out = glu(self.x, self.dim)

    def check_identity(self, place):
        with dg.guard(place):
            x_var = paddle.to_tensor(self.x)
            y_var = F.glu(x_var, self.dim)
            y_np = y_var.numpy()

        np.testing.assert_allclose(y_np, self.out)

    def test_case(self):
        self.check_identity(fluid.CPUPlace())
        if fluid.is_compiled_with_cuda():
            self.check_identity(fluid.CUDAPlace(0))


if __name__ == '__main__':
    unittest.main()
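The glu(...) reference called in setUp above is a NumPy helper defined earlier in the test file, outside the hunk shown here. A minimal sketch of such a reference; the name glu_ref is illustrative, not the actual helper's name:

import numpy as np

def glu_ref(x, axis=-1):
    # Split evenly into content `a` and gate `b`, then return a * sigmoid(b).
    a, b = np.split(x, 2, axis=axis)
    return a * (1.0 / (1.0 + np.exp(-b)))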
@@ -58,6 +58,7 @@ from .activation import tanh_ #DEFINE_ALIAS
from .activation import tanhshrink #DEFINE_ALIAS
from .activation import thresholded_relu #DEFINE_ALIAS
from .activation import log_softmax #DEFINE_ALIAS
from .activation import glu #DEFINE_ALIAS
from .common import dropout #DEFINE_ALIAS
from .common import dropout2d #DEFINE_ALIAS
from .common import dropout3d #DEFINE_ALIAS
......
@@ -23,6 +23,8 @@ from ...tensor.math import tanh #DEFINE_ALIAS
from ...tensor.math import tanh_ #DEFINE_ALIAS
from ...tensor.manipulation import _print_warning_in_static_mode
from ...tensor.manipulation import chunk
from ...tensor.math import multiply
__all__ = [
    'brelu',
@@ -53,6 +55,7 @@ __all__ = [
    'tanhshrink',
    'thresholded_relu',
    'log_softmax',
    'glu',
]
import warnings
@@ -1276,3 +1279,50 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
        attrs={'axis': axis})
    return out
def glu(x, axis=-1, name=None):
    r"""
    The gated linear unit. The input is evenly split into two parts along
    the given axis. The first part is used as the content, and the second
    part is passed through a sigmoid function and used as the gate. The
    output is the elementwise product of the content and the gate.

    .. math::

        \mathrm{GLU}(a, b) = a \otimes \sigma(b)

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        axis (int, optional): The axis along which to split the input tensor.
            It should be in the range [-D, D), where D is the number of
            dimensions of ``x`` . If ``axis`` < 0, it works the same way as
            :math:`axis + D` . Default is -1.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor with the same data type as ``x``. The size of the given axis
        is halved.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.nn import functional as F

            x = paddle.to_tensor(
                [[-0.22014759, -1.76358426,  0.80566144,  0.04241343],
                 [-1.94900405, -1.89956081,  0.17134808, -1.11280477]]
            )
            print(F.glu(x).numpy())
            # array([[-0.15216254, -0.9004892 ],
            #        [-1.0577879 , -0.46985325]], dtype=float32)

    """
    check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                             "glu")
    a, b = chunk(x, 2, axis=axis, name=name)
    gate = sigmoid(b, name=name)
    out = paddle.multiply(a, gate, name=name)
    return out
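Usage sketch for the new functional API (not part of the diff): splitting along a non-default axis. The size of the chosen axis must be even and is halved in the output.

import paddle
from paddle.nn import functional as F

x = paddle.randn([4, 6], dtype="float32")
y = F.glu(x, axis=0)   # rows are split into content/gate halves
print(y.shape)         # [2, 6]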