Unverified commit 61800f4a, authored by Qi Li, committed by GitHub

elu gelu relu logsigmoid, test=develop (#26304)

* logsigmoid and LogSigmoid, test=develop

* add elu gelu relu, test=develop

* update to_variable to to_tensor, test=develop

* address review comments, test=develop

* address review comments, test=develop

* change to_variable to to_tensor in test, test=develop
Parent 0a461ac3
@@ -645,6 +645,7 @@ __all__ += ['gelu']
_gelu_ = generate_layer_fn('gelu')
@deprecated(since="2.0.0", update_to="paddle.nn.functional.gelu")
def gelu(x, approximate=False):
locals_var = locals().copy()
kwargs = dict()
@@ -655,10 +656,6 @@ def gelu(x, approximate=False):
gelu.__doc__ = """
:alias_main: paddle.nn.functional.gelu
:alias: paddle.nn.functional.gelu,paddle.nn.functional.activation.gelu
:old_api: paddle.fluid.layers.gelu
:strong:`GeLU Activation Operator`
For more details, see [Gaussian Error Linear Units](https://arxiv.org/abs/1606.08415).
......
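Editor's note: the hunk above only marks paddle.fluid.layers.gelu as deprecated; the replacement API is the paddle.nn.functional.gelu added later in this diff. A minimal migration sketch (dygraph mode, illustrative input values) might look like:

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()
    x = paddle.to_tensor(np.random.randn(2, 3).astype('float32'))
    out = F.gelu(x)                          # exact (erf-based) GELU, replaces paddle.fluid.layers.gelu(x)
    out_tanh = F.gelu(x, approximate=True)   # tanh approximation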
@@ -118,7 +118,7 @@ class TestLogSigmoid(TestActivation):
x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
out = np.log(1 / (1 + np.exp(-x)))
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.inputs = {'X': x}
self.outputs = {'Out': out}
def test_check_grad(self):
@@ -127,6 +127,48 @@ class TestLogSigmoid(TestActivation):
self.check_grad(['X'], 'Out', max_relative_error=0.008)
class TestLogSigmoidAPI(unittest.TestCase):
# test paddle.nn.LogSigmoid, paddle.nn.functional.logsigmoid
def setUp(self):
self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [11, 17])
out1 = F.logsigmoid(x)
m = paddle.nn.LogSigmoid()
out2 = m(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
for r in res:
self.assertEqual(np.allclose(out_ref, r), True)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.logsigmoid(x)
m = paddle.nn.LogSigmoid()
out2 = m(x)
out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
paddle.enable_static()
def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.logsigmoid, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32')
self.assertRaises(TypeError, F.logsigmoid, x_int32)
# the input dtype float16 is supported
x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16')
F.logsigmoid(x_fp16)
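Editor's note: the reference value used above, np.log(1 / (1 + np.exp(-x))), is fine for the small inputs drawn in these tests, but np.exp(-x) overflows for large negative x. If the reference were ever reused with a wider input range, a numerically stable NumPy sketch (not part of this diff) would be:

    import numpy as np

    def logsigmoid_ref(x):
        # log(sigmoid(x)) = -log(1 + exp(-x)) = -logaddexp(0, -x), stable for large |x|
        return -np.logaddexp(0.0, -x)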
class TestTanh(TestActivation, TestParameter):
def setUp(self):
self.op_type = "tanh"
@@ -644,7 +686,7 @@ class TestRelu(TestActivation):
x[np.abs(x) < 0.005] = 0.02
out = np.maximum(x, 0)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.inputs = {'X': x}
self.outputs = {'Out': out}
def test_check_grad(self):
@@ -653,18 +695,46 @@ class TestRelu(TestActivation):
self.check_grad(['X'], 'Out')
class TestReluOpError(unittest.TestCase):
class TestReluAPI(unittest.TestCase):
# test paddle.nn.ReLU, paddle.nn.functional.relu
def setUp(self):
self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [10, 12])
out1 = F.relu(x)
m = paddle.nn.ReLU()
out2 = m(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
out_ref = np.maximum(self.x_np, 0)
for r in res:
self.assertEqual(np.allclose(out_ref, r), True)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.relu(x)
m = paddle.nn.ReLU()
out2 = m(x)
out_ref = np.maximum(self.x_np, 0)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
paddle.enable_static()
def test_errors(self):
with program_guard(Program()):
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, fluid.layers.relu, 1)
self.assertRaises(TypeError, F.relu, 1)
# The input dtype must be float16, float32, float64.
x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, fluid.layers.relu, x_int32)
x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32')
self.assertRaises(TypeError, F.relu, x_int32)
# the input dtype float16 is supported
x_fp16 = fluid.layers.data(
name='x_fp16', shape=[12, 10], dtype='float16')
fluid.layers.relu(x_fp16)
x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16')
F.relu(x_fp16)
class TestLeakyRelu(TestActivation):
@@ -717,7 +787,7 @@ class TestGeluApproximate(TestActivation):
x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
out = gelu(x, approximate)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.inputs = {'X': x}
self.outputs = {'Out': out}
self.attrs = {"approximate": approximate}
@@ -735,7 +805,7 @@ class TestGelu(TestActivation):
x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
out = gelu(x, approximate)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.inputs = {'X': x}
self.outputs = {'Out': out}
self.attrs = {"approximate": approximate}
@@ -745,6 +815,55 @@ class TestGelu(TestActivation):
self.check_grad(['X'], 'Out')
class TestGELUAPI(unittest.TestCase):
# test paddle.nn.GELU, paddle.nn.functional.gelu
def setUp(self):
self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [11, 17])
out1 = F.gelu(x)
m = paddle.nn.GELU()
out2 = m(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
out_ref = gelu(self.x_np, False)
for r in res:
self.assertEqual(np.allclose(out_ref, r), True)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.gelu(x)
m = paddle.nn.GELU()
out2 = m(x)
out_ref = gelu(self.x_np, False)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
out1 = F.gelu(x, True)
m = paddle.nn.GELU(True)
out2 = m(x)
out_ref = gelu(self.x_np, True)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
paddle.enable_static()
def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.gelu, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32')
self.assertRaises(TypeError, F.gelu, x_int32)
# the input dtype float16 is supported
x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16')
F.gelu(x_fp16)
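Editor's note: TestGELUAPI compares against a module-level gelu(x, approximate) reference helper defined outside the hunks shown here. A NumPy sketch consistent with the formulas documented in this PR (assuming scipy is available in the test environment for erf) could be:

    import numpy as np
    from scipy.special import erf  # assumption: scipy is available to the tests

    def gelu(x, approximate):
        # Reference GELU used only to check operator outputs.
        if approximate:
            y = 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))))
        else:
            y = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))
        return y.astype(x.dtype)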
class TestBRelu(TestActivation):
def setUp(self):
self.op_type = "brelu"
@@ -894,6 +1013,11 @@ class TestSoftReluOpError(unittest.TestCase):
fluid.layers.soft_relu(x_fp16)
def elu(x, alpha):
out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
return out_ref.astype(x.dtype)
class TestELU(TestActivation):
def setUp(self):
self.op_type = "elu"
@@ -901,7 +1025,7 @@ class TestELU(TestActivation):
x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
alpha = 1.
out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
out = elu(x, alpha)
# Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
# is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
self.inputs = {'X': x}
@@ -914,16 +1038,53 @@ class TestELU(TestActivation):
self.check_grad(['X'], 'Out')
class TestELUOpError(unittest.TestCase):
class TestELUAPI(unittest.TestCase):
# test paddle.nn.ELU, paddle.nn.functional.elu
def setUp(self):
self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [10, 12])
out1 = F.elu(x)
m = paddle.nn.ELU()
out2 = m(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
out_ref = elu(self.x_np, 1.0)
for r in res:
self.assertEqual(np.allclose(out_ref, r), True)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.elu(x)
m = paddle.nn.ELU()
out2 = m(x)
out_ref = elu(self.x_np, 1.0)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
out1 = F.elu(x, 0.2)
m = paddle.nn.ELU(0.2)
out2 = m(x)
out_ref = elu(self.x_np, 0.2)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
paddle.enable_static()
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of elu_op must be Variable.
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace())
self.assertRaises(TypeError, fluid.layers.elu, x1)
# The input dtype of elu_op must be float16 float32 or float64.
x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
self.assertRaises(TypeError, fluid.layers.elu, x2)
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.elu, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32')
self.assertRaises(TypeError, F.elu, x_int32)
# the input dtype float16 is supported
x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16')
F.elu(x_fp16)
class TestReciprocal(TestActivation):
@@ -1422,73 +1583,5 @@ create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)
class TestNNReluAPI(unittest.TestCase):
def setUp(self):
self.init_data()
def init_data(self):
self.x_shape = [10, 12]
self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
self.y = self.ref_forward(self.x)
def ref_forward(self, x):
return np.maximum(x, 0)
def ref_backward(self, y, dy):
y_t = y.copy()
y_t[y_t > 0] = 1
return y_t * dy
def check_api(self, place=fluid.CPUPlace()):
main_program = Program()
myrelu = nn.ReLU()
with fluid.program_guard(main_program):
x = fluid.data(name='x', shape=self.x_shape)
x.stop_gradient = False
y = myrelu(x)
fluid.backward.append_backward(fluid.layers.mean(y))
exe = fluid.Executor(place)
out = exe.run(main_program,
feed={'x': self.x},
fetch_list=[y, y.grad_name, x.grad_name])
self.assertTrue(np.allclose(out[0], self.y))
self.assertTrue(np.allclose(out[2], self.ref_backward(self.y, out[1])))
with fluid.dygraph.guard(place):
x = fluid.dygraph.to_variable(self.x)
y = myrelu(x)
self.assertTrue(np.allclose(y.numpy(), self.y))
def test_check_api(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
self.check_api(place)
class TestNNFunctionalReluAPI(unittest.TestCase):
def setUp(self):
self.init_data()
def init_data(self):
self.x_shape = [10, 12]
self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
self.y = self.ref_forward(self.x)
def ref_forward(self, x):
return np.maximum(x, 0)
def test_check_api(self):
main_program = Program()
with fluid.program_guard(main_program):
x = fluid.data(name='x', shape=self.x_shape)
y = F.relu(x)
exe = fluid.Executor(fluid.CPUPlace())
out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
self.assertTrue(np.allclose(out[0], self.y))
if __name__ == "__main__":
unittest.main()
@@ -51,11 +51,14 @@ from .decode import beam_search_decode #DEFINE_ALIAS
from .decode import gather_tree #DEFINE_ALIAS
from .input import data #DEFINE_ALIAS
# from .input import Input #DEFINE_ALIAS
from .layer.activation import ELU
from .layer.activation import GELU
from .layer.activation import Hardshrink
# from .layer.activation import PReLU #DEFINE_ALIAS
from .layer.activation import ReLU #DEFINE_ALIAS
from .layer.activation import ReLU
from .layer.activation import LeakyReLU #DEFINE_ALIAS
from .layer.activation import Sigmoid #DEFINE_ALIAS
from .layer.activation import LogSigmoid
# from .layer.activation import Softmax #DEFINE_ALIAS
from .layer.activation import LogSoftmax #DEFINE_ALIAS
from .layer.activation import HSigmoid #DEFINE_ALIAS
......
@@ -14,13 +14,10 @@
# TODO: define activation functions of neural network
from ...fluid.layers import brelu #DEFINE_ALIAS
from ...fluid.layers import elu #DEFINE_ALIAS
from ...fluid.layers import erf #DEFINE_ALIAS
from ...fluid.layers import gelu #DEFINE_ALIAS
from ...fluid.layers import hard_sigmoid #DEFINE_ALIAS
from ...fluid.layers import hard_swish #DEFINE_ALIAS
from ...fluid.layers import leaky_relu #DEFINE_ALIAS
from ...fluid.layers import logsigmoid #DEFINE_ALIAS
from ...fluid.layers import maxout #DEFINE_ALIAS
from ...fluid.layers import relu6 #DEFINE_ALIAS
from ...fluid.layers import selu #DEFINE_ALIAS
@@ -69,6 +66,108 @@ from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
import paddle
def elu(x, alpha=1.0, name=None):
"""
elu activation.
.. math::
elu(x) = max(0, x) + min(0, \\alpha * (e^{x}-1))
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
out = F.elu(x, alpha=0.2)
# [[-0.12642411 6. ]
# [ 1. 15.6 ]]
"""
if in_dygraph_mode():
return core.ops.elu(x, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
helper = LayerHelper("elu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='elu',
inputs={'X': x},
outputs={'Out': out},
attrs={'alpha': alpha})
return out
def gelu(x, approximate=False, name=None):
"""
gelu activation.
if approximate is True
.. math::
gelu(x) = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3})))
else
.. math::
gelu(x) = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}}))
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
approximate (bool, optional): Whether to enable the approximation. Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
data = np.random.randn(2, 3).astype("float32")
x = paddle.to_tensor(data)
out = F.gelu(x)
data
# array([[ 0.87165993, -1.0541513 , -0.37214822],
# [ 0.15647964, 0.32496083, 0.33045998]], dtype=float32)
out
# array([[ 0.70456535, -0.15380788, -0.13207214],
# [ 0.08796856, 0.20387867, 0.2080159 ]], dtype=float32)
"""
if in_dygraph_mode():
return core.ops.gelu(x, 'approximate', approximate)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu')
helper = LayerHelper("gelu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='gelu',
inputs={'X': x},
outputs={'Out': out},
attrs={'approximate': approximate})
return out
def hardshrink(x, threshold=0.5, name=None):
"""
hard shrinkage activation
@@ -245,11 +344,8 @@ def hsigmoid(input,
return out
def relu(input, inplace=False, name=None):
def relu(x, name=None):
"""
:alias_main: paddle.nn.functional.relu
:alias: paddle.nn.functional.relu,paddle.nn.functional.activation.relu
ReLU Activation.
.. math::
@@ -257,44 +353,74 @@ def relu(input, inplace=False, name=None):
out = max(x, 0)
Parameters:
input (Variable): The input variable. A multi-dimension Tensor with type float16, float32, or float64.
inplace (bool, optional): If inplace is True, the input and output of ``ReLU`` are the same variable.
Otherwise, the input and output of ``ReLU`` are different variables. Default: False. Note that if x is
more than one OPs' input, inplace must be False.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Output of relu operator, a Tensor with shape same as input
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.nn.functional as functional
import numpy as np
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
data = np.array([-2, 0, 1]).astype('float32')
with fluid.dygraph.guard():
data = fluid.dygraph.to_variable(data)
res = functional.relu(data) # [0, 0, 1]
x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
out = F.relu(x) # [0., 0., 1.]
"""
if in_dygraph_mode():
if inplace:
warnings.warn(
"Inplace on ReLU is not allowed and will be discarded in dygraph mode currently."
)
return core.ops.relu(input)
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'relu')
return core.ops.relu(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
helper = LayerHelper('relu', **locals())
outs = input if inplace else helper.create_variable_for_type_inference(
input.dtype)
helper.append_op(type='relu', inputs={'X': [input]}, outputs={'Out': outs})
return outs
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})
return out
def logsigmoid(x, name=None):
"""
logsigmoid activation.
.. math::
logsigmoid(x) = \log \frac{1}{1 + e^{-x}}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([1.0, 2.0, 3.0, 4.0]))
out = F.logsigmoid(x) # [-0.313262, -0.126928, -0.048587, -0.018150]
"""
if in_dygraph_mode():
return core.ops.logsigmoid(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'logsigmoid')
helper = LayerHelper("logsigmoid", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='logsigmoid', inputs={'X': x}, outputs={'Out': out})
return out
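Editor's note: elu, gelu, relu and logsigmoid above all follow the same two-path dispatch, a fast core.ops call in dygraph mode and a LayerHelper-built operator in static-graph mode. A stripped-down sketch of that pattern for a hypothetical unary activation (the op name 'my_act' is illustrative, not a real Paddle operator) is:

    def my_act(x, name=None):
        if in_dygraph_mode():
            return core.ops.my_act(x)                   # dygraph: call the C++ op directly (hypothetical op)
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'my_act')
        helper = LayerHelper('my_act', **locals())      # static graph: append an op to the program
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(type='my_act', inputs={'X': x}, outputs={'Out': out})
        return out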
def softmax(x, axis=-1, name=None):
......
@@ -15,12 +15,15 @@
# TODO: define activation functions of neural network
__all__ = [
'ELU',
'GELU',
'Hardshrink',
# 'PReLU',
'ReLU',
'LeakyReLU',
'Sigmoid',
# 'Softmax',
'LogSigmoid',
'LogSoftmax',
'HSigmoid'
]
@@ -31,6 +34,103 @@ from ...fluid.framework import in_dygraph_mode
from .. import functional as F
class ELU(layers.Layer):
"""
ELU Activation.
.. math::
ELU(x) = max(0, x) + min(0, \\alpha * (e^{x}-1))
Parameters:
alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
m = paddle.nn.ELU(0.2)
out = m(x)
# [[-0.12642411 6. ]
# [ 1. 15.6 ]]
"""
def __init__(self, alpha=1.0, name=None):
super(ELU, self).__init__()
self._alpha = alpha
self._name = name
def forward(self, x):
return F.elu(x, self._alpha, self._name)
class GELU(layers.Layer):
"""
GELU Activation.
If approximate is True
.. math::
GELU(x) = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3})))
else
.. math::
GELU(x) = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}}))
Parameters:
approximate (bool, optional): Whether to enable the approximation. Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
data = np.random.randn(2, 3).astype("float32")
x = paddle.to_tensor(data)
m = paddle.nn.GELU()
out = m(x)
data
# array([[ 0.87165993, -1.0541513 , -0.37214822],
# [ 0.15647964, 0.32496083, 0.33045998]], dtype=float32)
out
# array([[ 0.70456535, -0.15380788, -0.13207214],
# [ 0.08796856, 0.20387867, 0.2080159 ]], dtype=float32)
"""
def __init__(self, approximate=False, name=None):
super(GELU, self).__init__()
self._approximate = approximate
self._name = name
def forward(self, x):
return F.gelu(x, self._approximate, self._name)
class Hardshrink(layers.Layer):
"""
Hardshrink Activation
@@ -216,44 +316,39 @@ class HSigmoid(layers.Layer):
class ReLU(layers.Layer):
"""
:alias_main: paddle.nn.ReLU
:alias: paddle.nn.ReLU,paddle.nn.layer.ReLU,paddle.nn.layer.activation.ReLU
ReLU Activation.
.. math::
out = max(x, 0)
ReLU(x) = max(x, 0)
Parameters:
inplace (bool, optional): If inplace is True, the input and output of
``ReLU`` are the same variable. Otherwise, the input and output of
``ReLU`` are different variables. Default False. Note that if x is
more than one OPs' input, inplace must be False.
Returns:
None
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.nn as nn
import numpy as np
import paddle
import numpy as np
data = np.array([-2, 0, 1]).astype('float32')
my_relu = nn.ReLU()
with fluid.dygraph.guard():
data = fluid.dygraph.to_variable(data)
res = my_relu(data) # [0, 0, 1]
paddle.disable_static()
x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
m = paddle.nn.ReLU()
out = m(x) # [0., 0., 1.]
"""
def __init__(self, inplace=False):
def __init__(self, name=None):
super(ReLU, self).__init__()
self._inplace = inplace
self._name = name
def forward(self, input):
return F.relu(input, self._inplace)
def forward(self, x):
return F.relu(x, self._name)
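Editor's note: ReLU.__init__ no longer accepts the inplace flag; callers migrating from the old signature simply drop it. A minimal dygraph sketch:

    import numpy as np
    import paddle

    paddle.disable_static()
    x = paddle.to_tensor(np.array([-2.0, 0.0, 1.0], dtype='float32'))

    # Previously: paddle.nn.ReLU(inplace=True); the argument was removed in this diff.
    m = paddle.nn.ReLU()
    out = m(x)  # [0., 0., 1.]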
class LeakyReLU(layers.Layer):
@@ -336,6 +431,44 @@ class Sigmoid(layers.Layer):
return F.sigmoid(x, self.name)
class LogSigmoid(layers.Layer):
"""
LogSigmoid Activation.
.. math::
LogSigmoid(x) = \log \frac{1}{1 + e^{-x}}
Parameters:
x (Tensor): The input Tensor with data type float32, or float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([1.0, 2.0, 3.0, 4.0]))
m = paddle.nn.LogSigmoid()
out = m(x) # [-0.313262, -0.126928, -0.048587, -0.018150]
"""
def __init__(self, name=None):
super(LogSigmoid, self).__init__()
self._name = name
def forward(self, x):
return F.logsigmoid(x, self._name)
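Editor's note: since ELU, GELU, ReLU and LogSigmoid are ordinary Layers, they compose like any other module. A small dygraph sketch chaining them (assuming paddle.nn.Sequential accepts layers positionally; shapes are illustrative):

    import numpy as np
    import paddle

    paddle.disable_static()
    x = paddle.to_tensor(np.random.randn(4, 8).astype('float32'))

    acts = paddle.nn.Sequential(
        paddle.nn.ELU(0.2),
        paddle.nn.GELU(),
        paddle.nn.LogSigmoid())
    out = acts(x)  # same shape as x: [4, 8]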
class LogSoftmax(layers.Layer):
"""
This operator implements the log_softmax layer. The calculation process is as follows:
......