Unverified commit 32ae8e81, authored by zhupengyang, committed by GitHub

leaky_relu, log_softmax, hardshrink formula format (#26720)

Parent c2c68958
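Background for this change: these docstrings are plain (non-raw) Python string literals, so single backslashes in the LaTeX can be consumed by Python's escape handling before Sphinx ever renders the math. A minimal sketch of the failure mode (illustration only, not part of the commit):

    # In a non-raw string, "\f" is a form feed and "\b" is a backspace,
    # so LaTeX commands such as \frac and \begin lose their backslash.
    print(repr("\frac{a}{b}"))   # '\x0crac{a}{b}'  -- backslash swallowed
    print(repr("\\frac{a}{b}"))  # '\\frac{a}{b}'   -- survives for Sphinx
    print(repr(r"\frac{a}{b}"))  # '\\frac{a}{b}'   -- a raw string also works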
@@ -168,13 +168,13 @@ def hardshrink(x, threshold=0.5, name=None):

     .. math::

         hardshrink(x)=
-            \left\{
-            \begin{aligned}
-            &x, & & if \ x > threshold \\
-            &x, & & if \ x < -threshold \\
-            &0, & & if \ others
-            \end{aligned}
-            \right.
+            \\left\\{
+            \\begin{aligned}
+            &x, & & if \\ x > threshold \\\\
+            &x, & & if \\ x < -threshold \\\\
+            &0, & & if \\ others
+            \\end{aligned}
+            \\right.

     Args:
         x (Tensor): The input Tensor with data type float32, float64.
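To sanity-check the piecewise definition above, a minimal NumPy sketch (hardshrink_ref is a hypothetical reference, not the Paddle kernel):

    import numpy as np

    def hardshrink_ref(x, threshold=0.5):
        # pass values through where |x| > threshold, zero them otherwise
        return np.where(np.abs(x) > threshold, x, 0.0)

    print(hardshrink_ref(np.array([-1.0, 0.3, 2.5])))  # [-1.   0.   2.5]

This agrees with the paddle.nn.Hardshrink example further down: only 0.3 falls inside [-0.5, 0.5] and is zeroed.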
@@ -391,14 +391,14 @@ def leaky_relu(x, negative_slope=0.01, name=None):
     """
     leaky_relu activation

-    .. math:
-        leaky_relu(x)=
-            \left\{
-            \begin{aligned}
-            &x, & & if \ x >= 0 \\
-            &negative\_slope * x, & & otherwise \\
-            \end{aligned}
-            \right. \\
+    .. math::
+        leaky\\_relu(x)=
+            \\left\\{
+            \\begin{aligned}
+            &x, & & if \\ x >= 0 \\\\
+            &negative\_slope * x, & & otherwise \\\\
+            \\end{aligned}
+            \\right. \\\\

     Args:
         x (Tensor): The input Tensor with data type float32, float64.
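Likewise, the leaky_relu formula in one line of NumPy (leaky_relu_ref is a hypothetical reference, not the Paddle kernel):

    import numpy as np

    def leaky_relu_ref(x, negative_slope=0.01):
        # identity for x >= 0, scaled by negative_slope for negative inputs
        return np.where(x >= 0, x, negative_slope * x)

    print(leaky_relu_ref(np.array([-2.0, 0.0, 3.0])))  # [-0.02  0.    3.  ]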
@@ -1033,8 +1033,8 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
     .. math::

-        Out[i, j] = log(softmax(x))
-                  = log(\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])})
+        log\\_softmax[i, j] = log(softmax(x))
+                  = log(\\frac{\exp(X[i, j])}{\\sum_j(exp(X[i, j])})

     Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
......
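The log_softmax identity above, written out with the usual max-shift for numerical stability (log_softmax_ref is a hypothetical reference, not the Paddle kernel):

    import numpy as np

    def log_softmax_ref(x, axis=-1):
        # log(softmax(x)) = (x - max) - log(sum(exp(x - max))) along `axis`
        shifted = x - np.max(x, axis=axis, keepdims=True)
        return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))

    print(log_softmax_ref(np.array([-2.0, 3.0, -4.0, 5.0])))
    # [-7.12783948 -2.12783948 -9.12783948 -0.12783948]

The values agree with the first row of the LogSoftmax example output below.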
@@ -144,13 +144,13 @@ class Hardshrink(layers.Layer):
     .. math::

         hardshrink(x)=
-            \left\{
-            \begin{aligned}
-            &x, & & if \ x > threshold \\
-            &x, & & if \ x < -threshold \\
-            &0, & & if \ others
-            \end{aligned}
-            \right.
+            \\left\\{
+            \\begin{aligned}
+            &x, & & if \\ x > threshold \\\\
+            &x, & & if \\ x < -threshold \\\\
+            &0, & & if \\ others
+            \\end{aligned}
+            \\right.

     Parameters:
         threshold (float, optional): The value of threshold for hardshrink. Default is 0.5
@@ -165,14 +165,14 @@ class Hardshrink(layers.Layer):
         .. code-block:: python

             import paddle
             import numpy as np

             paddle.disable_static()

             x = paddle.to_tensor(np.array([-1, 0.3, 2.5]))
             m = paddle.nn.Hardshrink()
             out = m(x) # [-1., 0., 2.5]
     """

     def __init__(self, threshold=0.5, name=None):
@@ -598,15 +598,15 @@ class LeakyReLU(layers.Layer):
     """
     Leaky ReLU Activation.

-    .. math:
+    .. math::

         LeakyReLU(x)=
-            \left\{
-            \begin{aligned}
-            &x, & & if \ x >= 0 \\
-            &negative\_slope * x, & & otherwise \\
-            \end{aligned}
-            \right. \\
+            \\left\\{
+            \\begin{aligned}
+            &x, & & if \\ x >= 0 \\\\
+            &negative\_slope * x, & & otherwise \\\\
+            \\end{aligned}
+            \\right. \\\\

     Parameters:
         negative_slope (float, optional): Slope of the activation function at
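The hunk is truncated before the Examples block; for symmetry with the Hardshrink example above, a hedged usage sketch (assumes the layer defaults to the functional signature's negative_slope=0.01):

    import paddle
    import numpy as np

    paddle.disable_static()

    x = paddle.to_tensor(np.array([-2.0, 0.0, 1.0]))
    m = paddle.nn.LeakyReLU()
    out = m(x)  # [-0.02, 0., 1.]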
@@ -1015,7 +1015,7 @@ class LogSoftmax(layers.Layer):
     .. math::

         Out[i, j] = log(softmax(x))
-                  = log(\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])})
+                  = log(\\frac{\exp(X[i, j])}{\\sum_j(exp(X[i, j])})

     Parameters:
         axis (int, optional): The axis along which to perform log_softmax
@@ -1032,26 +1032,26 @@ class LogSoftmax(layers.Layer):
     Examples:
         .. code-block:: python

             import paddle
             import numpy as np

             paddle.disable_static()

             x = np.array([[[-2.0, 3.0, -4.0, 5.0],
                            [3.0, -4.0, 5.0, -6.0],
                            [-7.0, -8.0, 8.0, 9.0]],
                           [[1.0, -2.0, -3.0, 4.0],
                            [-5.0, 6.0, 7.0, -8.0],
                            [6.0, 7.0, 8.0, 9.0]]])
             m = paddle.nn.LogSoftmax()
             x = paddle.to_tensor(x)
             out = m(x)
             # [[[ -7.1278396   -2.1278396   -9.127839    -0.12783948]
             #   [ -2.1270514   -9.127051    -0.12705144 -11.127051  ]
             #   [-16.313261   -17.313261    -1.3132617   -0.31326184]]
             #  [[ -3.0518122   -6.051812    -7.051812    -0.051812  ]
             #   [-12.313267    -1.3132664   -0.3132665  -15.313267  ]
             #   [ -3.4401896   -2.4401896   -1.4401896  -0.44018966]]]
     """

     def __init__(self, axis=-1, name=None):
......