Unverified commit 32ae8e81, authored by Z zhupengyang, committed by GitHub

leaky_relu, log_softmax, hardshrink formula format (#26720)

Parent: c2c68958
@@ -168,13 +168,13 @@ def hardshrink(x, threshold=0.5, name=None):
.. math::
hardshrink(x)=
- \left\{
- \begin{aligned}
- &x, & & if \ x > threshold \\
- &x, & & if \ x < -threshold \\
- &0, & & if \ others
- \end{aligned}
- \right.
+ \\left\\{
+ \\begin{aligned}
+ &x, & & if \\ x > threshold \\\\
+ &x, & & if \\ x < -threshold \\\\
+ &0, & & if \\ others
+ \\end{aligned}
+ \\right.
Args:
x (Tensor): The input Tensor with data type float32, float64.
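For reference, the piecewise definition above can be checked numerically. The sketch below is illustrative only: it assumes the function in this hunk is exposed as paddle.nn.functional.hardshrink and reuses the imperative-mode idioms (paddle.disable_static, paddle.to_tensor) from the examples later in this commit.

    import numpy as np
    import paddle
    import paddle.nn.functional as F  # assumed export path for the function in this hunk

    paddle.disable_static()

    # values outside (-threshold, threshold) pass through unchanged, the rest become 0
    x = paddle.to_tensor(np.array([-1.0, 0.3, 2.5], dtype='float32'))
    out = F.hardshrink(x)  # threshold defaults to 0.5 -> [-1., 0., 2.5]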
@@ -391,14 +391,14 @@ def leaky_relu(x, negative_slope=0.01, name=None):
"""
leaky_relu activation
- .. math:
- leaky_relu(x)=
- \left\{
- \begin{aligned}
- &x, & & if \ x >= 0 \\
- &negative\_slope * x, & & otherwise \\
- \end{aligned}
- \right. \\
+ .. math::
+ leaky\\_relu(x)=
+ \\left\\{
+ \\begin{aligned}
+ &x, & & if \\ x >= 0 \\\\
+ &negative\_slope * x, & & otherwise \\\\
+ \\end{aligned}
+ \\right. \\\\
Args:
x (Tensor): The input Tensor with data type float32, float64.
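A corresponding numeric check for the corrected leaky_relu formula, again a minimal sketch that assumes the function is exposed as paddle.nn.functional.leaky_relu and uses illustrative input values:

    import numpy as np
    import paddle
    import paddle.nn.functional as F  # assumed export path

    paddle.disable_static()

    # non-negative inputs are passed through; negative inputs are scaled by negative_slope
    x = paddle.to_tensor(np.array([-2.0, 0.0, 1.0], dtype='float32'))
    out = F.leaky_relu(x, negative_slope=0.01)  # [-0.02, 0., 1.]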
@@ -1033,8 +1033,8 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
.. math::
- Out[i, j] = log(softmax(x))
- = log(\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])})
+ log\\_softmax[i, j] = log(softmax(x))
+ = log(\\frac{\exp(X[i, j])}{\\sum_j(exp(X[i, j])})
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
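The corrected formula is equivalent to x - log(sum_j exp(x)), which gives a quick way to cross-check the op against NumPy. A minimal sketch, assuming the function is exposed as paddle.nn.functional.log_softmax:

    import numpy as np
    import paddle
    import paddle.nn.functional as F  # assumed export path

    paddle.disable_static()

    x_np = np.array([[-2.0, 3.0, -4.0, 5.0]], dtype='float32')
    out = F.log_softmax(paddle.to_tensor(x_np), axis=-1)

    # log(softmax(x)) reduces to x - log(sum_j exp(x)) along the chosen axis
    ref = x_np - np.log(np.exp(x_np).sum(axis=-1, keepdims=True))
    print(np.allclose(out.numpy(), ref, atol=1e-5))  # True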
@@ -144,13 +144,13 @@ class Hardshrink(layers.Layer):
.. math::
hardshrink(x)=
- \left\{
- \begin{aligned}
- &x, & & if \ x > threshold \\
- &x, & & if \ x < -threshold \\
- &0, & & if \ others
- \end{aligned}
- \right.
+ \\left\\{
+ \\begin{aligned}
+ &x, & & if \\ x > threshold \\\\
+ &x, & & if \\ x < -threshold \\\\
+ &0, & & if \\ others
+ \\end{aligned}
+ \\right.
Parameters:
threshold (float, optional): The value of threshold for hardthrink. Default is 0.5
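The example in the next hunk only exercises the default threshold; the sketch below (illustrative values, assuming the layer follows the piecewise formula above) shows a non-default threshold widening the zeroed band:

    import numpy as np
    import paddle

    paddle.disable_static()

    # with threshold=1.0, inputs in [-1, 1] are zeroed; larger magnitudes pass through
    m = paddle.nn.Hardshrink(threshold=1.0)
    x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5], dtype='float32'))
    out = m(x)  # [-1.5, 0., 2.5]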
@@ -165,14 +165,14 @@ class Hardshrink(layers.Layer):
.. code-block:: python
import paddle
import numpy as np

paddle.disable_static()

x = paddle.to_tensor(np.array([-1, 0.3, 2.5]))
m = paddle.nn.Hardshrink()
out = m(x) # [-1., 0., 2.5]
"""
def __init__(self, threshold=0.5, name=None):
@@ -598,15 +598,15 @@ class LeakyReLU(layers.Layer):
"""
Leaky ReLU Activation.
- .. math:
+ .. math::
LeakyReLU(x)=
- \left\{
- \begin{aligned}
- &x, & & if \ x >= 0 \\
- &negative\_slope * x, & & otherwise \\
- \end{aligned}
- \right. \\
+ \\left\\{
+ \\begin{aligned}
+ &x, & & if \\ x >= 0 \\\\
+ &negative\_slope * x, & & otherwise \\\\
+ \\end{aligned}
+ \\right. \\\\
Parameters:
negative_slope (float, optional): Slope of the activation function at
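Unlike the other classes touched by this commit, no usage example for LeakyReLU is visible in this hunk, so here is a minimal sketch (assuming paddle.nn.LeakyReLU with its default negative_slope and the same imperative-mode idioms as the other examples in this commit):

    import numpy as np
    import paddle

    paddle.disable_static()

    m = paddle.nn.LeakyReLU()  # negative_slope defaults to 0.01
    x = paddle.to_tensor(np.array([-2.0, 0.0, 1.0], dtype='float32'))
    out = m(x)  # non-negative values unchanged, -2.0 scaled to -0.02 -> [-0.02, 0., 1.]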
@@ -1015,7 +1015,7 @@ class LogSoftmax(layers.Layer):
.. math::
Out[i, j] = log(softmax(x))
- = log(\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])})
+ = log(\\frac{\exp(X[i, j])}{\\sum_j(exp(X[i, j])})
Parameters:
axis (int, optional): The axis along which to perform log_softmax
@@ -1032,26 +1032,26 @@ class LogSoftmax(layers.Layer):
Examples:
.. code-block:: python
import paddle
import numpy as np

paddle.disable_static()

x = np.array([[[-2.0, 3.0, -4.0, 5.0],
               [3.0, -4.0, 5.0, -6.0],
               [-7.0, -8.0, 8.0, 9.0]],
              [[1.0, -2.0, -3.0, 4.0],
               [-5.0, 6.0, 7.0, -8.0],
               [6.0, 7.0, 8.0, 9.0]]])
m = paddle.nn.LogSoftmax()
x = paddle.to_tensor(x)
out = m(x)
# [[[ -7.1278396 -2.1278396 -9.127839 -0.12783948]
#   [ -2.1270514 -9.127051 -0.12705144 -11.127051 ]
#   [-16.313261 -17.313261 -1.3132617 -0.31326184]]
#  [[ -3.0518122 -6.051812 -7.051812 -0.051812 ]
#   [-12.313267 -1.3132664 -0.3132665 -15.313267 ]
#   [ -3.4401896 -2.4401896 -1.4401896 -0.44018966]]]
"""
def __init__(self, axis=-1, name=None):