Unverified · Commit 04abcab8 authored by mrcangye, committed by GitHub

fix some doc bug test=document_fix (#45488)

* fix some doc bug test=document_fix

* fix some docs issues, test=document_fix

* beta -> \beta in softplus

* threshold -> \varepsilon in softplus

* parameter name

* delta -> \delta in smooth_l1_loss

* fix some docs test=document_fix

* fix docs test=document_fix

* fix docs && add blank lines test=document_fix

* Update python/paddle/nn/functional/activation.py, test=document_fix

* Update python/paddle/nn/layer/activation.py, test=document_fix
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent c5a1a4b0
......@@ -170,9 +170,9 @@ class ActivationOpGrad : public framework::OperatorWithKernel {
};
UNUSED constexpr char SigmoidDoc[] = R"DOC(
Sigmoid Activation Operator
Sigmoid Activation
$$out = \\frac{1}{1 + e^{-x}}$$
$$out = \frac{1}{1 + e^{-x}}$$
)DOC";
......
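As a quick sanity check of the corrected Sigmoid formula above, here is a minimal NumPy sketch (illustration only, independent of the operator implementation):

import numpy as np

def sigmoid_ref(x):
    # out = 1 / (1 + e^{-x}), as in the corrected DOC string
    return 1.0 / (1.0 + np.exp(-x))

print(sigmoid_ref(np.array([-1.0, 0.0, 1.0])))  # ~[0.26894, 0.5, 0.73106]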
......@@ -949,12 +949,14 @@ def silu(x, name=None):
silu(x) = \frac{x}{1 + e^{-x}}
Where :math:`x` is the input Tensor.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
A Tensor with the same data type and shape as ``x`` .
A Tensor with the same data type and shape as :attr:`x`.
Examples:
.. code-block:: python
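# The example body is truncated in this hunk; the lines below are a hedged
# sketch of typical F.silu usage (assumed to match the current paddle API).
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0], dtype='float32')
out = F.silu(x)  # silu(x) = x / (1 + e^{-x}) -> [0.731059, 1.761594, 2.857722, 3.928055]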
......@@ -1072,15 +1074,13 @@ def softmax(x, axis=-1, dtype=None, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
x = np.array([[[2.0, 3.0, 4.0, 5.0],
x = paddle.to_tensor([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]], 'float32')
x = paddle.to_tensor(x)
[6.0, 7.0, 8.0, 9.0]]], dtype='float32')
out1 = F.softmax(x)
out2 = F.softmax(x, dtype='float64')
# out1's data type is float32; out2's data type is float64
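# A hedged cross-check, not part of the original example: reproduce out1 with
# NumPy along the last axis, which is what the default axis=-1 selects.
import numpy as np
x_np = x.numpy()
e = np.exp(x_np - x_np.max(axis=-1, keepdims=True))  # subtract the row max for stability
ref = e / e.sum(axis=-1, keepdims=True)               # agrees with out1 up to float32 rounding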
......@@ -1167,14 +1167,15 @@ def softplus(x, beta=1, threshold=20, name=None):
softplus activation
.. math::
softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\
\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}
softplus(x)=\begin{cases}
\frac{1}{\beta} * \log(1 + e^{\beta * x}),&x\leqslant\frac{\varepsilon}{\beta};\\
x,&x>\frac{\varepsilon}{\beta}.
\end{cases}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
beta (float, optional): The value of beta for softplus. Default is 1
threshold (float, optional): The value of threshold for softplus. Default is 20
beta (float, optional): The value of :math:`\beta` for softplus. Default is 1
threshold (float, optional): The value of :math:`\varepsilon` for softplus. Default is 20
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
......@@ -1185,9 +1186,8 @@ def softplus(x, beta=1, threshold=20, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3], dtype='float32')
out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
"""
......
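For reference, the piecewise definition introduced above can be transcribed directly into NumPy. This is an illustrative sketch whose beta/threshold names mirror the docstring parameters; it is not paddle's kernel:

import numpy as np

def softplus_ref(x, beta=1.0, threshold=20.0):
    # 1/beta * log(1 + e^{beta * x}) while beta * x <= threshold, otherwise x
    bx = beta * x
    return np.where(bx > threshold, x,
                    np.log1p(np.exp(np.minimum(bx, threshold))) / beta)

print(softplus_ref(np.array([-0.4, -0.2, 0.1, 0.3])))  # ~[0.513015, 0.598139, 0.744397, 0.854355]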
......@@ -996,13 +996,13 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
loss(x,y) = \frac{1}{n}\sum_{i}z_i
where z_i is given by:
where :math:`z_i` is given by:
.. math::
\mathop{z_i} = \left\{\begin{array}{rcl}
0.5(x_i - y_i)^2 & & {if |x_i - y_i| < delta} \\
delta * |x_i - y_i| - 0.5 * delta^2 & & {otherwise}
0.5(x_i - y_i)^2 & & {if |x_i - y_i| < \delta} \\
\delta * |x_i - y_i| - 0.5 * \delta^2 & & {otherwise}
\end{array} \right.
Parameters:
......@@ -1017,12 +1017,11 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
Default is ``'mean'``.
delta (float, optional): Specifies the hyperparameter delta to be used.
delta (float, optional): Specifies the hyperparameter :math:`\delta` to be used.
The value determines how large the errors need to be to use L1. Errors
smaller than delta are minimized with L2. Parameter is ignored for
negative/zero values. Default = 1.0
name (str, optional): Name for the operation (optional, default is
None). For more information, please refer to :ref:`api_guide_Name`.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
Tensor, The tensor variable storing the smooth_l1_loss of input and label.
......@@ -1031,14 +1030,12 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
.. code-block:: python
import paddle
import numpy as np
input_data = np.random.rand(3,3).astype("float32")
label_data = np.random.rand(3,3).astype("float32")
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
input = paddle.rand([3, 3]).astype('float32')
label = paddle.rand([3, 3]).astype('float32')
output = paddle.nn.functional.smooth_l1_loss(input, label)
print(output)
# [0.068004]
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'smooth_l1_loss')
......
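Likewise, the smooth L1 formula above maps onto a short NumPy sketch; treat it as an assumption-level reference only, since paddle's kernel may handle reduction and edge cases differently:

import numpy as np

def smooth_l1_ref(x, y, delta=1.0):
    # z_i = 0.5*(x_i - y_i)^2 if |x_i - y_i| < delta, else delta*|x_i - y_i| - 0.5*delta^2
    diff = np.abs(x - y)
    z = np.where(diff < delta, 0.5 * diff ** 2, delta * diff - 0.5 * delta ** 2)
    return z.mean()  # loss = (1/n) * sum_i z_i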
......@@ -706,15 +706,15 @@ class LeakyReLU(Layer):
class Sigmoid(Layer):
"""
r"""
This interface is used to construct a callable object of the ``Sigmoid`` class. This layer calculates the `sigmoid` of input x.
.. math::
Sigmoid(x) = \\frac{1}{1 + e^{-x}}
sigmoid(x) = \frac{1}{1 + e^{-x}}
Parameters:
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Shape:
x: N-D tensor, available dtype is float16, float32, float64.
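The example section of this docstring is not shown in the hunk; for orientation, a hedged usage sketch of the layer (assuming the paddle.nn.Sigmoid API documented in this file):

import paddle

m = paddle.nn.Sigmoid()
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0], dtype='float32')
out = m(x)  # ~[0.731059, 0.880797, 0.952574, 0.982014]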
......@@ -801,15 +801,15 @@ class Softplus(Layer):
Softplus Activation
.. math::
Softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\
\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}
softplus(x)=\begin{cases}
\frac{1}{\beta} * \log(1 + e^{\beta * x}),&x\leqslant\frac{\varepsilon}{\beta};\\
x,&x>\frac{\varepsilon}{\beta}.
\end{cases}
Parameters:
beta (float, optional): The value of beta for Softplus. Default is 1
threshold (float, optional): The value of threshold for Softplus. Default is 20
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
beta (float, optional): The value of :math:`\beta` for Softplus. Default is 1
threshold (float, optional): The value of :math:`\varepsilon` for Softplus. Default is 20
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Shape:
- input: Tensor with any shape.
......@@ -819,9 +819,8 @@ class Softplus(Layer):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3], dtype='float32')
m = paddle.nn.Softplus()
out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355]
"""
......@@ -1101,16 +1100,17 @@ class ThresholdedReLU(Layer):
class Silu(Layer):
"""
Silu Activation.
r"""
Silu Activation
.. math::
Silu(x) = \frac{x}{1 + e^{-x}}
silu(x) = \frac{x}{1 + \mathrm{e}^{-x}}
Where :math:`x` is the input Tensor.
Parameters:
x (Tensor): The input Tensor with data type float32, or float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Shape:
- input: Tensor with any shape.
......@@ -1271,15 +1271,13 @@ class Softmax(Layer):
.. code-block:: python
import paddle
import numpy as np
x = np.array([[[2.0, 3.0, 4.0, 5.0],
x = paddle.to_tensor([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]], 'float32')
x = paddle.to_tensor(x)
[6.0, 7.0, 8.0, 9.0]]], dtype='float32')
m = paddle.nn.Softmax()
out = m(x)
# [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
......
......@@ -1138,15 +1138,15 @@ class SmoothL1Loss(Layer):
.. math::
loss(x,y) = \frac{1}{n}\sum_{i}z_i
loss(x, y) = \frac{1}{n}\sum_{i}z_i
where z_i is given by:
where :math:`z_i` is given by:
.. math::
\mathop{z_i} = \left\{\begin{array}{rcl}
0.5(x_i - y_i)^2 & & {if |x_i - y_i| < delta} \\
delta * |x_i - y_i| - 0.5 * delta^2 & & {otherwise}
0.5(x_i - y_i)^2 & & {if |x_i - y_i| < \delta} \\
\delta * |x_i - y_i| - 0.5 * \delta^2 & & {otherwise}
\end{array} \right.
Parameters:
......@@ -1156,12 +1156,11 @@ class SmoothL1Loss(Layer):
If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
Default is ``'mean'``.
delta (float, optional): Specifies the hyperparameter delta to be used.
delta (float, optional): Specifies the hyperparameter :math:`\delta` to be used.
The value determines how large the errors need to be to use L1. Errors
smaller than delta are minimized with L2. Parameter is ignored for
negative/zero values. Default = 1.0
name (str, optional): Name for the operation (optional, default is
None). For more information, please refer to :ref:`api_guide_Name`.
negative/zero values. Default value is :math:`1.0`.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Call Parameters:
......@@ -1179,14 +1178,12 @@ class SmoothL1Loss(Layer):
.. code-block:: python
import paddle
import numpy as np
input_data = np.random.rand(3,3).astype("float32")
label_data = np.random.rand(3,3).astype("float32")
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
input = paddle.rand([3, 3]).astype("float32")
label = paddle.rand([3, 3]).astype("float32")
loss = paddle.nn.SmoothL1Loss()
output = loss(input, label)
print(output)
# [0.049606]
"""
def __init__(self, reduction='mean', delta=1.0, name=None):
......