Unverified commit c11c83fb authored by hong19860320, committed by GitHub

Add the parameter checking for softplus and fix the doc string (#26530)

Parent 0ca10d31
@@ -717,6 +717,9 @@ class TestSoftshrinkAPI(unittest.TestCase):
         # The input dtype must be float16, float32, float64.
         x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
         self.assertRaises(TypeError, F.softshrink, x_int32)
+        # The threshold must be no less than zero
+        x_fp32 = paddle.data(name='x_fp32', shape=[12, 10], dtype='float32')
+        self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
         # support the input dtype is float16
         x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
         F.softshrink(x_fp16)
...
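For reference, a minimal dygraph sketch of the behavior these new test lines pin down (a hypothetical snippet written against the 2.0-beta API used elsewhere in this diff, not part of the commit):

.. code-block:: python

    import paddle
    import paddle.nn.functional as F
    import numpy as np

    paddle.disable_static()

    x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
    try:
        F.softshrink(x, -1.0)  # a negative threshold is now rejected
    except ValueError as e:
        print(e)  # The threshold must be no less than zero. Received: -1.0.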
@@ -225,7 +225,7 @@ def hardtanh(x, min=-1.0, max=1.0, name=None):
                x, \\text{otherwise}
            \\end{cases}
-    Args:
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         min (float, optional): The minimum value of the linear region range. Default is -1.
         max (float, optional): The maximum value of the linear region range. Default is 1.
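The hunk above shows only the tail of hardtanh's piecewise definition. As a reading aid, a NumPy sketch of the full formula (an illustration, not Paddle's kernel):

.. code-block:: python

    import numpy as np

    def hardtanh_ref(x, min=-1.0, max=1.0):
        # max where x > max, min where x < min, x otherwise -- i.e. a clamp.
        return np.clip(x, min, max)

    print(hardtanh_ref(np.array([-1.5, 0.3, 2.5])))  # [-1.   0.3  1. ]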
@@ -598,9 +598,9 @@ def relu6(x, name=None):
     .. math::
-        \text{relu6}(x) = \min(\max(0,x), 6)
-    Args:
+        relu6(x) = min(max(0,x), 6)
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -609,18 +609,16 @@ def relu6(x, name=None):
         A Tensor with the same data type and shape as ``x`` .
     Examples:
         .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
-            paddle.disable_static()
-            x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
-            out = F.relu6(x) # [0, 0.3, 6]
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
+            out = F.relu6(x) # [0, 0.3, 6]
     """
     threshold = 6.0
     if in_dygraph_mode():
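To make the formula concrete, a NumPy sketch that reproduces the docstring's expected output (illustrative only):

.. code-block:: python

    import numpy as np

    def relu6_ref(x):
        # relu6(x) = min(max(0, x), 6): relu capped at 6.
        return np.minimum(np.maximum(0.0, x), 6.0)

    print(relu6_ref(np.array([-1.0, 0.3, 6.5])))  # [0.  0.3 6. ]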
@@ -646,11 +644,9 @@ def selu(x,
     .. math::
-        \text{selu}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1))), \\
-        with\,alpha=1.6732632423543772848170429916717 and \\
-        scale=1.0507009873554804934193349852946
-    Args:
+        selu(x) = scale * (max(0,x) + min(0, alpha * (e^{x} - 1)))
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         scale (float, optional): The value of scale for selu. Default is 1.0507009873554804934193349852946
         alpha (float, optional): The value of alpha for selu. Default is 1.6732632423543772848170429916717
@@ -661,18 +657,16 @@ def selu(x,
         A Tensor with the same data type and shape as ``x`` .
     Examples:
         .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
-            paddle.disable_static()
-            x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
-            out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]]
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
+            out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]]
     """
     if in_dygraph_mode():
         return core.ops.selu(x, 'scale', scale, 'alpha', alpha)
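The scale and alpha defaults in the signature are the standard SELU constants. A NumPy sketch of the formula, reproducing the docstring's expected values (illustrative only):

.. code-block:: python

    import numpy as np

    SCALE = 1.0507009873554804934193349852946
    ALPHA = 1.6732632423543772848170429916717

    def selu_ref(x, scale=SCALE, alpha=ALPHA):
        # scale * (max(0, x) + min(0, alpha * (e^x - 1)))
        return scale * (np.maximum(0.0, x) +
                        np.minimum(0.0, alpha * (np.exp(x) - 1.0)))

    print(selu_ref(np.array([[0.0, 1.0], [2.0, 3.0]])))
    # approximately [[0, 1.050701], [2.101402, 3.152103]]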
@@ -856,10 +850,10 @@ def softplus(x, beta=1, threshold=20, name=None):
     .. math::
-        \text{softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
-        \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.
-    Args:
+        softplus(x) = \\frac{1}{beta} * \\log(1 + e^{beta * x}) \\\\
+        \\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         beta (float, optional): The value of beta for softplus. Default is 1
         threshold (float, optional): The value of threshold for softplus. Default is 20
@@ -870,18 +864,16 @@ def softplus(x, beta=1, threshold=20, name=None):
         A Tensor with the same data type and shape as ``x`` .
     Examples:
         .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
-            paddle.disable_static()
-            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-            out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
     """
     if in_dygraph_mode():
         return core.ops.softplus(x, 'beta', beta, 'threshold', threshold)
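The "reverts to the linear function" sentence is the interesting part: once beta * x exceeds the threshold, e^{beta * x} dominates the 1, so softplus(x) is essentially x, and returning x directly avoids overflowing exp(). A NumPy sketch of that idea (illustrative; Paddle's kernel implements this in C++):

.. code-block:: python

    import numpy as np

    def softplus_ref(x, beta=1.0, threshold=20.0):
        # Cap the exponent so np.exp never overflows; where beta * x
        # exceeds the threshold, fall back to the linear function x.
        capped = np.minimum(beta * x, threshold)
        return np.where(beta * x > threshold, x,
                        np.log1p(np.exp(capped)) / beta)

    print(softplus_ref(np.array([-0.4, -0.2, 0.1, 0.3])))
    # approximately [0.513015, 0.598139, 0.744397, 0.854355]
    print(softplus_ref(np.array([1000.0])))  # [1000.] -- no overflow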
@@ -905,14 +897,13 @@ def softshrink(x, threshold=0.5, name=None):
     .. math::
-        \text{softshrink}(x) =
-            \begin{cases}
-                x - threshold, & \text{ if } x > threshold \\
-                x + threshold, & \text{ if } x < -threshold \\
-                0, & \text{ otherwise }
-            \end{cases}
+        softshrink(x)= \\begin{cases}
+                        x - threshold, \\text{if } x > threshold \\\\
+                        x + threshold, \\text{if } x < -threshold \\\\
+                        0, \\text{otherwise}
+                    \\end{cases}
-    Args:
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         threshold (float, optional): The value of threshold(must be no less than zero) for softplus. Default is 0.5
         name (str, optional): Name for the operation (optional, default is None).
@@ -922,19 +913,22 @@ def softshrink(x, threshold=0.5, name=None):
         A Tensor with the same data type and shape as ``x`` .
     Examples:
         .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
-            paddle.disable_static()
-            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
-            out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
+            out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
     """
+    if threshold < 0:
+        raise ValueError(
+            "The threshold must be no less than zero. Received: {}.".format(
+                threshold))
     if in_dygraph_mode():
         return core.ops.softshrink(x, 'lambda', threshold)
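A NumPy sketch that combines the piecewise formula with the newly added check, reproducing the docstring values (illustrative only):

.. code-block:: python

    import numpy as np

    def softshrink_ref(x, threshold=0.5):
        # Mirror the new parameter check: a negative threshold would make
        # the two half-lines overlap, so it is rejected up front.
        if threshold < 0:
            raise ValueError(
                "The threshold must be no less than zero. Received: {}.".format(
                    threshold))
        return np.where(x > threshold, x - threshold,
                        np.where(x < -threshold, x + threshold, 0.0))

    print(softshrink_ref(np.array([-0.9, -0.2, 0.1, 0.8])))  # [-0.4  0.   0.   0.3]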
@@ -956,9 +950,9 @@ def softsign(x, name=None):
     .. math::
-        \text{softsign}(x) = \frac{x}{1 + |x|}
-    Args:
+        softsign(x) = \\frac{x}{1 + |x|}
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -967,18 +961,16 @@ def softsign(x, name=None):
         A Tensor with the same data type and shape as ``x`` .
     Examples:
         .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
-            paddle.disable_static()
-            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-            out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
     """
     if in_dygraph_mode():
         return core.ops.softsign(x)
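Again as a reading aid, a one-line NumPy rendering of the softsign formula (illustrative only):

.. code-block:: python

    import numpy as np

    def softsign_ref(x):
        # softsign(x) = x / (1 + |x|): a smooth squashing into (-1, 1).
        return x / (1.0 + np.abs(x))

    print(softsign_ref(np.array([-0.4, -0.2, 0.1, 0.3])))
    # approximately [-0.285714, -0.166667, 0.0909091, 0.230769]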
@@ -997,7 +989,7 @@ def tanhshrink(x, name=None):
     .. math::
-        \text{tanhshrink}(x) = x - \text{tanh}(x)
+        tanhshrink(x) = x - tanh(x)
     Args:
         x (Tensor): The input Tensor with data type float32, float64.
@@ -1008,18 +1000,16 @@ def tanhshrink(x, name=None):
         A Tensor with the same data type and shape as ``x`` .
     Examples:
         .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
-            paddle.disable_static()
-            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-            out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
     """
     if in_dygraph_mode():
         return core.ops.tanh_shrink(x)
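And the same for tanhshrink (illustrative only); note how small the outputs are near zero, since x - tanh(x) behaves like x^3 / 3 there:

.. code-block:: python

    import numpy as np

    def tanhshrink_ref(x):
        # tanhshrink(x) = x - tanh(x)
        return x - np.tanh(x)

    print(tanhshrink_ref(np.array([-0.4, -0.2, 0.1, 0.3])))
    # approximately [-0.020051, -0.00262468, 0.000332005, 0.00868739]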
...
@@ -513,7 +513,7 @@ class ReLU6(layers.Layer):
     .. math::
-        \text{ReLU6}(x) = \min(\max(0,x), 6)
+        ReLU6(x) = min(max(0,x), 6)
     Parameters:
         name (str, optional): Name for the operation (optional, default is None).
@@ -524,17 +524,16 @@ class ReLU6(layers.Layer):
         - output: Tensor with the same shape as input.
     Examples:
         .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()

            x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
            m = paddle.nn.ReLU6()
            out = m(x) # [0, 0.3, 6]
     """

     def __init__(self, name=None):
@@ -551,9 +550,7 @@ class SELU(layers.Layer):
     .. math::
-        \text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1))), \\
-        with\,alpha=1.6732632423543772848170429916717 and \\
-        scale=1.0507009873554804934193349852946
+        SELU(x) = scale * (max(0,x) + min(0, alpha * (e^{x} - 1)))
     Parameters:
         scale (float, optional): The value of scale for SELU. Default is 1.0507009873554804934193349852946
@@ -566,17 +563,16 @@ class SELU(layers.Layer):
         - output: Tensor with the same shape as input.
     Examples:
         .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()

            x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
            m = paddle.nn.SELU()
            out = m(x) # [[0, 1.050701],[2.101402, 3.152103]]
     """

     def __init__(self,
@@ -684,10 +680,12 @@ class Softplus(layers.Layer):
     .. math::
-        \text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
-        \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.
+        Softplus(x) = \\frac{1}{beta} * \\log(1 + e^{beta * x}) \\\\
+        \\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}
     Parameters:
+        beta (float, optional): The value of beta for Softplus. Default is 1
+        threshold (float, optional): The value of threshold for Softplus. Default is 20
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -696,18 +694,16 @@ class Softplus(layers.Layer):
         - output: Tensor with the same shape as input.
     Examples:
         .. code-block:: python

            import paddle
            import numpy as np
-            paddle.disable_static()
-            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-            m = paddle.nn.Softplus()
-            out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355]
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            m = paddle.nn.Softplus()
+            out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355]
     """

     def __init__(self, beta=1, threshold=20, name=None):
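Since this hunk adds the beta and threshold entries to the Parameters list, a hypothetical usage sketch that exercises them (the output values are hand-computed approximations, not from the commit):

.. code-block:: python

    import paddle
    import numpy as np

    paddle.disable_static()

    x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
    # A larger beta sharpens the curve toward relu; threshold controls
    # when the linear fallback kicks in.
    m = paddle.nn.Softplus(beta=2, threshold=15)
    out = m(x)  # approximately [0.185550, 0.256508, 0.399070, 0.518746]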
@@ -726,14 +722,14 @@ class Softshrink(layers.Layer):
     .. math::
-        \text{Softshrink}(x) =
-            \begin{cases}
-                x - threshold, & \text{ if } x > threshold \\
-                x + threshold, & \text{ if } x < -threshold \\
-                0, & \text{ otherwise }
-            \end{cases}
+        Softshrink(x)= \\begin{cases}
+                        x - threshold, \\text{if } x > threshold \\\\
+                        x + threshold, \\text{if } x < -threshold \\\\
+                        0, \\text{otherwise}
+                    \\end{cases}
     Parameters:
+        threshold (float, optional): The value of threshold(must be no less than zero) for softplus. Default is 0.5
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -742,17 +738,16 @@ class Softshrink(layers.Layer):
         - output: Tensor with the same shape as input.
     Examples:
         .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()

            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
            m = paddle.nn.Softshrink()
            out = m(x) # [-0.4, 0, 0, 0.3]
     """

     def __init__(self, threshold=0.5, name=None):
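And for the newly documented threshold parameter, a hypothetical variation on the docstring example (not from the commit): a wider dead zone zeroes more of the input.

.. code-block:: python

    import paddle
    import numpy as np

    paddle.disable_static()

    x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
    m = paddle.nn.Softshrink(threshold=0.3)
    out = m(x)  # approximately [-0.6, 0, 0, 0.5]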
@@ -770,7 +765,7 @@ class Softsign(layers.Layer):
     .. math::
-        \text{Softsign}(x) = \frac{x}{1 + |x|}
+        Softsign(x) = \\frac{x}{1 + |x|}
     Parameters:
         name (str, optional): Name for the operation (optional, default is None).
@@ -781,17 +776,16 @@ class Softsign(layers.Layer):
         - output: Tensor with the same shape as input.
     Examples:
         .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()

            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            m = paddle.nn.Softsign()
            out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
     """

     def __init__(self, name=None):
@@ -808,7 +802,7 @@ class Tanhshrink(layers.Layer):
     .. math::
-        \text{Tanhshrink}(x) = x - \text{Tanh}(x)
+        Tanhshrink(x) = x - tanh(x)
     Parameters:
         name (str, optional): Name for the operation (optional, default is None).
@@ -821,14 +815,14 @@ class Tanhshrink(layers.Layer):
     Examples:
         .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()

            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
            m = paddle.nn.Tanhshrink()
            out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
     """

     def __init__(self, name=None):
...