From c11c83fb3aac5be3cf2d7f4e1e3b1b25c13e8b79 Mon Sep 17 00:00:00 2001
From: hong19860320 <9973393+hong19860320@users.noreply.github.com>
Date: Sat, 22 Aug 2020 17:09:22 +0800
Subject: [PATCH] Add the parameter checking for softshrink and fix the doc
 strings (#26530)

---
 .../tests/unittests/test_activation_op.py |   3 +
 python/paddle/nn/functional/activation.py | 126 ++++++++----------
 python/paddle/nn/layer/activation.py      | 106 +++++++--------
 3 files changed, 111 insertions(+), 124 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 533f1081cd5..ab61a5b3cfc 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -717,6 +717,9 @@ class TestSoftshrinkAPI(unittest.TestCase):
             # The input dtype must be float16, float32, float64.
             x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
             self.assertRaises(TypeError, F.softshrink, x_int32)
+            # The threshold must be no less than zero
+            x_fp32 = paddle.data(name='x_fp32', shape=[12, 10], dtype='float32')
+            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
             # support the input dtype is float16
             x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
             F.softshrink(x_fp16)
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index a3abca8417d..bd975106bdf 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -225,7 +225,7 @@ def hardtanh(x, min=-1.0, max=1.0, name=None):
             x, \\text{otherwise}
         \\end{cases}
 
-    Args:
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         min (float, optional): The minimum value of the linear region range. Default is -1.
         max (float, optional): The maximum value of the linear region range. Default is 1.
@@ -598,9 +598,9 @@ def relu6(x, name=None):
 
     .. math::
 
-        \text{relu6}(x) = \min(\max(0,x), 6)
+        relu6(x) = min(max(0,x), 6)
 
-    Args:
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -609,18 +609,16 @@
     Returns:
         A Tensor with the same data type and shape as ``x`` .
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import paddle.nn.functional as F
-        import numpy as np
-
-        paddle.disable_static()
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
 
-        x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
-        out = F.relu6(x) # [0, 0.3, 6]
+            paddle.disable_static()
+            x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
+            out = F.relu6(x) # [0, 0.3, 6]
     """
     threshold = 6.0
     if in_dygraph_mode():
@@ -646,11 +644,9 @@ def selu(x,
 
     .. math::
 
-        \text{selu}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1))), \\
-        with\,alpha=1.6732632423543772848170429916717 and \\
-        scale=1.0507009873554804934193349852946
+        selu(x) = scale * (max(0,x) + min(0, alpha * (e^{x} - 1)))
 
-    Args:
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         scale (float, optional): The value of scale for selu. Default is 1.0507009873554804934193349852946
         alpha (float, optional): The value of alpha for selu. Default is 1.6732632423543772848170429916717
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -661,18 +657,16 @@
     Returns:
         A Tensor with the same data type and shape as ``x`` .
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import paddle.nn.functional as F
-        import numpy as np
-
-        paddle.disable_static()
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
 
-        x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
-        out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]]
+            paddle.disable_static()
+            x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
+            out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]]
     """
     if in_dygraph_mode():
         return core.ops.selu(x, 'scale', scale, 'alpha', alpha)
@@ -856,10 +850,10 @@ def softplus(x, beta=1, threshold=20, name=None):
 
     .. math::
 
-        \text{softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
-        \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.
+        softplus(x) = \\frac{1}{beta} * \\log(1 + e^{beta * x}) \\\\
+        \\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}
 
-    Args:
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         beta (float, optional): The value of beta for softplus. Default is 1
         threshold (float, optional): The value of threshold for softplus. Default is 20
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -870,18 +864,16 @@
     Returns:
         A Tensor with the same data type and shape as ``x`` .
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import paddle.nn.functional as F
-        import numpy as np
-
-        paddle.disable_static()
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
 
-        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-        out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
+            paddle.disable_static()
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
     """
     if in_dygraph_mode():
         return core.ops.softplus(x, 'beta', beta, 'threshold', threshold)
@@ -905,14 +897,13 @@ def softshrink(x, threshold=0.5, name=None):
 
     .. math::
 
-        \text{softshrink}(x) =
-        \begin{cases}
-            x - threshold, & \text{ if } x > threshold \\
-            x + threshold, & \text{ if } x < -threshold \\
-            0, & \text{ otherwise }
-        \end{cases}
+        softshrink(x)= \\begin{cases}
+            x - threshold, \\text{if } x > threshold \\\\
+            x + threshold, \\text{if } x < -threshold \\\\
+            0, \\text{otherwise}
+            \\end{cases}
 
-    Args:
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         threshold (float, optional): The value of threshold (must be no less than zero) for softshrink. Default is 0.5
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -922,19 +913,22 @@
     Returns:
         A Tensor with the same data type and shape as ``x`` .
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import paddle.nn.functional as F
-        import numpy as np
-
-        paddle.disable_static()
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
 
-        x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
-        out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
+            paddle.disable_static()
+            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
+            out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
     """
+    if threshold < 0:
+        raise ValueError(
+            "The threshold must be no less than zero. Received: {}.".format(
+                threshold))
+
     if in_dygraph_mode():
         return core.ops.softshrink(x, 'lambda', threshold)
@@ -956,9 +950,9 @@ def softsign(x, name=None):
 
     .. math::
 
-        \text{softsign}(x) = \frac{x}{1 + |x|}
+        softsign(x) = \\frac{x}{1 + |x|}
 
-    Args:
+    Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -967,18 +961,16 @@
     Returns:
         A Tensor with the same data type and shape as ``x`` .
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import paddle.nn.functional as F
-        import numpy as np
-
-        paddle.disable_static()
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
 
-        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-        out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+            paddle.disable_static()
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
     """
     if in_dygraph_mode():
         return core.ops.softsign(x)
@@ -997,7 +989,7 @@ def tanhshrink(x, name=None):
 
     .. math::
 
-        \text{tanhshrink}(x) = x - \text{tanh}(x)
+        tanhshrink(x) = x - tanh(x)
 
     Args:
         x (Tensor): The input Tensor with data type float32, float64.
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -1008,18 +1000,16 @@
     Returns:
         A Tensor with the same data type and shape as ``x`` .
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import paddle.nn.functional as F
-        import numpy as np
-
-        paddle.disable_static()
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
 
-        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-        out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+            paddle.disable_static()
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
     """
     if in_dygraph_mode():
         return core.ops.tanh_shrink(x)
diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py
index dcf037a38d7..6373b058832 100644
--- a/python/paddle/nn/layer/activation.py
+++ b/python/paddle/nn/layer/activation.py
@@ -513,7 +513,7 @@ class ReLU6(layers.Layer):
 
     .. math::
 
-        \text{ReLU6}(x) = \min(\max(0,x), 6)
+        ReLU6(x) = min(max(0,x), 6)
 
     Parameters:
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -524,17 +524,16 @@
     Shape:
         - input: Tensor with any shape.
         - output: Tensor with the same shape as input.
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import numpy as np
+            import paddle
+            import numpy as np
 
-        paddle.disable_static()
+            paddle.disable_static()
 
-        x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
-        m = paddle.nn.ReLU6()
-        out = m(x) # [0, 0.3, 6]
+            x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
+            m = paddle.nn.ReLU6()
+            out = m(x) # [0, 0.3, 6]
     """
 
     def __init__(self, name=None):
@@ -551,9 +550,7 @@ class SELU(layers.Layer):
 
     .. math::
 
-        \text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1))), \\
-        with\,alpha=1.6732632423543772848170429916717 and \\
-        scale=1.0507009873554804934193349852946
+        SELU(x) = scale * (max(0,x) + min(0, alpha * (e^{x} - 1)))
 
     Parameters:
         scale (float, optional): The value of scale for SELU. Default is 1.0507009873554804934193349852946
         alpha (float, optional): The value of alpha for SELU. Default is 1.6732632423543772848170429916717
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -566,17 +563,16 @@
     Shape:
         - input: Tensor with any shape.
         - output: Tensor with the same shape as input.
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import numpy as np
+            import paddle
+            import numpy as np
 
-        paddle.disable_static()
+            paddle.disable_static()
 
-        x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
-        m = paddle.nn.SELU()
-        out = m(x) # [[0, 1.050701],[2.101402, 3.152103]]
+            x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
+            m = paddle.nn.SELU()
+            out = m(x) # [[0, 1.050701],[2.101402, 3.152103]]
     """
 
     def __init__(self,
@@ -684,10 +680,12 @@ class Softplus(layers.Layer):
 
     .. math::
 
-        \text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
-        \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.
+        Softplus(x) = \\frac{1}{beta} * \\log(1 + e^{beta * x}) \\\\
+        \\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}
 
     Parameters:
+        beta (float, optional): The value of beta for Softplus. Default is 1
+        threshold (float, optional): The value of threshold for Softplus. Default is 20
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -696,18 +694,16 @@
     Shape:
         - input: Tensor with any shape.
         - output: Tensor with the same shape as input.
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import numpy as np
-
-        paddle.disable_static()
+            import paddle
+            import numpy as np
 
-        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-        m = paddle.nn.Softplus()
-        out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355]
+            paddle.disable_static()
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            m = paddle.nn.Softplus()
+            out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355]
     """
 
     def __init__(self, beta=1, threshold=20, name=None):
@@ -726,14 +722,14 @@ class Softshrink(layers.Layer):
 
     .. math::
 
-        \text{Softshrink}(x) =
-        \begin{cases}
-            x - threshold, & \text{ if } x > threshold \\
-            x + threshold, & \text{ if } x < -threshold \\
-            0, & \text{ otherwise }
-        \end{cases}
+        Softshrink(x)= \\begin{cases}
+            x - threshold, \\text{if } x > threshold \\\\
+            x + threshold, \\text{if } x < -threshold \\\\
+            0, \\text{otherwise}
+            \\end{cases}
 
     Parameters:
+        threshold (float, optional): The value of threshold (must be no less than zero) for softshrink. Default is 0.5
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -742,17 +738,16 @@
     Shape:
         - input: Tensor with any shape.
         - output: Tensor with the same shape as input.
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import numpy as np
+            import paddle
+            import numpy as np
 
-        paddle.disable_static()
+            paddle.disable_static()
 
-        x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
-        m = paddle.nn.Softshrink()
-        out = m(x) # [-0.4, 0, 0, 0.3]
+            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
+            m = paddle.nn.Softshrink()
+            out = m(x) # [-0.4, 0, 0, 0.3]
     """
 
     def __init__(self, threshold=0.5, name=None):
@@ -770,7 +765,7 @@ class Softsign(layers.Layer):
 
     .. math::
 
-        \text{Softsign}(x) = \frac{x}{1 + |x|}
+        Softsign(x) = \\frac{x}{1 + |x|}
 
     Parameters:
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
@@ -781,17 +776,16 @@
     Shape:
         - input: Tensor with any shape.
         - output: Tensor with the same shape as input.
 
     Examples:
-
         .. code-block:: python
 
-        import paddle
-        import numpy as np
+            import paddle
+            import numpy as np
 
-        paddle.disable_static()
+            paddle.disable_static()
 
-        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-        m = paddle.nn.Softsign()
-        out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            m = paddle.nn.Softsign()
+            out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
     """
 
     def __init__(self, name=None):
@@ -808,7 +802,7 @@ class Tanhshrink(layers.Layer):
 
     .. math::
 
-        \text{Tanhshrink}(x) = x - \text{Tanh}(x)
+        Tanhshrink(x) = x - tanh(x)
 
     Parameters:
         name (str, optional): Name for the operation (optional, default is None).
@@ -821,14 +815,14 @@ class Tanhshrink(layers.Layer):
     Examples:
         .. code-block:: python
 
-        import paddle
-        import numpy as np
+            import paddle
+            import numpy as np
 
-        paddle.disable_static()
+            paddle.disable_static()
 
-        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-        m = paddle.nn.Tanhshrink()
-        out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            m = paddle.nn.Tanhshrink()
+            out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
     """
 
     def __init__(self, name=None):
-- 
GitLab
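Below is a minimal usage sketch (not part of the patch itself) of the parameter check this change adds to F.softshrink; it assumes the Paddle 2.0-era dygraph API that the docstring examples above already use.

import numpy as np
import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))

# A threshold that is no less than zero is accepted, as before.
out = F.softshrink(x, threshold=0.5)  # [-0.4, 0, 0, 0.3]

# A negative threshold now fails fast with a Python ValueError instead of
# reaching the C++ kernel with an invalid value.
try:
    F.softshrink(x, threshold=-1.0)
except ValueError as e:
    print(e)  # The threshold must be no less than zero. Received: -1.0.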