Unverified commit a4303496 authored by Qi Li, committed by GitHub

[DOC] activation api doc, test=document_fix (#28405)

Parent 23439b16
......@@ -61,7 +61,7 @@ from .layer.activation import SELU #DEFINE_ALIAS
from .layer.activation import LeakyReLU #DEFINE_ALIAS
from .layer.activation import Sigmoid #DEFINE_ALIAS
from .layer.activation import Hardsigmoid #DEFINE_ALIAS
from .layer.activation import LogSigmoid
from .layer.activation import LogSigmoid #DEFINE_ALIAS
from .layer.activation import Softmax #DEFINE_ALIAS
from .layer.activation import Softplus #DEFINE_ALIAS
from .layer.activation import Softshrink #DEFINE_ALIAS
......
......@@ -81,8 +81,6 @@ def elu(x, alpha=1.0, name=None):
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
out = F.elu(x, alpha=0.2)
# [[-0.12642411 6. ]
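These example hunks drop the ``paddle.disable_static()`` call, presumably because dynamic mode is the default from Paddle 2.0 onward. A minimal, self-contained sketch of the updated elu example under that assumption (output values taken from the docstring comment above):

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[-1.0, 6.0], [1.0, 15.6]])
out = F.elu(x, alpha=0.2)
# elu(x) = x for x > 0, alpha * (exp(x) - 1) otherwise
# out: [[-0.12642411,  6.      ],
#       [ 1.        , 15.6     ]]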
......@@ -135,8 +133,6 @@ def gelu(x, approximate=False, name=None):
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]]))
out1 = F.gelu(x) # [-0.158655 0.345731 0.841345 1.39979]
out2 = F.gelu(x, True) # [-0.158808 0.345714 0.841192 1.39957]
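For reference, a runnable sketch of the gelu example showing both the exact (erf-based) form and the tanh approximation toggled by ``approximate`` (output values copied from the comments above; assumes Paddle 2.0+):

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[-1.0, 0.5], [1.0, 1.5]])
out_exact = F.gelu(x)                     # erf-based formulation
out_approx = F.gelu(x, approximate=True)  # tanh approximation
# out_exact:  [[-0.158655, 0.345731], [0.841345, 1.39979]]
# out_approx: [[-0.158808, 0.345714], [0.841192, 1.39957]]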
......@@ -237,8 +233,6 @@ def hardtanh(x, min=-1.0, max=1.0, name=None):
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
out = F.hardtanh(x) # [-1., 0.3, 1.]
"""
......@@ -439,8 +433,6 @@ def prelu(x, weight, name=None):
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
data = np.array([[[[-2.0, 3.0, -4.0, 5.0],
[ 3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
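The prelu example in this hunk is cut off by the diff view; a smaller, hedged sketch of the same API using a single shared slope (the ``weight`` tensor holds either one element or one element per channel of ``x``):

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[-2.0, 3.0], [-4.0, 5.0]])
w = paddle.to_tensor([0.25])   # one shared slope applied to negative inputs
out = F.prelu(x, w)
# prelu(x) = x for x > 0, w * x otherwise  ->  [[-0.5, 3.], [-1., 5.]]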
......@@ -512,8 +504,6 @@ def relu(x, name=None):
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
out = F.relu(x) # [0., 0., 1.]
"""
......@@ -550,8 +540,6 @@ def log_sigmoid(x, name=None):
import paddle
import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = F.log_sigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
"""
......@@ -823,12 +811,7 @@ def softmax(x, axis=-1, dtype=None, name=None):
calculations. It should be in range [-D, D), where D is the
number of dimensions of ``x`` . If ``axis`` < 0, it works the same way as
:math:`axis + D` . Default is -1.
dtype (str|np.dtype|core.VarDesc.VarType, optional): The desired data
type of the output tensor. If dtype is specified, ``x`` is casted
to ``dtype`` before the operation is performed. This is useful for
preventing data type overflows. Supported dtype: float32, float64.
If ``dtype`` is None, the output Tensor has the same dtype as x.
Default is None.
dtype (str, optional): The data type of the output tensor, which can be float32 or float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
......@@ -843,8 +826,6 @@ def softmax(x, axis=-1, dtype=None, name=None):
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = np.array([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
......
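The reworked ``dtype`` description is the substantive change in the softmax hunks: when ``dtype`` is given, ``x`` is cast to it before the softmax is computed, which helps avoid overflow with low-precision inputs. A hedged sketch of the ``axis`` and ``dtype`` arguments (assumes Paddle 2.0+):

import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 3, 4], dtype='float32')
out_last = F.softmax(x)                   # softmax over the last axis (axis=-1)
out_axis1 = F.softmax(x, axis=1)          # softmax over axis 1 instead
out_fp64 = F.softmax(x, dtype='float64')  # cast to float64 before the op; output is float64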
......@@ -35,11 +35,11 @@ from .rnn import *
from .vision import *
from .transformer import *
# from .activation import PReLU #DEFINE_ALIAS
from .activation import PReLU #DEFINE_ALIAS
from .activation import ReLU #DEFINE_ALIAS
from .activation import LeakyReLU #DEFINE_ALIAS
from .activation import Sigmoid #DEFINE_ALIAS
# from .activation import Softmax #DEFINE_ALIAS
from .activation import Softmax #DEFINE_ALIAS
from .activation import LogSoftmax #DEFINE_ALIAS
from .common import BilinearTensorProduct #DEFINE_ALIAS
from .common import Bilinear #DEFINE_ALIAS
......
......@@ -72,8 +72,6 @@ class ELU(layers.Layer):
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
m = paddle.nn.ELU(0.2)
out = m(x)
......@@ -121,8 +119,6 @@ class GELU(layers.Layer):
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]]))
m = paddle.nn.GELU()
......@@ -301,8 +297,6 @@ class Hardtanh(layers.Layer):
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
m = paddle.nn.Hardtanh()
out = m(x) # [-1., 0.3, 1.]
......@@ -333,7 +327,7 @@ class PReLU(layers.Layer):
Default is 1.
init (float, optional): Init value of learnable `weight`. Default is 0.25.
weight_attr(ParamAttr, optional): The parameter attribute for the learnable `weight`.
Default is None. For more information, please refer to :ref:`api_fluid_ParamAttr`.
Default is None. For more information, please refer to :ref:`api_paddle_ParamAttr`.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
......@@ -347,7 +341,6 @@ class PReLU(layers.Layer):
import paddle
import numpy as np
paddle.disable_static()
paddle.set_default_dtype("float64")
data = np.array([[[[-2.0, 3.0, -4.0, 5.0],
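The PReLU layer hunk above documents ``num_parameters``, ``init`` and ``weight_attr``; a compact, hedged sketch of the class form (parameter values are illustrative, assumes Paddle 2.0+):

import paddle

x = paddle.to_tensor([[-2.0, 3.0], [-4.0, 5.0]])
m = paddle.nn.PReLU(num_parameters=1, init=0.25)  # one learnable slope, initialised to 0.25
out = m(x)                                        # [[-0.5, 3.], [-1., 5.]]
# weight_attr accepts a paddle.ParamAttr, e.g. to plug in a custom initializer:
# m = paddle.nn.PReLU(weight_attr=paddle.ParamAttr(
#     initializer=paddle.nn.initializer.Constant(0.5)))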
......@@ -408,8 +401,6 @@ class ReLU(layers.Layer):
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
m = paddle.nn.ReLU()
out = m(x) # [0., 0., 1.]
......@@ -885,8 +876,6 @@ class LogSigmoid(layers.Layer):
import paddle
paddle.disable_static()
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
m = paddle.nn.LogSigmoid()
out = m(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
......@@ -983,12 +972,6 @@ class Softmax(layers.Layer):
calculations. It should be in range [-D, D), where D is the
number of dimensions of ``x`` . If ``axis`` < 0, it works the same way as
:math:`axis + D` . Default is -1.
dtype (str|np.dtype|core.VarDesc.VarType, optional): The desired data
type of the output tensor. If dtype is specified, ``x`` is casted
to ``dtype`` before the operation is performed. This is useful for
preventing data type overflows. Supported dtype: float32, float64.
If ``dtype`` is None, the output Tensor has the same dtype as x.
Default is None.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
......@@ -1002,8 +985,6 @@ class Softmax(layers.Layer):
import paddle
import numpy as np
paddle.disable_static()
x = np.array([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
......
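To close out the layer-side changes, a brief sketch of the Softmax class whose ``dtype`` paragraph is trimmed above; ``axis`` behaves exactly like the functional form (assumes Paddle 2.0+):

import paddle

x = paddle.to_tensor([[2.0, 3.0, 4.0, 5.0],
                      [7.0, 8.0, 8.0, 9.0]])
m = paddle.nn.Softmax()        # axis=-1 by default: each row sums to 1
out = m(x)
m0 = paddle.nn.Softmax(axis=0)
out0 = m0(x)                   # each column sums to 1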