diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py
index e53ba753a9bda79cbdcbcd8d92d05833b9643454..51b2e2072791ed82f6605d57d056c8dc0a491f28 100644
--- a/python/paddle/nn/__init__.py
+++ b/python/paddle/nn/__init__.py
@@ -61,7 +61,7 @@ from .layer.activation import SELU #DEFINE_ALIAS
 from .layer.activation import LeakyReLU #DEFINE_ALIAS
 from .layer.activation import Sigmoid #DEFINE_ALIAS
 from .layer.activation import Hardsigmoid #DEFINE_ALIAS
-from .layer.activation import LogSigmoid
+from .layer.activation import LogSigmoid #DEFINE_ALIAS
 from .layer.activation import Softmax #DEFINE_ALIAS
 from .layer.activation import Softplus #DEFINE_ALIAS
 from .layer.activation import Softshrink #DEFINE_ALIAS
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index 0f79aa012ca325a4a56ebe89511b074d6814df31..fd86c2e9fa760dd3929c5b157090917bb22ef507 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -81,8 +81,6 @@ def elu(x, alpha=1.0, name=None):
             import paddle.nn.functional as F
             import numpy as np
 
-            paddle.disable_static()
-
             x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
             out = F.elu(x, alpha=0.2)
             # [[-0.12642411 6. ]
@@ -135,8 +133,6 @@ def gelu(x, approximate=False, name=None):
             import paddle.nn.functional as F
             import numpy as np
 
-            paddle.disable_static()
-
             x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]]))
             out1 = F.gelu(x) # [-0.158655 0.345731 0.841345 1.39979]
             out2 = F.gelu(x, True) # [-0.158808 0.345714 0.841192 1.39957]
@@ -237,8 +233,6 @@ def hardtanh(x, min=-1.0, max=1.0, name=None):
             import paddle.nn.functional as F
             import numpy as np
 
-            paddle.disable_static()
-
             x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
             out = F.hardtanh(x) # [-1., 0.3, 1.]
     """
@@ -439,8 +433,6 @@ def prelu(x, weight, name=None):
             import paddle.nn.functional as F
             import numpy as np
 
-            paddle.disable_static()
-
             data = np.array([[[[-2.0, 3.0, -4.0, 5.0],
                                [ 3.0, -4.0, 5.0, -6.0],
                                [-7.0, -8.0, 8.0, 9.0]],
@@ -512,8 +504,6 @@ def relu(x, name=None):
             import paddle.nn.functional as F
             import numpy as np
 
-            paddle.disable_static()
-
             x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
             out = F.relu(x) # [0., 0., 1.]
     """
@@ -550,8 +540,6 @@ def log_sigmoid(x, name=None):
             import paddle
             import paddle.nn.functional as F
 
-            paddle.disable_static()
-
             x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
             out = F.log_sigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
     """
@@ -823,12 +811,7 @@ def softmax(x, axis=-1, dtype=None, name=None):
             calculations. It should be in range [-D, D), where D is the
             dimensions of ``x`` . If ``axis`` < 0, it works the same way as
             :math:`axis + D` . Default is -1.
-        dtype (str|np.dtype|core.VarDesc.VarType, optional): The desired data
-            type of the output tensor. If dtype is specified, ``x`` is casted
-            to ``dtype`` before the operation is performed. This is useful for
-            preventing data type overflows. Supported dtype: float32, float64.
-            If ``dtype`` is None, the output Tensor has the same dtype as x.
-            Default is None.
+        dtype (str, optional): The data type of the output tensor, can be float32, float64.
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
 
@@ -843,8 +826,6 @@ def softmax(x, axis=-1, dtype=None, name=None):
             import paddle.nn.functional as F
             import numpy as np
 
-            paddle.disable_static()
-
             x = np.array([[[2.0, 3.0, 4.0, 5.0],
                            [3.0, 4.0, 5.0, 6.0],
                            [7.0, 8.0, 8.0, 9.0]],
diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py
index 801290e99572be628ae5fa878f8ac8076f89b26a..4e68fcab3fda8cb5a20d3df0a2d5d3a9d27c0228 100644
--- a/python/paddle/nn/layer/__init__.py
+++ b/python/paddle/nn/layer/__init__.py
@@ -35,11 +35,11 @@ from .rnn import *
 from .vision import *
 from .transformer import *
 
-# from .activation import PReLU #DEFINE_ALIAS
+from .activation import PReLU #DEFINE_ALIAS
 from .activation import ReLU #DEFINE_ALIAS
 from .activation import LeakyReLU #DEFINE_ALIAS
 from .activation import Sigmoid #DEFINE_ALIAS
-# from .activation import Softmax #DEFINE_ALIAS
+from .activation import Softmax #DEFINE_ALIAS
 from .activation import LogSoftmax #DEFINE_ALIAS
 from .common import BilinearTensorProduct #DEFINE_ALIAS
 from .common import Bilinear #DEFINE_ALIAS
diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py
index dbb9d00f365cfa6c80ec95d43d00e77ffe5874ee..b0a1b27855a80b71026d3ebc38575361d187c1aa 100644
--- a/python/paddle/nn/layer/activation.py
+++ b/python/paddle/nn/layer/activation.py
@@ -72,8 +72,6 @@ class ELU(layers.Layer):
             import paddle
             import numpy as np
 
-            paddle.disable_static()
-
             x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
             m = paddle.nn.ELU(0.2)
             out = m(x)
@@ -121,8 +119,6 @@ class GELU(layers.Layer):
             import paddle
             import numpy as np
 
-            paddle.disable_static()
-
             x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]]))
 
             m = paddle.nn.GELU()
@@ -301,8 +297,6 @@ class Hardtanh(layers.Layer):
             import paddle
             import numpy as np
 
-            paddle.disable_static()
-
             x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
             m = paddle.nn.Hardtanh()
             out = m(x) # # [-1., 0.3, 1.]
@@ -333,7 +327,7 @@ class PReLU(layers.Layer):
             Default is 1.
         init (float, optional): Init value of learnable `weight`. Default is 0.25.
         weight_attr(ParamAttr, optional): The parameter attribute for the learnable `weight`.
-            Default is None. For more information, please refer to :ref:`api_fluid_ParamAttr`.
+            Default is None. For more information, please refer to :ref:`api_paddle_ParamAttr`.
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
 
@@ -347,7 +341,6 @@ class PReLU(layers.Layer):
             import paddle
             import numpy as np
 
-            paddle.disable_static()
             paddle.set_default_dtype("float64")
 
             data = np.array([[[[-2.0, 3.0, -4.0, 5.0],
@@ -408,8 +401,6 @@ class ReLU(layers.Layer):
             import paddle
             import numpy as np
 
-            paddle.disable_static()
-
             x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
             m = paddle.nn.ReLU()
             out = m(x) # [0., 0., 1.]
@@ -885,8 +876,6 @@ class LogSigmoid(layers.Layer):
 
             import paddle
 
-            paddle.disable_static()
-
             x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
             m = paddle.nn.LogSigmoid()
             out = m(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
@@ -983,12 +972,6 @@ class Softmax(layers.Layer):
             calculations. It should be in range [-D, D), where D is the
             dimensions of ``x`` . If ``axis`` < 0, it works the same way as
             :math:`axis + D` . Default is -1.
-        dtype (str|np.dtype|core.VarDesc.VarType, optional): The desired data
-            type of the output tensor. If dtype is specified, ``x`` is casted
-            to ``dtype`` before the operation is performed. This is useful for
-            preventing data type overflows. Supported dtype: float32, float64.
-            If ``dtype`` is None, the output Tensor has the same dtype as x.
-            Default is None.
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.
 
@@ -1002,8 +985,6 @@ class Softmax(layers.Layer):
             import paddle
             import numpy as np
 
-            paddle.disable_static()
-
             x = np.array([[[2.0, 3.0, 4.0, 5.0],
                            [3.0, 4.0, 5.0, 6.0],
                            [7.0, 8.0, 8.0, 9.0]],
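
Note: the short script below is a hypothetical smoke test of the behavior this patch documents, not part of the change itself. It assumes a Paddle build where dynamic graph mode is already the default (which is why the docstring examples no longer call paddle.disable_static()) and where the re-enabled PReLU and Softmax layers are importable as shown; the expected log_sigmoid values are taken from the docstring examples above.

    import numpy as np
    import paddle
    import paddle.nn.functional as F
    from paddle.nn.layer import PReLU, Softmax   # imports re-enabled in layer/__init__.py by this patch

    # Dynamic mode is assumed to be the default, so no paddle.disable_static() call.
    x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
    print(F.log_sigmoid(x))            # expected: [-0.313262 -0.126928 -0.0485874 -0.0181499]
    print(paddle.nn.LogSigmoid()(x))   # layer alias now exported via paddle/nn/__init__.py

    softmax = Softmax()                # axis=-1 by default
    print(softmax(paddle.to_tensor([[2.0, 3.0, 4.0, 5.0]])))

    prelu = PReLU()                    # per the docstring defaults: one learnable weight, init 0.25
    print(prelu(paddle.to_tensor(np.random.randn(1, 2, 3, 4).astype('float32'))))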