Unverified commit 2cd10fc4, authored by Z zhupengyang, committed by GitHub

fix 2.0 api docs (#28445)

Parent a083c76a
@@ -9730,15 +9730,13 @@ def swish(x, beta=1.0, name=None):
return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.prelu")
@deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
def prelu(x, mode, param_attr=None, name=None):
"""
:api_attr: Static Graph
Equation:
prelu activation.
.. math::
y = \max(0, x) + \\alpha * \min(0, x)
prelu(x) = max(0, x) + \\alpha * min(0, x)
There are three modes for the activation:
@@ -9748,34 +9746,28 @@ def prelu(x, mode, param_attr=None, name=None):
channel: Elements in same channel share same alpha.
element: All elements do not share alpha. Each element has its own alpha.
Args:
x (Variable): The input Tensor or LoDTensor with data type float32.
Parameters:
x (Tensor): The input Tensor or LoDTensor with data type float32.
mode (str): The mode for weight sharing.
param_attr(ParamAttr|None): The parameter attribute for the learnable
param_attr (ParamAttr|None, optional): The parameter attribute for the learnable
weight (alpha); it can be created by ParamAttr. None by default.
For detailed information, please refer to :ref:`api_fluid_ParamAttr`.
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable:
output(Variable): The tensor or LoDTensor with the same shape as input.
The data type is float32.
Tensor: A tensor with the same shape and data type as x.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
from paddle.fluid.param_attr import ParamAttr
x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32")
mode = 'channel'
output = fluid.layers.prelu(
x,mode,param_attr=ParamAttr(name='alpha'))
x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
param = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.2))
out = paddle.static.nn.prelu(x, 'all', param)
# with alpha = 0.2, an input [-1., 2., 3.] yields [-0.2, 2., 3.]
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'prelu')
......
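For readers comparing the three weight-sharing modes, here is a minimal static-graph sketch; the input shape and the use of the default ``param_attr`` are illustrative assumptions, not part of this commit:

.. code-block:: python

    import paddle

    paddle.enable_static()

    # NCHW input; the concrete shape below is only an illustration.
    x = paddle.static.data(name="x", shape=[None, 3, 10, 10], dtype="float32")

    out_all = paddle.static.nn.prelu(x, 'all')          # one alpha shared by all elements
    out_channel = paddle.static.nn.prelu(x, 'channel')  # one alpha per channel (3 here)
    out_element = paddle.static.nn.prelu(x, 'element')  # one alpha per input element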
@@ -79,9 +79,8 @@ def elu(x, alpha=1.0, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
out = F.elu(x, alpha=0.2)
# [[-0.12642411 6. ]
# [ 1. 15.6 ]]
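The printed values follow the ELU definition, elu(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise; a quick NumPy check (a verification sketch, not part of the commit):

.. code-block:: python

    import numpy as np

    def elu_ref(x, alpha=1.0):
        # x if x > 0, else alpha * (exp(x) - 1)
        return np.where(x > 0, x, alpha * (np.exp(x) - 1))

    x = np.array([[-1., 6.], [1., 15.6]])
    print(elu_ref(x, alpha=0.2))
    # [[-0.12642411  6.        ]
    #  [ 1.         15.6       ]]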
@@ -131,11 +130,14 @@ def gelu(x, approximate=False, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]]))
out1 = F.gelu(x) # [-0.158655 0.345731 0.841345 1.39979]
out2 = F.gelu(x, True) # [-0.158808 0.345714 0.841192 1.39957]
x = paddle.to_tensor([[-1, 0.5], [1, 1.5]])
out1 = F.gelu(x)
# [[-0.15865529, 0.34573123],
# [ 0.84134471, 1.39978933]]
out2 = F.gelu(x, True)
# [[-0.15880799, 0.34571400],
# [ 0.84119201, 1.39957154]]
"""
if in_dygraph_mode():
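out1 and out2 correspond to the exact erf form and the tanh approximation of GELU. A NumPy sketch of both formulas, for reference only:

.. code-block:: python

    import math
    import numpy as np

    def gelu_exact(x):
        # 0.5 * x * (1 + erf(x / sqrt(2)))
        return 0.5 * x * (1 + np.vectorize(math.erf)(x / math.sqrt(2)))

    def gelu_tanh(x):
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        return 0.5 * x * (1 + np.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)))

    x = np.array([[-1, 0.5], [1, 1.5]], dtype=np.float32)
    print(gelu_exact(x))  # matches F.gelu(x)
    print(gelu_tanh(x))   # matches F.gelu(x, True)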
@@ -181,11 +183,8 @@ def hardshrink(x, threshold=0.5, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([-1, 0.3, 2.5]))
x = paddle.to_tensor([-1, 0.3, 2.5])
out = F.hardshrink(x) # [-1., 0., 2.5]
"""
@@ -385,11 +384,8 @@ def leaky_relu(x, negative_slope=0.01, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([-2, 0, 1], 'float32'))
x = paddle.to_tensor([-2., 0., 1.])
out = F.leaky_relu(x) # [-0.02, 0., 1.]
"""
@@ -1147,8 +1143,10 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
.. math::
log\\_softmax[i, j] = log(softmax(x))
= log(\\frac{\exp(X[i, j])}{\\sum_j(exp(X[i, j])})
\\begin{aligned}
log\\_softmax[i, j] &= log(softmax(x)) \\\\
&= log(\\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j]))})
\\end{aligned}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
@@ -1174,16 +1172,13 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = np.array([[[-2.0, 3.0, -4.0, 5.0],
x = [[[-2.0, 3.0, -4.0, 5.0],
[3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[6.0, 7.0, 8.0, 9.0]]], 'float32')
[6.0, 7.0, 8.0, 9.0]]]
x = paddle.to_tensor(x)
out1 = F.log_softmax(x)
out2 = F.log_softmax(x, dtype='float64')
......
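In practice log_softmax is computed in the numerically stable form x - logsumexp(x) rather than literally as log(softmax(x)); a NumPy sketch of the idea (not the actual kernel):

.. code-block:: python

    import numpy as np

    def log_softmax_ref(x, axis=-1):
        # Subtract the max first so exp() cannot overflow.
        shifted = x - np.max(x, axis=axis, keepdims=True)
        return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))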
@@ -70,9 +70,8 @@ class ELU(layers.Layer):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
m = paddle.nn.ELU(0.2)
out = m(x)
# [[-0.12642411 6. ]
@@ -166,11 +165,8 @@ class Hardshrink(layers.Layer):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([-1, 0.3, 2.5]))
x = paddle.to_tensor([-1, 0.3, 2.5])
m = paddle.nn.Hardshrink()
out = m(x) # [-1., 0., 2.5]
"""
@@ -293,11 +289,10 @@ class Hardtanh(layers.Layer):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
x = paddle.to_tensor([-1.5, 0.3, 2.5])
m = paddle.nn.Hardtanh()
out = m(x) # # [-1., 0.3, 1.]
out = m(x) # [-1., 0.3, 1.]
"""
def __init__(self, min=-1.0, max=1.0, name=None):
@@ -397,9 +392,8 @@ class ReLU(layers.Layer):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
x = paddle.to_tensor([-2., 0., 1.])
m = paddle.nn.ReLU()
out = m(x) # [0., 0., 1.]
"""
@@ -613,7 +607,7 @@ class Hardsigmoid(layers.Layer):
import paddle
m = paddle.nn.Sigmoid()
m = paddle.nn.Hardsigmoid()
x = paddle.to_tensor([-4., 5., 1.])
out = m(x) # [0., 1., 0.666667]
"""
@@ -1016,8 +1010,10 @@ class LogSoftmax(layers.Layer):
.. math::
Out[i, j] = log(softmax(x))
= log(\\frac{\exp(X[i, j])}{\\sum_j(exp(X[i, j])})
\\begin{aligned}
Out[i, j] &= log(softmax(x)) \\\\
&= log(\\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j]))})
\\end{aligned}
Parameters:
axis (int, optional): The axis along which to perform log_softmax
@@ -1035,16 +1031,13 @@ class LogSoftmax(layers.Layer):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = np.array([[[-2.0, 3.0, -4.0, 5.0],
x = [[[-2.0, 3.0, -4.0, 5.0],
[3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[6.0, 7.0, 8.0, 9.0]]])
[6.0, 7.0, 8.0, 9.0]]]
m = paddle.nn.LogSoftmax()
x = paddle.to_tensor(x)
out = m(x)
......
@@ -300,9 +300,6 @@ def ones(shape, dtype=None, name=None):
def ones_like(x, dtype=None, name=None):
"""
:alias_main: paddle.ones_like
:alias: paddle.tensor.ones_like, paddle.tensor.creation.ones_like
This OP returns a Tensor filled with the value 1, with the same shape and
data type (use ``dtype`` if ``dtype`` is not None) as ``x``.
@@ -330,11 +327,9 @@ def ones_like(x, dtype=None, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([1,2,3])
out1 = paddle.zeros_like(x) # [1., 1., 1.]
out2 = paddle.zeros_like(x, dtype='int32') # [1, 1, 1]
out1 = paddle.ones_like(x) # [1., 1., 1.]
out2 = paddle.ones_like(x, dtype='int32') # [1, 1, 1]
"""
return full_like(x=x, fill_value=1, dtype=dtype, name=name)
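As the return statement above shows, ones_like is a thin wrapper over full_like, so the two calls below produce the same result:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([1., 2., 3.])
    a = paddle.ones_like(x)
    b = paddle.full_like(x, fill_value=1)  # equivalent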
@@ -380,9 +375,6 @@ def zeros(shape, dtype=None, name=None):
def zeros_like(x, dtype=None, name=None):
"""
:alias_main: paddle.zeros_like
:alias: paddle.tensor.zeros_like, paddle.tensor.creation.zeros_like
This OP returns a Tensor filled with the value 0, with the same shape and
data type (use ``dtype`` if ``dtype`` is not None) as ``x``.
@@ -410,9 +402,7 @@ def zeros_like(x, dtype=None, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([1,2,3])
x = paddle.to_tensor([1, 2, 3])
out1 = paddle.zeros_like(x) # [0., 0., 0.]
out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0]
@@ -519,9 +509,6 @@ def full(shape, fill_value, dtype=None, name=None):
def arange(start=0, end=None, step=1, dtype=None, name=None):
"""
:alias_main: paddle.arange
:alias: paddle.tensor.arange, paddle.tensor.creation.arange
This OP returns a 1-D Tensor with spaced values within a given interval.
Values are generated into the half-open interval [``start``, ``end``) with
@@ -558,14 +545,11 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
Raises:
TypeError: If ``dtype`` is not int32, int64, float32, float64.
examples:
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
out1 = paddle.arange(5)
# [0, 1, 2, 3, 4]
......
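The length of the result is ceil((end - start) / step); for the example above, ceil((5 - 0) / 1) = 5 elements. A small sketch of that relationship (the values are illustrative):

.. code-block:: python

    import math
    import paddle

    start, end, step = 0, 5, 1
    out = paddle.arange(start, end, step)  # [0, 1, 2, 3, 4]
    assert out.shape[0] == math.ceil((end - start) / step)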
@@ -252,16 +252,14 @@ def standard_normal(shape, dtype=None, name=None):
import paddle
paddle.disable_static()
# example 1: attr shape is a list which doesn't contain Tensor.
out1 = paddle.standard_normal(shape=[2, 3])
# [[-2.923464 , 0.11934398, -0.51249987], # random
# [ 0.39632758, 0.08177969, 0.2692008 ]] # random
# example 2: attr shape is a list which contains Tensor.
dim1 = paddle.full([1], 2, "int64")
dim2 = paddle.full([1], 3, "int32")
dim1 = paddle.to_tensor([2], 'int64')
dim2 = paddle.to_tensor([3], 'int32')
out2 = paddle.standard_normal(shape=[dim1, dim2, 2])
# [[[-2.8852394 , -0.25898588], # random
# [-0.47420555, 0.17683524], # random
@@ -272,8 +270,7 @@ def standard_normal(shape, dtype=None, name=None):
# example 3: attr shape is a Tensor, the data type must be int64 or int32.
shape_tensor = paddle.to_tensor([2, 3])
result_3 = paddle.standard_normal(shape_tensor)
out3 = paddle.standard_normal(shape_tensor)
# [[-2.878077 , 0.17099959, 0.05111201] # random
# [-0.3761474, -1.044801 , 1.1870178 ]] # random
@@ -281,7 +278,58 @@ def standard_normal(shape, dtype=None, name=None):
return gaussian(shape=shape, mean=0.0, std=1.0, dtype=dtype, name=name)
randn = standard_normal
def randn(shape, dtype=None, name=None):
"""
This OP returns a Tensor filled with random values sampled from a standard
normal distribution with mean 0 and standard deviation 1, with ``shape``
and ``dtype``.
Args:
shape (list|tuple|Tensor): The shape of the output Tensor. If ``shape``
is a list or tuple, the elements of it should be integers or Tensors
(with the shape [1], and the data type int32 or int64). If ``shape``
is a Tensor, it should be a 1-D Tensor(with the data type int32 or
int64).
dtype (str|np.dtype, optional): The data type of the output Tensor.
Supported data types: float32, float64.
Default is None, use global default dtype (see ``get_default_dtype``
for details).
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: A Tensor filled with random values sampled from a standard
normal distribution with mean 0 and standard deviation 1, with
``shape`` and ``dtype``.
Examples:
.. code-block:: python
import paddle
# example 1: attr shape is a list which doesn't contain Tensor.
out1 = paddle.randn(shape=[2, 3])
# [[-2.923464 , 0.11934398, -0.51249987], # random
# [ 0.39632758, 0.08177969, 0.2692008 ]] # random
# example 2: attr shape is a list which contains Tensor.
dim1 = paddle.to_tensor([2], 'int64')
dim2 = paddle.to_tensor([3], 'int32')
out2 = paddle.randn(shape=[dim1, dim2, 2])
# [[[-2.8852394 , -0.25898588], # random
# [-0.47420555, 0.17683524], # random
# [-0.7989969 , 0.00754541]], # random
# [[ 0.85201347, 0.32320443], # random
# [ 1.1399018 , 0.48336947], # random
# [ 0.8086993 , 0.6868893 ]]] # random
# example 3: attr shape is a Tensor, the data type must be int64 or int32.
shape_tensor = paddle.to_tensor([2, 3])
out3 = paddle.randn(shape_tensor)
# [[-2.878077 , 0.17099959, 0.05111201] # random
# [-0.3761474, -1.044801 , 1.1870178 ]] # random
"""
return standard_normal(shape, dtype, name)
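randn and the other samplers here draw from the global generator, so runs can be made reproducible with paddle.seed. A minimal sketch, assuming the default generator on a single device:

.. code-block:: python

    import paddle

    paddle.seed(2021)         # fix the global RNG
    a = paddle.randn([2, 3])
    paddle.seed(2021)         # reset to the same state
    b = paddle.randn([2, 3])  # should reproduce the values in a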
def normal(mean=0.0, std=1.0, shape=None, name=None):
@@ -322,8 +370,6 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
import paddle
paddle.disable_static()
out1 = paddle.normal(shape=[2, 3])
# [[ 0.17501129 0.32364586 1.561118 ] # random
# [-1.7232178 1.1545963 -0.76156676]] # random
@@ -381,7 +427,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
Examples:
::
.. code-block:: text
Input:
shape = [1, 2]
@@ -423,33 +469,27 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
import paddle
paddle.disable_static()
# example 1:
# attr shape is a list which doesn't contain Tensor.
result_1 = paddle.tensor.random.uniform(shape=[3, 4])
# [[ 0.84524226, 0.6921872, 0.56528175, 0.71690357],
# [-0.34646994, -0.45116323, -0.09902662, -0.11397249],
# [ 0.433519, 0.39483607, -0.8660099, 0.83664286]]
out1 = paddle.uniform(shape=[3, 4])
# [[ 0.84524226, 0.6921872, 0.56528175, 0.71690357], # random
# [-0.34646994, -0.45116323, -0.09902662, -0.11397249], # random
# [ 0.433519, 0.39483607, -0.8660099, 0.83664286]] # random
# example 2:
# attr shape is a list which contains Tensor.
dim_1 = paddle.full([1], 2, "int64")
dim_2 = paddle.full([1], 3, "int32")
result_2 = paddle.tensor.random.uniform(shape=[dim_1, dim_2])
# [[-0.9951253, 0.30757582, 0.9899647 ],
# [ 0.5864527, 0.6607096, -0.8886161 ]]
dim1 = paddle.to_tensor([2], 'int64')
dim2 = paddle.to_tensor([3], 'int32')
out2 = paddle.uniform(shape=[dim1, dim2])
# [[-0.9951253, 0.30757582, 0.9899647 ], # random
# [ 0.5864527, 0.6607096, -0.8886161]] # random
# example 3:
# attr shape is a Tensor, the data type must be int64 or int32.
shape_tensor = paddle.to_tensor([2, 3])
result_3 = paddle.tensor.random.uniform(shape_tensor)
# if shape_tensor's value is [2, 3]
# result_3 is:
# [[-0.8517412, -0.4006908, 0.2551912 ],
# [ 0.3364414, 0.36278176, -0.16085452]]
out3 = paddle.uniform(shape_tensor)
# [[-0.8517412, -0.4006908, 0.2551912 ], # random
# [ 0.3364414, 0.36278176, -0.16085452]] # random
"""
if dtype is None:
dtype = paddle.framework.get_default_dtype()
@@ -517,8 +557,6 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
import paddle
paddle.disable_static()
# example 1:
# attr shape is a list which doesn't contain Tensor.
out1 = paddle.randint(low=-5, high=5, shape=[3])
@@ -526,18 +564,16 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
# example 2:
# attr shape is a list which contains Tensor.
dim1 = paddle.full([1], 2, "int64")
dim2 = paddle.full([1], 3, "int32")
out2 = paddle.randint(low=-5, high=5, shape=[dim1, dim2], dtype="int32")
dim1 = paddle.to_tensor([2], 'int64')
dim2 = paddle.to_tensor([3], 'int32')
out2 = paddle.randint(low=-5, high=5, shape=[dim1, dim2])
# [[0, -1, -3], # random
# [4, -2, 0]] # random
# example 3:
# attr shape is a Tensor
shape_tensor = paddle.to_tensor(3)
result_3 = paddle.randint(low=-5, high=5, shape=shape_tensor)
out3 = paddle.randint(low=-5, high=5, shape=shape_tensor)
# [-2, 2, 3] # random
# example 4:
@@ -611,8 +647,6 @@ def randperm(n, dtype="int64", name=None):
import paddle
paddle.disable_static()
out1 = paddle.randperm(5)
# [4, 1, 2, 3, 0] # random
@@ -668,15 +702,14 @@ def rand(shape, dtype=None, name=None):
import paddle
paddle.disable_static()
# example 1: attr shape is a list which doesn't contain Tensor.
out1 = paddle.rand(shape=[2, 3])
# [[0.451152 , 0.55825245, 0.403311 ], # random
# [0.22550228, 0.22106001, 0.7877319 ]] # random
# example 2: attr shape is a list which contains Tensor.
dim1 = paddle.full([1], 2, "int64")
dim2 = paddle.full([1], 3, "int32")
dim1 = paddle.to_tensor([2], 'int64')
dim2 = paddle.to_tensor([3], 'int32')
out2 = paddle.rand(shape=[dim1, dim2, 2])
# [[[0.8879919 , 0.25788337], # random
# [0.28826773, 0.9712097 ], # random
@@ -687,8 +720,7 @@ def rand(shape, dtype=None, name=None):
# example 3: attr shape is a Tensor, the data type must be int64 or int32.
shape_tensor = paddle.to_tensor([2, 3])
result_3 = paddle.rand(shape_tensor)
out3 = paddle.rand(shape_tensor)
# [[0.22920267, 0.841956 , 0.05981819], # random
# [0.4836288 , 0.24573246, 0.7516129 ]] # random
......
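rand samples the uniform distribution on [0, 1), i.e. it behaves like uniform with min=0.0 and max=1.0. A sketch of the correspondence (that the two calls give identical draws under the same seed is an assumption about them sharing a kernel):

.. code-block:: python

    import paddle

    paddle.seed(0)
    a = paddle.rand([2, 3])
    paddle.seed(0)
    b = paddle.uniform([2, 3], min=0.0, max=1.0)  # same distribution as a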
@@ -56,17 +56,13 @@ def mean(x, axis=None, keepdim=False, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = np.array([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]], 'float32')
x = paddle.to_tensor(x)
x = paddle.to_tensor([[[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.]],
[[13., 14., 15., 16.],
[17., 18., 19., 20.],
[21., 22., 23., 24.]]])
out1 = paddle.mean(x)
# [12.5]
out2 = paddle.mean(x, axis=-1)
@@ -145,12 +141,8 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = np.array([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
x = paddle.to_tensor(x)
x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
out1 = paddle.var(x)
# [2.66666667]
out2 = paddle.var(x, axis=1)
@@ -208,12 +200,8 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = np.array([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
x = paddle.to_tensor(x)
x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
out1 = paddle.std(x)
# [1.63299316]
out2 = paddle.std(x, axis=1)
......
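std is the square root of var, and both default to the unbiased estimate (N - 1 divisor, unbiased=True). A NumPy check against the values above:

.. code-block:: python

    import numpy as np

    x = np.array([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
    var = np.var(x, ddof=1)  # ddof=1 -> unbiased (N - 1) divisor
    std = np.std(x, ddof=1)
    print(var)  # 2.6666667, as in paddle.var(x)
    print(std)  # 1.6329932, as in paddle.std(x)
    assert np.isclose(std, np.sqrt(var))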