Unverified · Commit 3c2a46bd · authored by 徐铭远, committed by GitHub

fix doc of erf, rank, mm, cross_entropy, pixel_shuffle, kron... (#29126)

* fix doc example, test=develop, test=document_fix

Parent: d576d6dd
@@ -11253,25 +11253,24 @@ def shape(input):

 def rank(input):
     """
-    :alias_main: paddle.rank
-    :alias: paddle.rank,paddle.tensor.rank,paddle.tensor.attribute.rank
-    :old_api: paddle.fluid.layers.rank

     The OP returns the number of dimensions for a tensor, which is a 0-D int32 Tensor.

     Args:
-        input (Variable): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.
+        input (Tensor): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.

     Returns:
-        Variable, the output data type is int32: The 0-D tensor with the dimensions of the input variable.
+        Tensor, the output data type is int32: The 0-D tensor with the dimensions of the input Tensor.

     Examples:
         .. code-block:: python

-            import paddle.fluid as fluid
-
-            input = fluid.data(name="input", shape=[3, 100, 100], dtype="float32")
-            rank = fluid.layers.rank(input) # rank=(3,)
+            import paddle
+
+            input = paddle.rand((3, 100, 100))
+            rank = paddle.rank(input)
+            print(rank)
+            # 3
     """
     check_type(input, 'input', (Variable), 'input')
     ndims = len(input.shape)
...
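The updated example returns a 0-D tensor rather than a shape; a quick check of what `paddle.rank` produces (assuming a Paddle 2.0+ dygraph session):

```python
import paddle

x = paddle.rand((3, 100, 100))
r = paddle.rank(x)  # 0-D int32 Tensor holding the value 3
assert int(r) == len(x.shape) == 3
```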
@@ -160,7 +160,9 @@ Examples:
         import paddle.nn.functional as F

         x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
-        out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+        out = F.tanhshrink(x)
+        print(out)
+        # [-0.020051, -0.00262468, 0.000332005, 0.00868739]

 """)
@@ -185,6 +187,7 @@ Examples:
         x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
         out = paddle.rsqrt(x)
+        print(out)
         # [3.16227766 2.23606798 1.82574186 1.58113883]

 """)
@@ -353,7 +356,9 @@ Examples:
         import paddle.nn.functional as F

         x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
-        out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
+        out = F.softplus(x)
+        print(out)
+        # [0.513015, 0.598139, 0.744397, 0.854355]

 """)
@@ -365,7 +370,9 @@ Examples:
         import paddle.nn.functional as F

         x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
-        out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+        out = F.softsign(x)
+        print(out)
+        # [-0.285714, -0.166667, 0.0909091, 0.230769]

 """)
...
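For reference, the values printed in these activation examples follow directly from the elementwise definitions tanhshrink(x) = x − tanh(x), rsqrt(x) = 1/√x, softplus(x) = log(1 + eˣ) (with the default beta = 1), and softsign(x) = x/(1 + |x|); a minimal NumPy check:

```python
import numpy as np

x = np.array([-0.4, -0.2, 0.1, 0.3])
print(x - np.tanh(x))       # tanhshrink -> [-0.020051  -0.00262468  0.000332005  0.00868739]
print(np.log1p(np.exp(x)))  # softplus   -> [0.513015  0.598139  0.744397  0.854355]
print(x / (1 + np.abs(x)))  # softsign   -> [-0.285714  -0.166667  0.0909091  0.230769]
print(1 / np.sqrt(np.array([0.1, 0.2, 0.3, 0.4])))  # rsqrt -> [3.16227766 2.23606798 1.82574186 1.58113883]
```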
@@ -1184,23 +1184,22 @@ def cross_entropy(input,

     Returns:
-        The tensor variable storing the cross_entropy_loss of input and label.
-
-    Return type: Variable.
+        Tensor. The tensor storing the cross_entropy_loss of input and label.

     Examples:
         .. code-block:: python

             import paddle
-            import paddle.nn.functional as F
             import numpy as np

-            input_np = np.random.random([2, 4]).astype(np.float64)
-            label_np = np.random.randint(0, 4, size=(2)).astype(np.int64)
-            weight_np = np.random.random([4]).astype(np.float64) #shape:C
-            output = F.softmax_cross_entropy(
-                paddle.to_tensor(input_np),
-                paddle.to_tensor(label_np),
-                weight=paddle.to_tensor(weight_np))
-            print(output.numpy()) #[1.30719427]
+            input_data = np.random.random([5, 100]).astype("float64")
+            label_data = np.random.randint(0, 100, size=(5)).astype(np.int64)
+            weight_data = np.random.random([100]).astype("float64")
+            input = paddle.to_tensor(input_data)
+            label = paddle.to_tensor(label_data)
+            weight = paddle.to_tensor(weight_data)
+            loss = paddle.nn.functional.cross_entropy(input=input, label=label, weight=weight)
+            print(loss)
+            # [4.28546723]
     """
     if reduction not in ['sum', 'mean', 'none']:
...
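The new example prints a single weighted loss value. As a sanity check, a minimal NumPy sketch of weighted softmax cross entropy (assuming the common 'mean' reduction that normalizes by the sum of the selected class weights; the exact normalization should be confirmed against the Paddle implementation):

```python
import numpy as np

rng = np.random.default_rng(0)
logits = rng.random((5, 100))
labels = rng.integers(0, 100, size=5)
weight = rng.random(100)

# softmax over the class axis, in a numerically stable form
z = logits - logits.max(axis=1, keepdims=True)
probs = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)

# per-sample weighted negative log-likelihood of the true class
nll = -weight[labels] * np.log(probs[np.arange(5), labels])
print(nll.sum() / weight[labels].sum())  # assumed weighted 'mean' reduction
```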
@@ -534,6 +534,7 @@ def max_pool1d(x,

     Examples:
         .. code-block:: python

             import paddle
             import paddle.nn.functional as F

             data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))

@@ -655,6 +656,7 @@ def max_pool2d(x,
         ShapeError: If the output's shape calculated is not greater than 0.

     Examples:
         .. code-block:: python

             import paddle
             import paddle.nn.functional as F
             import numpy as np

@@ -784,6 +786,7 @@ def max_pool3d(x,
         ShapeError: If the output's shape calculated is not greater than 0.

     Examples:
         .. code-block:: python

             import paddle
             import paddle.nn.functional as F
             import numpy as np
...
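These pooling examples only show the setup; the operation itself is a strided window maximum. A minimal NumPy sketch of the 1-D case for a call like `F.max_pool1d(data, kernel_size=2)` (assuming the stride defaults to the kernel size and no padding):

```python
import numpy as np

data = np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)
k = 2  # kernel_size == stride, no padding

# group each non-overlapping window of k values and take the max
out = data[:, :, : (32 // k) * k].reshape(1, 3, 32 // k, k).max(axis=-1)
print(out.shape)  # (1, 3, 16)
```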
@@ -341,15 +341,14 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
         ValueError: If the square of upscale_factor cannot divide the channels of input.

     Examples:
         .. code-block:: python

             import paddle
             import paddle.nn.functional as F
             import numpy as np

             x = np.random.randn(2, 9, 4, 4).astype(np.float32)
-            paddle.disable_static()
             x_var = paddle.to_tensor(x)
             out_var = F.pixel_shuffle(x_var, 3)
             out = out_var.numpy()
+            print(out.shape)
             # (2, 1, 12, 12)
     """
     if not in_dygraph_mode():
...
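pixel_shuffle maps (N, C·r², H, W) to (N, C, H·r, W·r). The same rearrangement can be written as a reshape/transpose pair in NumPy, which makes the (2, 9, 4, 4) → (2, 1, 12, 12) shape above easy to verify:

```python
import numpy as np

n, c, r, h, w = 2, 1, 3, 4, 4
x = np.random.randn(n, c * r * r, h, w).astype(np.float32)

# split the channel axis into (c, r, r), then interleave the two r axes
# into the spatial dimensions
out = (x.reshape(n, c, r, r, h, w)
        .transpose(0, 1, 4, 2, 5, 3)
        .reshape(n, c, h * r, w * r))
print(out.shape)  # (2, 1, 12, 12)
```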
@@ -177,15 +177,15 @@ class CrossEntropyLoss(fluid.dygraph.Layer):

     Parameters:
-        input (Variable): Input tensor, the data type is float32, float64. Shape is
+        input (Tensor): Input tensor, the data type is float32, float64. Shape is
             (N, C), where C is number of classes, and if shape is more than 2D, this
             is (N, C, D1, D2,..., Dk), k >= 1.
-        label (Variable): Label tensor, the data type is int64. Shape is (N), where each
+        label (Tensor): Label tensor, the data type is int64. Shape is (N), where each
             value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is
             (N, D1, D2,..., Dk), k >= 1.
-        weight (Variable, optional): Weight tensor, a manual rescaling weight for each
-            sample relative to each class. It has the same shape as label.
-            and the data type is float32, float64. Default is ``'None'``.
+        weight (Tensor, optional): Weight tensor, a manual rescaling weight given
+            to each class and the shape is (C). It has the same dimensions as class
+            number and the data type is float32, float64. Default is ``'None'``.
         reduction (str, optional): Indicate how to average the loss by batch_size,
             the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
             If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;

@@ -202,24 +202,24 @@ class CrossEntropyLoss(fluid.dygraph.Layer):

     Returns:
-        The tensor variable storing the cross_entropy_loss of input and label.
-
-    Return type: Variable.
+        Tensor. The tensor storing the cross_entropy_loss of input and label.

     Examples:
         .. code-block:: python

             import paddle
             import numpy as np

-            input_np = np.random.random([2, 4]).astype(np.float64)
-            label_np = np.random.randint(0, 4, size=(2, 1)).astype(np.int64)
-            weight_np = np.random.random([4]).astype(np.float64) #shape:C
-            weight_ce = weight_np[label_np] #shape:N,1
-            cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
-                weight=paddle.to_tensor(weight_ce))
-            output = cross_entropy_loss(
-                paddle.to_tensor(input_np),
-                paddle.to_tensor(label_np))
-            print(output.numpy()) #[1.44375251]
+            input_data = paddle.uniform([5, 100], dtype="float64")
+            label_data = np.random.randint(0, 100, size=(5)).astype(np.int64)
+            weight_data = np.random.random([100]).astype("float64")
+            input = paddle.to_tensor(input_data)
+            label = paddle.to_tensor(label_data)
+            weight = paddle.to_tensor(weight_data)
+            ce_loss = paddle.nn.CrossEntropyLoss(weight=weight, reduction='mean')
+            output = ce_loss(input, label)
+            print(output)
+            # [4.84496039]
     """

     def __init__(self,

@@ -861,7 +861,9 @@ class MarginRankingLoss(fluid.dygraph.Layer):
             label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
             margin_rank_loss = paddle.nn.MarginRankingLoss()
             loss = margin_rank_loss(input, other, label)
-            print(loss) # [0.75]
+            print(loss)
+            # [0.75]
     """

     def __init__(self, margin=0.0, reduction='mean', name=None):
...
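MarginRankingLoss with the defaults margin=0.0 and reduction='mean' computes mean(max(0, −label·(input − other) + margin)). The diff truncates the input and other tensors of the example, so the sketch below uses hypothetical values chosen to reproduce the printed 0.75:

```python
import numpy as np

input = np.array([[1.0, 2.0], [3.0, 4.0]])   # hypothetical values
other = np.array([[2.0, 1.0], [2.0, 4.0]])   # hypothetical values
label = np.array([[1.0, -1.0], [-1.0, -1.0]])
margin = 0.0

# elementwise hinge on the ranking direction requested by label
loss = np.maximum(0.0, -label * (input - other) + margin).mean()
print(loss)  # 0.75 with these values
```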
@@ -920,10 +920,15 @@ class AdaptiveMaxPool2D(layers.Layer):

         .. math::

             hstart &= floor(i * H_{in} / H_{out})

             hend &= ceil((i + 1) * H_{in} / H_{out})

             wstart &= floor(j * W_{in} / W_{out})

             wend &= ceil((j + 1) * W_{in} / W_{out})

             Output(i, j) &= max(Input[hstart:hend, wstart:wend])

     Parameters:
         output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain two elements, (H, W). H and W can be either an int, or None which means the size will be the same as that of the input.
         return_mask (bool): If true, the index of max pooling point will be returned along with outputs. It cannot be set in average pooling type. Default False.

@@ -987,11 +992,17 @@ class AdaptiveMaxPool3D(layers.Layer):

         .. math::

             dstart &= floor(i * D_{in} / D_{out})

             dend &= ceil((i + 1) * D_{in} / D_{out})

             hstart &= floor(j * H_{in} / H_{out})

             hend &= ceil((j + 1) * H_{in} / H_{out})

             wstart &= floor(k * W_{in} / W_{out})

             wend &= ceil((k + 1) * W_{in} / W_{out})

             Output(i, j, k) &= max(Input[dstart:dend, hstart:hend, wstart:wend])

     Parameters:
...
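The floor/ceil formulas above fully determine the (possibly overlapping) adaptive pooling windows. A small sketch of the row windows for H_in = 5, H_out = 3:

```python
import math

H_in, H_out = 5, 3
for i in range(H_out):
    hstart = math.floor(i * H_in / H_out)
    hend = math.ceil((i + 1) * H_in / H_out)
    print(i, (hstart, hend))
# 0 (0, 2)
# 1 (1, 4)
# 2 (3, 5)
```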
@@ -52,7 +52,6 @@ class PixelShuffle(layers.Layer):

             import paddle.nn as nn
             import numpy as np

-            paddle.disable_static()
             x = np.random.randn(2, 9, 4, 4).astype(np.float32)
             x_var = paddle.to_tensor(x)
             pixel_shuffle = nn.PixelShuffle(3)
...
@@ -790,8 +790,10 @@ def mm(input, mat2, name=None):
     nontransposed, the prepended or appended dimension :math:`1` will be
     removed after matrix multiplication.

+    This op does not support broadcasting. See paddle.matmul.
+
     Args:
-        x (Tensor): The input tensor which is a Tensor.
+        input (Tensor): The input tensor which is a Tensor.
         mat2 (Tensor): The input tensor which is a Tensor.
         name(str, optional): The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`

@@ -802,31 +804,16 @@ def mm(input, mat2, name=None):

     Examples:
         .. code-block:: python

-            # Examples to clarify shapes of the inputs and output
-            # x: [B, ..., M, K], mat2: [B, ..., K, N]
-            # paddle.matmul(x, mat2)  # out: [B, ..., M, N]
-            # x: [B, M, K], mat2: [B, K, N]
-            # paddle.matmul(x, mat2)  # out: [B, M, N]
-            # x: [B, M, K], mat2: [K, N]
-            # paddle.matmul(x, mat2)  # out: [B, M, N]
-            # x: [M, K], mat2: [K, N]
-            # paddle.matmul(x, mat2)  # out: [M, N]
-            # x: [B, M, K], mat2: [K]
-            # paddle.matmul(x, mat2)  # out: [B, M]
-            # x: [K], mat2: [K]
-            # paddle.matmul(x, mat2)  # out: [1]
-
             import paddle

-            x = paddle.rand(shape=[2, 3], dtype='float32')
-            y = paddle.rand(shape=[3, 2], dtype='float32')
-            out = paddle.mm(x, y)
-            print(out.shape) # [2, 2]
+            input = paddle.arange(1, 7).reshape((3, 2)).astype('float32')
+            mat2 = paddle.arange(1, 9).reshape((2, 4)).astype('float32')
+            out = paddle.mm(input, mat2)
+            print(out)
+            # [[11., 14., 17., 20.],
+            #  [23., 30., 37., 44.],
+            #  [35., 46., 57., 68.]]
     """
     if in_dygraph_mode():
         out = _varbase_creator(dtype=input.dtype)
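The matrix printed in the new mm example is an ordinary (non-broadcasting) matrix product and can be reproduced with NumPy:

```python
import numpy as np

a = np.arange(1, 7, dtype=np.float32).reshape(3, 2)
b = np.arange(1, 9, dtype=np.float32).reshape(2, 4)
print(a @ b)
# [[11. 14. 17. 20.]
#  [23. 30. 37. 44.]
#  [35. 46. 57. 68.]]
```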
@@ -1407,7 +1394,7 @@ def addcmul(input, tensor1, tensor2, value=1.0, name=None):
         out(Tensor): The output result. A Tensor with the same data type as input's.

     Examples:
         .. code-block:: python

             import paddle

             input = paddle.ones([2,2])
             tensor1 = paddle.ones([2,2])
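The addcmul example is truncated here; assuming the usual definition out = input + value · tensor1 · tensor2 implied by the signature above, the all-ones inputs would give a result filled with 2. A NumPy sketch under that assumption:

```python
import numpy as np

input = np.ones((2, 2))
tensor1 = np.ones((2, 2))
tensor2 = np.ones((2, 2))
value = 1.0  # default scaling factor in the signature above

out = input + value * tensor1 * tensor2  # assumed addcmul formula
print(out)  # [[2. 2.], [2. 2.]]
```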
@@ -1609,8 +1596,6 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):

 @templatedoc(op_type="kron")
 def kron(x, y, name=None):
     """
-    :alias_main: paddle.kron
-    :alias: paddle.kron,paddle.tensor.kron,paddle.tensor.math.kron

 ${comment}

@@ -1630,28 +1615,17 @@ ${comment}

     Examples:
         .. code-block:: python

             import paddle
-            from paddle import fluid
-            import paddle.fluid.dygraph as dg
-            import numpy as np
-
-            a = np.arange(1, 5).reshape(2, 2).astype(np.float32)
-            b = np.arange(1, 10).reshape(3, 3).astype(np.float32)
-
-            place = fluid.CPUPlace()
-            with dg.guard(place):
-                a_var = dg.to_variable(a)
-                b_var = dg.to_variable(b)
-                c_var = paddle.kron(a_var, b_var)
-                c_np = c_var.numpy()
-            print(c_np)
-            # [[ 1.  2.  3.  2.  4.  6.]
-            #  [ 4.  5.  6.  8. 10. 12.]
-            #  [ 7.  8.  9. 14. 16. 18.]
-            #  [ 3.  6.  9.  4.  8. 12.]
-            #  [12. 15. 18. 16. 20. 24.]
-            #  [21. 24. 27. 28. 32. 36.]]
+            x = paddle.to_tensor([[1, 2], [3, 4]], dtype='int64')
+            y = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='int64')
+            out = paddle.kron(x, y)
+            print(out)
+            # [[ 1,  2,  3,  2,  4,  6],
+            #  [ 4,  5,  6,  8, 10, 12],
+            #  [ 7,  8,  9, 14, 16, 18],
+            #  [ 3,  6,  9,  4,  8, 12],
+            #  [12, 15, 18, 16, 20, 24],
+            #  [21, 24, 27, 28, 32, 36]]
     """
     if in_dygraph_mode():
         return core.ops.kron(x, y)
...
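The Kronecker product places x[i, j]·y as the (i, j) block of the output, i.e. out[i·p + k, j·q + l] = x[i, j]·y[k, l] for y of shape (p, q); NumPy's np.kron reproduces the matrix printed in the new example:

```python
import numpy as np

x = np.array([[1, 2], [3, 4]])
y = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(np.kron(x, y))
# [[ 1  2  3  2  4  6]
#  [ 4  5  6  8 10 12]
#  [ 7  8  9 14 16 18]
#  [ 3  6  9  4  8 12]
#  [12 15 18 16 20 24]
#  [21 24 27 28 32 36]]
```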