Unverified commit 3c2a46bd authored by 徐铭远, committed by GitHub

fix doc of erf,rank,mm,cross_entropy,pixel_shuffle,kron... (#29126)

* fix doc example, test=develop, test=document_fix
Parent d576d6dd
......@@ -11253,25 +11253,24 @@ def shape(input):
def rank(input):
"""
:alias_main: paddle.rank
:alias: paddle.rank,paddle.tensor.rank,paddle.tensor.attribute.rank
:old_api: paddle.fluid.layers.rank
The OP returns the number of dimensions of a tensor as a 0-D int32 Tensor.
Args:
input (Variable): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.
input (Tensor): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.
Returns:
Variable, the output data type is int32.: The 0-D tensor with the dimensions of the input variable.
Tensor: The 0-D int32 tensor holding the number of dimensions of the input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
input = fluid.data(name="input", shape=[3, 100, 100], dtype="float32")
rank = fluid.layers.rank(input) # rank=(3,)
input = paddle.rand((3, 100, 100))
rank = paddle.rank(input)
print(rank)
# 3
"""
check_type(input, 'input', (Variable), 'input')
ndims = len(input.shape)
......
......@@ -160,7 +160,9 @@ Examples:
import paddle.nn.functional as F
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
out = F.tanhshrink(x)
print(out)
# [-0.020051, -0.00262468, 0.000332005, 0.00868739]
""")
......@@ -185,6 +187,7 @@ Examples:
x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
out = paddle.rsqrt(x)
print(out)
# [3.16227766 2.23606798 1.82574186 1.58113883]
""")
......@@ -353,7 +356,9 @@ Examples:
import paddle.nn.functional as F
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
out = F.softplus(x)
print(out)
# [0.513015, 0.598139, 0.744397, 0.854355]
""")
......@@ -365,7 +370,9 @@ Examples:
import paddle.nn.functional as F
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
out = F.softsign(x)
print(out)
# [-0.285714, -0.166667, 0.0909091, 0.230769]
""")
......
......@@ -1184,23 +1184,22 @@ def cross_entropy(input,
Returns:
The tensor variable storing the cross_entropy_loss of input and label.
Tensor. The tensor storing the cross_entropy_loss of input and label.
Return type: Variable.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
input_np = np.random.random([2, 4]).astype(np.float64)
label_np = np.random.randint(0, 4, size=(2)).astype(np.int64)
weight_np = np.random.random([4]).astype(np.float64) #shape:C
output = F.softmax_cross_entropy(
paddle.to_tensor(input_np),
paddle.to_tensor(label_np),
weight=paddle.to_tensor(weight_np))
print(output.numpy()) #[1.30719427]
input_data = np.random.random([5, 100]).astype("float64")
label_data = np.random.randint(0, 100, size=(5)).astype(np.int64)
weight_data = np.random.random([100]).astype("float64")
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
weight = paddle.to_tensor(weight_data)
loss = paddle.nn.functional.cross_entropy(input=input, label=label, weight=weight)
print(loss)
# [4.28546723]
"""
if reduction not in ['sum', 'mean', 'none']:
......
......@@ -534,6 +534,7 @@ def max_pool1d(x,
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
......@@ -655,6 +656,7 @@ def max_pool2d(x,
ShapeError: If the computed output shape is not greater than 0.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
......@@ -784,6 +786,7 @@ def max_pool3d(x,
ShapeError: If the computed output shape is not greater than 0.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
......
......@@ -341,15 +341,14 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
ValueError: If the number of channels of the input is not divisible by the square of upscale_factor.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = np.random.randn(2, 9, 4, 4).astype(np.float32)
paddle.disable_static()
x_var = paddle.to_tensor(x)
out_var = F.pixel_shuffle(x_var, 3)
out = out_var.numpy()
print(out.shape)
# (2, 1, 12, 12)
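For reference, the output shape follows a fixed rule (r = upscale_factor): (N, C, H, W) -> (N, C/r**2, H*r, W*r). A minimal sketch of that rule in plain Python (pixel_shuffle_shape is a hypothetical helper, not part of the API):

def pixel_shuffle_shape(n, c, h, w, r):
    # channels must be divisible by r*r, otherwise pixel_shuffle raises ValueError
    assert c % (r * r) == 0
    return (n, c // (r * r), h * r, w * r)

print(pixel_shuffle_shape(2, 9, 4, 4, 3))  # (2, 1, 12, 12)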
"""
if not in_dygraph_mode():
......
......@@ -177,15 +177,15 @@ class CrossEntropyLoss(fluid.dygraph.Layer):
Parameters:
input (Variable): Input tensor, the data type is float32, float64. Shape is
input (Tensor): Input tensor, the data type is float32, float64. Shape is
(N, C), where C is number of classes, and if shape is more than 2D, this
is (N, C, D1, D2,..., Dk), k >= 1.
label (Variable): Label tensor, the data type is int64. Shape is (N), where each
label (Tensor): Label tensor, the data type is int64. Shape is (N), where each
value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is
(N, D1, D2,..., Dk), k >= 1.
weight (Variable, optional): Weight tensor, a manual rescaling weight for each
sample relative to each class. It has the same shape as label.
and the data type is float32, float64. Default is ``'None'``.
weight (Tensor, optional): Weight tensor, a manual rescaling weight given
to each class, with shape (C) where C is the number of classes. The data
type is float32, float64. Default is ``'None'``.
reduction (str, optional): Indicates how to average the loss by batch_size,
the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
......@@ -202,24 +202,24 @@ class CrossEntropyLoss(fluid.dygraph.Layer):
Returns:
The tensor variable storing the cross_entropy_loss of input and label.
Tensor. The tensor storing the cross_entropy_loss of input and label.
Return type: Variable.
Examples:
.. code-block:: python
import paddle
import numpy as np
input_np = np.random.random([2, 4]).astype(np.float64)
label_np = np.random.randint(0, 4, size=(2, 1)).astype(np.int64)
weight_np = np.random.random([4]).astype(np.float64) #shape:C
weight_ce = weight_np[label_np] #shape:N,1
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=paddle.to_tensor(weight_ce))
output = cross_entropy_loss(
paddle.to_tensor(input_np),
paddle.to_tensor(label_np))
print(output.numpy()) #[1.44375251]
input_data = paddle.uniform([5, 100], dtype="float64")
label_data = np.random.randint(0, 100, size=(5)).astype(np.int64)
weight_data = np.random.random([100]).astype("float64")
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
weight = paddle.to_tensor(weight_data)
ce_loss = paddle.nn.CrossEntropyLoss(weight=weight, reduction='mean')
output = ce_loss(input, label)
print(output)
# [4.84496039]
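For intuition, a minimal NumPy sketch of the weighted 'mean' reduction above, assuming the conventional weighted-mean formula (divide by the summed per-sample class weights); weighted_ce is a hypothetical helper, not part of the API:

import numpy as np

def weighted_ce(logits, labels, weight):
    # numerically stable log-softmax
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    per_sample = -log_probs[np.arange(len(labels)), labels] * weight[labels]
    # 'mean' divides by the sum of the applied weights, not the batch size
    return per_sample.sum() / weight[labels].sum()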
"""
def __init__(self,
......@@ -861,7 +861,9 @@ class MarginRankingLoss(fluid.dygraph.Layer):
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
margin_rank_loss = paddle.nn.MarginRankingLoss()
loss = margin_rank_loss(input, other, label)
print(loss) # [0.75]
print(loss)
# [0.75]
"""
def __init__(self, margin=0.0, reduction='mean', name=None):
......
......@@ -920,10 +920,15 @@ class AdaptiveMaxPool2D(layers.Layer):
.. math::
hstart &= floor(i * H_{in} / H_{out})
hend &= ceil((i + 1) * H_{in} / H_{out})
wstart &= floor(j * W_{in} / W_{out})
wend &= ceil((j + 1) * W_{in} / W_{out})
Output(i, j) &= max(Input[hstart:hend, wstart:wend])
Parameters:
output_size (int|list|tuple): The pool kernel size. If the pool kernel size is a tuple or list, it must contain two elements, (H, W). H and W can be either an int, or None, which means the size will be the same as that of the input.
return_mask (bool): If true, the index of the max pooling point will be returned along with the outputs; this flag is not supported for average pooling. Default: False.
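A minimal usage sketch (the shapes are illustrative assumptions):

import paddle

x = paddle.rand([1, 2, 8, 8])                      # N, C, H_in, W_in
pool = paddle.nn.AdaptiveMaxPool2D(output_size=3)  # H_out = W_out = 3
out = pool(x)
print(out.shape)                                   # [1, 2, 3, 3]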
......@@ -987,11 +992,17 @@ class AdaptiveMaxPool3D(layers.Layer):
.. math::
dstart &= floor(i * D_{in} / D_{out})
dend &= ceil((i + 1) * D_{in} / D_{out})
hstart &= floor(j * H_{in} / H_{out})
hend &= ceil((j + 1) * H_{in} / H_{out})
wstart &= floor(k * W_{in} / W_{out})
wend &= ceil((k + 1) * W_{in} / W_{out})
Output(i, j, k) &= max(Input[dstart:dend, hstart:hend, wstart:wend])
Parameters:
......
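A minimal usage sketch for the 3D variant (the shapes are illustrative assumptions):

import paddle

x = paddle.rand([1, 2, 8, 8, 8])                   # N, C, D_in, H_in, W_in
pool = paddle.nn.AdaptiveMaxPool3D(output_size=4)  # D_out = H_out = W_out = 4
out = pool(x)
print(out.shape)                                   # [1, 2, 4, 4, 4]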
......@@ -52,7 +52,6 @@ class PixelShuffle(layers.Layer):
import paddle.nn as nn
import numpy as np
paddle.disable_static()
x = np.random.randn(2, 9, 4, 4).astype(np.float32)
x_var = paddle.to_tensor(x)
pixel_shuffle = nn.PixelShuffle(3)
......
......@@ -790,8 +790,10 @@ def mm(input, mat2, name=None):
nontransposed, the prepended or appended dimension :math:`1` will be
removed after matrix multiplication.
This op does not support broadcasting; see paddle.matmul if broadcasting is needed (a short contrast is sketched after the example below).
Args:
x (Tensor): The input tensor which is a Tensor.
input (Tensor): The first input tensor for matrix multiplication.
mat2 (Tensor): The second input tensor for matrix multiplication.
name (str, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`
......@@ -802,31 +804,16 @@ def mm(input, mat2, name=None):
Examples:
.. code-block:: python
# Examples to clarify shapes of the inputs and output
# x: [B, ..., M, K], mat2: [B, ..., K, N]
# paddle.matmul(x, mat2) # out: [B, ..., M, N]
# x: [B, M, K], mat2: [B, K, N]
# paddle.matmul(x, mat2) # out: [B, M, N]
# x: [B, M, K], mat2: [K, N]
# paddle.matmul(x, mat2) # out: [B, M, N]
# x: [M, K], mat2: [K, N]
# paddle.matmul(x, mat2) # out: [M, N]
# x: [B, M, K], mat2: [K]
# paddle.matmul(x, mat2) # out: [B, M]
# x: [K], mat2: [K]
# paddle.matmul(x, mat2) # out: [1]
import paddle
input = paddle.arange(1, 7).reshape((3, 2)).astype('float32')
mat2 = paddle.arange(1, 9).reshape((2, 4)).astype('float32')
out = paddle.mm(input, mat2)
print(out)
# [[11., 14., 17., 20.],
#  [23., 30., 37., 44.],
#  [35., 46., 57., 68.]]
x = paddle.rand(shape=[2, 3], dtype='float32')
y = paddle.rand(shape=[3, 2], dtype='float32')
out = paddle.mm(x, y)
print(out.shape) # [2, 2]
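A short contrast with paddle.matmul, which does broadcast (the shapes here are illustrative assumptions):

import paddle

x = paddle.rand([2, 3, 4])
y = paddle.rand([4, 5])
print(paddle.matmul(x, y).shape)  # [2, 3, 5]: matmul broadcasts y across the batch
# paddle.mm, by contrast, does not broadcast (see the note above) and expects
# shapes that already match for matrix multiplication.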
"""
if in_dygraph_mode():
out = _varbase_creator(dtype=input.dtype)
......@@ -1609,8 +1596,6 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
@templatedoc(op_type="kron")
def kron(x, y, name=None):
"""
:alias_main: paddle.kron
:alias: paddle.kron,paddle.tensor.kron,paddle.tensor.math.kron
${comment}
......@@ -1631,27 +1616,16 @@ ${comment}
.. code-block:: python
import paddle
from paddle import fluid
import paddle.fluid.dygraph as dg
import numpy as np
a = np.arange(1, 5).reshape(2, 2).astype(np.float32)
b = np.arange(1, 10).reshape(3, 3).astype(np.float32)
place = fluid.CPUPlace()
with dg.guard(place):
a_var = dg.to_variable(a)
b_var = dg.to_variable(b)
c_var = paddle.kron(a_var, b_var)
c_np = c_var.numpy()
print(c_np)
#[[ 1. 2. 3. 2. 4. 6.]
# [ 4. 5. 6. 8. 10. 12.]
# [ 7. 8. 9. 14. 16. 18.]
# [ 3. 6. 9. 4. 8. 12.]
# [12. 15. 18. 16. 20. 24.]
# [21. 24. 27. 28. 32. 36.]]
x = paddle.to_tensor([[1, 2], [3, 4]], dtype='int64')
y = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='int64')
out = paddle.kron(x, y)
print(out)
# [[ 1,  2,  3,  2,  4,  6],
#  [ 4,  5,  6,  8, 10, 12],
#  [ 7,  8,  9, 14, 16, 18],
#  [ 3,  6,  9,  4,  8, 12],
#  [12, 15, 18, 16, 20, 24],
#  [21, 24, 27, 28, 32, 36]]
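As a quick check, the Kronecker product's shape is the elementwise product of the operand shapes; continuing the example above:

print(out.shape)  # [6, 6], i.e. [2*3, 2*3]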
"""
if in_dygraph_mode():
return core.ops.kron(x, y)
......