Unverified · Commit 4f006636 · authored by Liyulingyue · committed by GitHub

Fix some en docs of paddle and paddle.nn.initializer. (#42916)

* calculate_gain; test=document_fix

* Constant; test=document_fix

* KaimingNormal; test=document_fix

* KaimingUniform; test=document_fix

* randint; test=document_fix

* squeeze;test=document_fix

* argmin; test=document_fix

* argmin; test=document_fix

* triu; test=document_fix

* add_n;test=document_fix

* unique; test=document_fix

* topk; test=document_fix

* squeeze;test=document_fix

* randint;test=document_fix

* argmin; test=document_fix

* constant; test=document_fix

* constant; test=document_fix

* KaimingNormal; test=document_fix

* kaiming; test=document_fix

* unique; test=document_fix
Parent 24ea1dd8
@@ -1164,10 +1164,11 @@ def calculate_gain(nonlinearity, param=None):
     Examples:
         .. code-block:: python
+            :name: code-example1

             import paddle
             gain = paddle.nn.initializer.calculate_gain('tanh') # 5.0 / 3
             gain = paddle.nn.initializer.calculate_gain('leaky_relu', param=1.0) # 1.0 = math.sqrt(2.0 / (1+param^2))
+            initializer = paddle.nn.initializer.Orthogonal(gain)
     """
     if param is None:
...
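For reference, the example this hunk adds to `calculate_gain` runs end to end; a minimal standalone version (assuming Paddle 2.x) looks like this:

```python
import paddle

# Recommended gain for tanh is 5/3; for leaky_relu it is sqrt(2 / (1 + param^2)).
gain = paddle.nn.initializer.calculate_gain('tanh')                        # 5.0 / 3
gain = paddle.nn.initializer.calculate_gain('leaky_relu', param=1.0)       # sqrt(2 / 2) = 1.0

# The gain is typically fed to another initializer, e.g. Orthogonal.
init = paddle.nn.initializer.Orthogonal(gain)
```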
@@ -22,11 +22,11 @@ class Constant(ConstantInitializer):
     """Implement the constant initializer.

     Args:
-        value (float32): constant value to initialize the parameter
+        value (float32|float64, optional): constant value to initialize the parameter. Default: 0.0.

     Examples:
         .. code-block:: python
+            :name: code-example1

             import paddle
             import paddle.nn as nn
...
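To see the newly documented default in action, here is a minimal sketch (assuming Paddle 2.x dygraph mode) of initializing a layer's weights with `Constant`:

```python
import paddle
import paddle.nn as nn

# Fill the weight with a fixed value; value defaults to 0.0 when omitted.
linear = nn.Linear(2, 4, weight_attr=nn.initializer.Constant(value=2.0))
print(linear.weight)  # every entry is 2.0
```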
@@ -36,7 +36,7 @@ class KaimingNormal(MSRAInitializer):
         \sqrt{\frac{2.0}{fan\_in}}

     Args:
-        fan_in (float32|None): fan_in for Kaiming normal Initializer. If None, it is\
+        fan_in (float32|None, optional): fan_in for Kaiming normal Initializer. If None, it is
             inferred from the variable. default is None.

     Note:
@@ -44,7 +44,7 @@ class KaimingNormal(MSRAInitializer):

     Examples:
         .. code-block:: python
+            :name: code-example1

             import paddle
             import paddle.nn as nn
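As the corrected `fan_in` description says, leaving the argument as None lets the initializer infer fan_in from the parameter's shape. A minimal sketch (Paddle 2.x assumed):

```python
import paddle
import paddle.nn as nn

# fan_in is inferred from the weight shape (here 4), so the normal
# distribution has std = sqrt(2.0 / 4) per the formula above.
linear = nn.Linear(4, 8, weight_attr=nn.initializer.KaimingNormal())
print(linear.weight.shape)  # [4, 8]
```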
@@ -79,7 +79,7 @@ class KaimingUniform(MSRAInitializer):
         x = \sqrt{\frac{6.0}{fan\_in}}

     Args:
-        fan_in (float32|None): fan_in for Kaiming uniform Initializer. If None, it is\
+        fan_in (float32|None, optional): fan_in for Kaiming uniform Initializer. If None, it is
             inferred from the variable. default is None.

     Note:
@@ -87,7 +87,7 @@ class KaimingUniform(MSRAInitializer):

     Examples:
         .. code-block:: python
+            :name: code-example1

             import paddle
             import paddle.nn as nn
...
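`KaimingUniform` is used the same way; the bound x = sqrt(6.0 / fan_in) comes from the formula above. A minimal sketch (Paddle 2.x assumed):

```python
import paddle
import paddle.nn as nn

# Samples weights uniformly from [-x, x] with x = sqrt(6.0 / fan_in);
# fan_in is inferred from the weight shape because the argument is None.
linear = nn.Linear(4, 8, weight_attr=nn.initializer.KaimingUniform())
```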
@@ -962,7 +962,7 @@ def tril(x, diagonal=0, name=None):

 def triu(x, diagonal=0, name=None):
     r"""
-    This op returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
+    Return the upper triangular part of a matrix (2-D tensor) or batch of matrices
     :attr:`x`, the other elements of the result tensor are set to 0.
     The upper triangular part of the matrix is defined as the elements on and
     above the diagonal.
...
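To make the `diagonal` argument concrete, a small example with expected outputs as comments (per the semantics described above):

```python
import paddle

x = paddle.arange(1, 13, dtype='int64').reshape([3, 4])
# diagonal=0 keeps the main diagonal and everything above it.
print(paddle.triu(x))
# [[1, 2, 3, 4], [0, 6, 7, 8], [0, 0, 11, 12]]
# diagonal=1 zeroes the main diagonal as well.
print(paddle.triu(x, diagonal=1))
# [[0, 2, 3, 4], [0, 0, 7, 8], [0, 0, 0, 12]]
```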
@@ -1887,7 +1887,7 @@ def split(x, num_or_sections, axis=0, name=None):

 def squeeze(x, axis=None, name=None):
     """
-    This OP will squeeze the dimension(s) of size 1 of input tensor x's shape.
+    Squeeze the dimension(s) of size 1 of input tensor x's shape.

     Note that the output Tensor will share data with origin Tensor and doesn't have a
     Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version,
@@ -1944,7 +1944,7 @@ def squeeze(x, axis=None, name=None):

     Examples:
         .. code-block:: python
+            :name: code-example1

             import paddle

             x = paddle.rand([5, 1, 10])
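The data-sharing caveat in the docstring matters in practice; a short sketch (Paddle 2.x dygraph assumed):

```python
import paddle

x = paddle.rand([5, 1, 10])
y = paddle.squeeze(x, axis=1)
print(y.shape)  # [5, 10]

# In dygraph mode y shares storage with x, so writes through one are
# visible through the other; clone first if an independent copy is needed.
z = paddle.squeeze(x.clone(), axis=1)
```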
@@ -2139,13 +2139,13 @@ def unique(x,
         :ref:`api_guide_Name`. Default: None.

     Returns:
-        tuple: (out, indices, inverse, counts). `out` is the unique tensor for `x`. `indices` is \
+        tuple (out, indices, inverse, counts). `out` is the unique tensor for `x`. `indices` is \
             provided only if `return_index` is True. `inverse` is provided only if `return_inverse` \
             is True. `counts` is provided only if `return_counts` is True.

     Examples:
         .. code-block:: python
+            :name: code-example1

             import paddle

             x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
...
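To illustrate the four-element tuple described in the corrected Returns section (expected outputs shown as comments; Paddle 2.x assumed):

```python
import paddle

x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
out, indices, inverse, counts = paddle.unique(
    x, return_index=True, return_inverse=True, return_counts=True)
print(out)      # [1, 2, 3, 5]        unique values, ascending
print(indices)  # [3, 0, 1, 4]        first occurrence of each unique value in x
print(inverse)  # [1, 2, 2, 0, 3, 2]  position of each element of x in out
print(counts)   # [1, 1, 3, 1]        occurrences of each unique value
```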
@@ -1319,7 +1319,7 @@ def nanmean(x, axis=None, keepdim=False, name=None):

 @templatedoc(op_type="sum")
 def add_n(inputs, name=None):
     """
-    This OP is used to sum one or more Tensor of the input.
+    Sum one or more Tensor of the input.

     For example:
@@ -1365,7 +1365,7 @@ def add_n(inputs, name=None):

     Examples:
         .. code-block:: python
+            :name: code-example1

             import paddle

             input0 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
...
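A minimal end-to-end version of the docstring example, with the expected output in a comment:

```python
import paddle

input0 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
input1 = paddle.to_tensor([[7, 8, 9], [10, 11, 12]], dtype='float32')
# Element-wise sum over the list of input tensors.
output = paddle.add_n([input0, input1])
print(output)  # [[8., 10., 12.], [14., 16., 18.]]
```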
@@ -631,13 +631,13 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
     If ``high`` is None (the default), the range is [0, ``low``).

     Args:
-        low (int): The lower bound on the range of random values to generate.
+        low (int, optional): The lower bound on the range of random values to generate.
             The ``low`` is included in the range. If ``high`` is None, the
             range is [0, ``low``). Default is 0.
         high (int, optional): The upper bound on the range of random values to
             generate, the ``high`` is excluded in the range. Default is None
             (see above for behavior if high = None). Default is None.
-        shape (list|tuple|Tensor): The shape of the output Tensor. If ``shape``
+        shape (list|tuple|Tensor, optional): The shape of the output Tensor. If ``shape``
             is a list or tuple, the elements of it should be integers or Tensors
             (with the shape [1], and the data type int32 or int64). If ``shape``
             is a Tensor, it should be a 1-D Tensor(with the data type int32 or
...
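The two calling conventions described above (``high`` omitted vs. both bounds given) look like this in practice (Paddle 2.x assumed):

```python
import paddle

# high=None: the single positional argument is treated as the exclusive
# upper bound, so values are drawn from [0, 10).
out1 = paddle.randint(10, shape=[2, 3])

# Both bounds given: values are drawn from [low, high) = [-5, 5).
out2 = paddle.randint(low=-5, high=5, shape=[3])
```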
@@ -207,7 +207,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):

 def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
     """
-    This OP computes the indices of the min elements of the input tensor's
+    Computing the indices of the min elements of the input tensor's
     element along the provided axis.

     Args:
@@ -217,7 +217,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
             is [-R, R), where R is x.ndim. when axis < 0, it works the same way
             as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.
         keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
-        dtype(str): Data type of the output tensor which can
+        dtype(str, optional): Data type of the output tensor which can
             be int32, int64. The default value is 'int64', and it will
             return the int64 indices.
         name(str, optional): The default value is None. Normally there is no
@@ -225,11 +225,11 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
             refer to :ref:`api_guide_Name`.

     Returns:
-        Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64`
+        Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64`.

     Examples:
         .. code-block:: python
+            :name: code-example1

             import paddle

             x = paddle.to_tensor([[5,8,9,5],
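The docstring example is truncated by the hunk above; a runnable sketch of the three common call patterns (the tensor below is illustrative, expected outputs as comments):

```python
import paddle

x = paddle.to_tensor([[5, 8, 9, 5],
                      [0, 0, 1, 7],
                      [6, 9, 2, 4]])
print(paddle.argmin(x))          # 4: index of the min in the flattened tensor
print(paddle.argmin(x, axis=0))  # [1, 1, 1, 2]: row index of each column's min
print(paddle.argmin(x, axis=-1, keepdim=True))  # [[0], [0], [2]]
```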
@@ -834,7 +834,7 @@ def masked_select(x, mask, name=None):

 def topk(x, k, axis=None, largest=True, sorted=True, name=None):
     """
-    This OP is used to find values and indices of the k largest or smallest at the optional axis.
+    Return values and indices of the k largest or smallest at the optional axis.
     If the input is a 1-D Tensor, finds the k largest or smallest values and indices.
     If the input is a Tensor with higher rank, this operator computes the top k values and indices along the :attr:`axis`.
@@ -856,35 +856,27 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):

     Examples:
         .. code-block:: python
+            :name: code-example1

             import paddle

-            tensor_1 = paddle.to_tensor([1, 4, 5, 7])
-            value_1, indices_1 = paddle.topk(tensor_1, k=1)
-            print(value_1)
-            # [7]
-            print(indices_1)
-            # [3]
-            tensor_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
-            value_2, indices_2 = paddle.topk(tensor_2, k=1)
-            print(value_2)
-            # [[7]
-            #  [6]]
-            print(indices_2)
-            # [[3]
-            #  [1]]
-            value_3, indices_3 = paddle.topk(tensor_2, k=1, axis=-1)
-            print(value_3)
-            # [[7]
-            #  [6]]
-            print(indices_3)
-            # [[3]
-            #  [1]]
-            value_4, indices_4 = paddle.topk(tensor_2, k=1, axis=0)
-            print(value_4)
-            # [[2 6 5 7]]
-            print(indices_4)
-            # [[1 1 0 0]]
+            data_1 = paddle.to_tensor([1, 4, 5, 7])
+            value_1, indices_1 = paddle.topk(data_1, k=1)
+            print(value_1) # [7]
+            print(indices_1) # [3]
+
+            data_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
+            value_2, indices_2 = paddle.topk(data_2, k=1)
+            print(value_2) # [[7], [6]]
+            print(indices_2) # [[3], [1]]
+
+            value_3, indices_3 = paddle.topk(data_2, k=1, axis=-1)
+            print(value_3) # [[7], [6]]
+            print(indices_3) # [[3], [1]]
+
+            value_4, indices_4 = paddle.topk(data_2, k=1, axis=0)
+            print(value_4) # [[2, 6, 5, 7]]
+            print(indices_4) # [[1, 1, 0, 0]]

     """
...
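The rewritten example only exercises `largest=True`; for completeness, a small sketch of the `largest=False` path (Paddle 2.x assumed, expected outputs as comments):

```python
import paddle

data = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
# largest=False searches for the k smallest values; with sorted=True the
# values come back in ascending order.
values, indices = paddle.topk(data, k=2, largest=False, axis=-1)
print(values)   # [[1, 4], [2, 2]]
print(indices)  # [[0, 1], [0, 2]]  (tie order among equal values may vary)
```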