From 4f0066361f017c5ae849e56054c49c451d31ebd1 Mon Sep 17 00:00:00 2001
From: Liyulingyue <83450930+Liyulingyue@users.noreply.github.com>
Date: Mon, 13 Jun 2022 14:01:58 +0800
Subject: [PATCH] Fix some en docs of paddle. and paddle.nn.initialize. (#42916)

* calculate_gain; test=document_fix
* Constant; test=document_fix
* KaimingNormal; test=document_fix
* KaimingUniform; test=document_fix
* randint; test=document_fix
* squeeze;test=document_fix
* argmin; test=document_fix
* argmin; test=document_fix
* triu; test=document_fix
* add_n;test=document_fix
* unique; test=document_fix
* topk; test=document_fix
* squeeze;test=document_fix
* randint;test=document_fix
* argmin; test=document_fix
* constant; test=document_fix
* constant; test=document_fix
* KaimingNormal; test=document_fix
* kaiming; test=document_fix
* unique; test=document_fix
---
 python/paddle/fluid/initializer.py       |  3 +-
 python/paddle/nn/initializer/constant.py |  4 +-
 python/paddle/nn/initializer/kaiming.py  |  8 ++--
 python/paddle/tensor/creation.py         |  2 +-
 python/paddle/tensor/manipulation.py     |  8 ++--
 python/paddle/tensor/math.py             |  4 +-
 python/paddle/tensor/random.py           |  4 +-
 python/paddle/tensor/search.py           | 56 ++++++++++--------------
 8 files changed, 41 insertions(+), 48 deletions(-)

diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py
index 47199fcd1a..f09097b57b 100644
--- a/python/paddle/fluid/initializer.py
+++ b/python/paddle/fluid/initializer.py
@@ -1164,10 +1164,11 @@ def calculate_gain(nonlinearity, param=None):
 
     Examples:
         .. code-block:: python
-
+            :name: code-example1
             import paddle
             gain = paddle.nn.initializer.calculate_gain('tanh') # 5.0 / 3
             gain = paddle.nn.initializer.calculate_gain('leaky_relu', param=1.0) # 1.0 = math.sqrt(2.0 / (1+param^2))
+            initializer = paddle.nn.initializer.Orthogonal(gain)
 
     """
     if param is None:
diff --git a/python/paddle/nn/initializer/constant.py b/python/paddle/nn/initializer/constant.py
index 292eaff362..66818dab45 100644
--- a/python/paddle/nn/initializer/constant.py
+++ b/python/paddle/nn/initializer/constant.py
@@ -22,11 +22,11 @@ class Constant(ConstantInitializer):
     """Implement the constant initializer.
 
     Args:
-        value (float32): constant value to initialize the parameter
+        value (float32|float64, optional): constant value to initialize the parameter. Default: 0.0.
 
     Examples:
         .. code-block:: python
-
+            :name: code-example1
             import paddle
             import paddle.nn as nn
diff --git a/python/paddle/nn/initializer/kaiming.py b/python/paddle/nn/initializer/kaiming.py
index b8ed7febb6..4564965719 100644
--- a/python/paddle/nn/initializer/kaiming.py
+++ b/python/paddle/nn/initializer/kaiming.py
@@ -36,7 +36,7 @@ class KaimingNormal(MSRAInitializer):
         \sqrt{\frac{2.0}{fan\_in}}
 
     Args:
-        fan_in (float32|None): fan_in for Kaiming normal Initializer. If None, it is\
+        fan_in (float32|None, optional): fan_in for Kaiming normal Initializer. If None, it is
         inferred from the variable. default is None.
 
     Note:
@@ -44,7 +44,7 @@
 
     Examples:
         .. code-block:: python
-
+            :name: code-example1
             import paddle
             import paddle.nn as nn
@@ -79,7 +79,7 @@ class KaimingUniform(MSRAInitializer):
         x = \sqrt{\frac{6.0}{fan\_in}}
 
     Args:
-        fan_in (float32|None): fan_in for Kaiming uniform Initializer. If None, it is\
+        fan_in (float32|None, optional): fan_in for Kaiming uniform Initializer. If None, it is
        inferred from the variable. default is None.
 
     Note:
@@ -87,7 +87,7 @@
 
     Examples:
         .. code-block:: python
-
+            :name: code-example1
             import paddle
             import paddle.nn as nn
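For orientation, the sketch below shows how the initializer APIs touched above fit together. It is not part of the patch; the layer sizes and the choice of a ``tanh`` gain are arbitrary illustration values.

.. code-block:: python

    import paddle
    import paddle.nn as nn

    # Recommended gain for tanh, as in the calculate_gain example above.
    gain = paddle.nn.initializer.calculate_gain('tanh')  # 5.0 / 3
    # The gain can feed another initializer, as the added Orthogonal line shows.
    orthogonal = nn.initializer.Orthogonal(gain)

    # A small linear layer: Kaiming-normal weights, constant (zero) bias.
    linear = nn.Linear(
        4, 2,
        weight_attr=paddle.ParamAttr(initializer=nn.initializer.KaimingNormal()),
        bias_attr=paddle.ParamAttr(initializer=nn.initializer.Constant(value=0.0)))

    y = linear(paddle.rand([3, 4]))
    print(y.shape)  # [3, 2]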
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 521839af90..6b5993744f 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -962,7 +962,7 @@ def tril(x, diagonal=0, name=None):
 
 def triu(x, diagonal=0, name=None):
     r"""
-    This op returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
+    Return the upper triangular part of a matrix (2-D tensor) or batch of matrices
     :attr:`x`, the other elements of the result tensor are set to 0.
     The upper triangular part of the matrix is defined as the elements on and
     above the diagonal.
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 96d24a7f91..c445402412 100755
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -1887,7 +1887,7 @@ def split(x, num_or_sections, axis=0, name=None):
 
 def squeeze(x, axis=None, name=None):
     """
-    This OP will squeeze the dimension(s) of size 1 of input tensor x's shape.
+    Squeeze the dimension(s) of size 1 of input tensor x's shape.
 
     Note that the output Tensor will share data with origin Tensor and doesn't
     have a Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version,
@@ -1944,7 +1944,7 @@ def squeeze(x, axis=None, name=None):
 
     Examples:
         .. code-block:: python
-
+            :name: code-example1
             import paddle
 
             x = paddle.rand([5, 1, 10])
@@ -2139,13 +2139,13 @@ def unique(x,
             :ref:`api_guide_Name`. Default: None.
 
     Returns:
-        tuple: (out, indices, inverse, counts). `out` is the unique tensor for `x`. `indices` is \
+        tuple (out, indices, inverse, counts). `out` is the unique tensor for `x`. `indices` is \
             provided only if `return_index` is True. `inverse` is provided only if `return_inverse` \
             is True. `counts` is provided only if `return_counts` is True.
 
     Examples:
         .. code-block:: python
-
+            :name: code-example1
             import paddle
 
             x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index ffca233ff1..1cb350f4d7 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1319,7 +1319,7 @@ def nanmean(x, axis=None, keepdim=False, name=None):
 
 @templatedoc(op_type="sum")
 def add_n(inputs, name=None):
     """
-    This OP is used to sum one or more Tensor of the input.
+    Sum one or more Tensor of the input.
 
     For example:
@@ -1365,7 +1365,7 @@ def add_n(inputs, name=None):
 
     Examples:
         .. code-block:: python
-
+            :name: code-example1
             import paddle
 
             input0 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
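A short sketch of the behaviors these docstring tweaks describe; it is not part of the patch and the tensor values are illustrative only.

.. code-block:: python

    import paddle

    # triu: keeps the elements on and above the diagonal, zeros the rest.
    print(paddle.triu(paddle.arange(9, dtype='float32').reshape([3, 3])).numpy())
    # [[0. 1. 2.] [0. 4. 5.] [0. 0. 8.]]

    # squeeze: drops the size-1 dimension; the output shares data with the input.
    x = paddle.rand([5, 1, 10])
    print(paddle.squeeze(x, axis=1).shape)  # [5, 10]

    # unique: sorted unique values; counts are returned only when requested.
    u, counts = paddle.unique(paddle.to_tensor([2, 3, 3, 1, 5, 3]), return_counts=True)
    print(u.numpy(), counts.numpy())  # [1 2 3 5] [1 1 3 1]

    # add_n: elementwise sum of tensors with the same shape and dtype.
    a = paddle.to_tensor([[1., 2.], [3., 4.]])
    b = paddle.to_tensor([[10., 20.], [30., 40.]])
    print(paddle.add_n([a, b]).numpy())  # [[11. 22.] [33. 44.]]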
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index f43bda1129..990b20a267 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -631,13 +631,13 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
     If ``high`` is None (the default), the range is [0, ``low``).
 
     Args:
-        low (int): The lower bound on the range of random values to generate.
+        low (int, optional): The lower bound on the range of random values to generate.
             The ``low`` is included in the range. If ``high`` is None, the
             range is [0, ``low``). Default is 0.
         high (int, optional): The upper bound on the range of random values to
             generate, the ``high`` is excluded in the range. Default is None
             (see above for behavior if high = None). Default is None.
-        shape (list|tuple|Tensor): The shape of the output Tensor. If ``shape``
+        shape (list|tuple|Tensor, optional): The shape of the output Tensor. If ``shape``
             is a list or tuple, the elements of it should be integers or Tensors
             (with the shape [1], and the data type int32 or int64). If ``shape``
             is a Tensor, it should be a 1-D Tensor(with the data type int32 or
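The randint change above and the argmin changes in the next diff are easier to read against a concrete call. The snippet below is an illustration, not part of the patch; the randint values are random by definition, so only the shape is checked.

.. code-block:: python

    import paddle

    # randint: with high left as None, paddle.randint(10, ...) samples from [0, 10).
    r = paddle.randint(10, shape=[2, 3], dtype='int64')
    print(r.shape)  # [2, 3]

    # argmin: flattened by default, per-axis when axis is given.
    x = paddle.to_tensor([[5, 8, 9, 5],
                          [0, 0, 1, 7],
                          [6, 9, 2, 4]])
    print(paddle.argmin(x).numpy())           # 4 (the first 0 in the flattened tensor)
    print(paddle.argmin(x, axis=0).numpy())   # [1 1 1 2]
    print(paddle.argmin(x, axis=-1, keepdim=True).shape)  # [3, 1]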
diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py
index 94a05294aa..f46b53a3b7 100644
--- a/python/paddle/tensor/search.py
+++ b/python/paddle/tensor/search.py
@@ -207,7 +207,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
 
 def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
     """
-    This OP computes the indices of the min elements of the input tensor's
+    Compute the indices of the min elements of the input tensor's
     element along the provided axis.
 
     Args:
@@ -217,7 +217,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
             is [-R, R), where R is x.ndim. when axis < 0, it works the same way as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.
         keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
-        dtype(str): Data type of the output tensor which can
+        dtype(str, optional): Data type of the output tensor which can
                    be int32, int64. The default value is 'int64', and it will
                    return the int64 indices.
         name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
 
     Returns:
@@ -225,11 +225,11 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
            refer to :ref:`api_guide_Name`.
 
     Returns:
-        Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64`
+        Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64`.
 
     Examples:
         .. code-block:: python
-
+            :name: code-example1
            import paddle

            x = paddle.to_tensor([[5,8,9,5],
@@ -834,7 +834,7 @@ def masked_select(x, mask, name=None):
 
 def topk(x, k, axis=None, largest=True, sorted=True, name=None):
     """
-    This OP is used to find values and indices of the k largest or smallest at the optional axis.
+    Return values and indices of the k largest or smallest at the optional axis.
     If the input is a 1-D Tensor, finds the k largest or smallest values and indices.
     If the input is a Tensor with higher rank, this operator computes the top k values and indices along the :attr:`axis`.
 
     Examples:
         .. code-block:: python
+            :name: code-example1
+            import paddle
-            import paddle
+            data_1 = paddle.to_tensor([1, 4, 5, 7])
+            value_1, indices_1 = paddle.topk(data_1, k=1)
+            print(value_1) # [7]
+            print(indices_1) # [3]
+
+            data_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
+            value_2, indices_2 = paddle.topk(data_2, k=1)
+            print(value_2) # [[7], [6]]
+            print(indices_2) # [[3], [1]]
+
+            value_3, indices_3 = paddle.topk(data_2, k=1, axis=-1)
+            print(value_3) # [[7], [6]]
+            print(indices_3) # [[3], [1]]
+
+            value_4, indices_4 = paddle.topk(data_2, k=1, axis=0)
+            print(value_4) # [[2, 6, 5, 7]]
+            print(indices_4) # [[1, 1, 0, 0]]
-            tensor_1 = paddle.to_tensor([1, 4, 5, 7])
-            value_1, indices_1 = paddle.topk(tensor_1, k=1)
-            print(value_1)
-            # [7]
-            print(indices_1)
-            # [3]
-            tensor_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
-            value_2, indices_2 = paddle.topk(tensor_2, k=1)
-            print(value_2)
-            # [[7]
-            #  [6]]
-            print(indices_2)
-            # [[3]
-            #  [1]]
-            value_3, indices_3 = paddle.topk(tensor_2, k=1, axis=-1)
-            print(value_3)
-            # [[7]
-            #  [6]]
-            print(indices_3)
-            # [[3]
-            #  [1]]
-            value_4, indices_4 = paddle.topk(tensor_2, k=1, axis=0)
-            print(value_4)
-            # [[2 6 5 7]]
-            print(indices_4)
-            # [[1 1 0 0]]
 
     """
--
GitLab