From 29a0a8e232da63c04304a9d912a1be450ce39910 Mon Sep 17 00:00:00 2001 From: Ligoml <39876205+Ligoml@users.noreply.github.com> Date: Wed, 18 May 2022 11:05:32 +0800 Subject: [PATCH] [docs_epoch1] update 50+ paddle.* API docs (#42794) * docs_epoch1 * docs_epoch1 * update kron api docs * update erfinv;test=document_fix * subtract * for ci;test=document_fix --- python/paddle/framework/framework.py | 2 +- python/paddle/framework/random.py | 6 +- python/paddle/hapi/dynamic_flops.py | 4 +- python/paddle/hapi/model_summary.py | 2 +- python/paddle/tensor/creation.py | 114 ++++++++++++--------------- python/paddle/tensor/linalg.py | 27 ++++--- python/paddle/tensor/logic.py | 43 +++++----- python/paddle/tensor/manipulation.py | 57 +++++--------- python/paddle/tensor/math.py | 68 ++++++++-------- python/paddle/tensor/ops.py | 8 +- python/paddle/tensor/random.py | 10 +-- python/paddle/tensor/search.py | 16 ++-- python/paddle/tensor/stat.py | 53 ++++++------- python/paddle/tensor/to_string.py | 1 - 14 files changed, 187 insertions(+), 224 deletions(-) diff --git a/python/paddle/framework/framework.py b/python/paddle/framework/framework.py index e899d267289..350b1f1567b 100644 --- a/python/paddle/framework/framework.py +++ b/python/paddle/framework/framework.py @@ -24,7 +24,7 @@ __all__ = [] def set_default_dtype(d): """ - Set default dtype. The default dtype is initially float32 + Set default dtype. The default dtype is initially float32. Args: d(string|np.dtype): the dtype to make the default. It only diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index a560072cf5a..147f6be39c5 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -57,7 +57,7 @@ def get_cuda_rng_state(): Get random state of cuda generators. Args: - None + None. Returns: GeneratorState: object. @@ -80,13 +80,13 @@ def get_cuda_rng_state(): def set_cuda_rng_state(state_list): """ - Sets generator state for all cuda generators + Sets generator state for all cuda generators. Args: state_list(list|tuple): The cuda states to set back to cuda generators. state_list is obtained from get_cuda_rng_state(). Returns: - None + None. Examples: .. code-block:: python diff --git a/python/paddle/hapi/dynamic_flops.py b/python/paddle/hapi/dynamic_flops.py index 077a70c9101..4dd1aa03aa2 100644 --- a/python/paddle/hapi/dynamic_flops.py +++ b/python/paddle/hapi/dynamic_flops.py @@ -28,10 +28,10 @@ def flops(net, input_size, custom_ops=None, print_detail=False): Args: net (paddle.nn.Layer||paddle.static.Program): The network which could be a instance of paddle.nn.Layer in dygraph or paddle.static.Program in static graph. - input_size (list): size of input tensor. Note that the batch_size in argument 'input_size' only support 1. + input_size (list): size of input tensor. Note that the batch_size in argument ``input_size`` only support 1. custom_ops (A dict of function, optional): A dictionary which key is the class of specific operation such as paddle.nn.Conv2D and the value is the function used to count the FLOPs of this operation. This - argument only work when argument 'net' is an instance of paddle.nn.Layer. The details could be found + argument only work when argument ``net`` is an instance of paddle.nn.Layer. The details could be found in following example code. Default is None. print_detail (bool, optional): Whether to print the detail information, like FLOPs per layer, about the net FLOPs. Default is False. 
diff --git a/python/paddle/hapi/model_summary.py b/python/paddle/hapi/model_summary.py index 8d581f38e9b..8cd95a5ea58 100644 --- a/python/paddle/hapi/model_summary.py +++ b/python/paddle/hapi/model_summary.py @@ -30,7 +30,7 @@ def summary(net, input_size=None, dtypes=None, input=None): Args: net (Layer): the network which must be a subinstance of Layer. - input_size (tuple|InputSpec|list[tuple|InputSpec]): size of input tensor. if model only + input_size (tuple|InputSpec|list[tuple|InputSpec], optional): size of input tensor. if model only have one input, input_size can be tuple or InputSpec. if model have multiple input, input_size must be a list which contain every input's shape. Note that input_size only dim of diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index 5163e6e5395..ab55ead71e9 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -294,12 +294,6 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True): Returns: Tensor: A Tensor constructed from ``data`` . - Raises: - TypeError: If the data type of ``data`` is not scalar, list, tuple, np.ndarray, paddle.Tensor - ValueError: If ``data`` is tuple|list, it can't contain nested tuple|list with different lengths , such as: [[1, 2], [3, 4, 5]] - TypeError: If ``dtype`` is not bool, float16, float32, float64, int8, int16, int32, int64, uint8, complex64, complex128 - ValueError: If ``place`` is not paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace or specified pattern string. - Examples: .. code-block:: python @@ -765,7 +759,7 @@ def full(shape, fill_value, dtype=None, name=None): def arange(start=0, end=None, step=1, dtype=None, name=None): """ - This OP returns a 1-D Tensor with spaced values within a given interval. + Returns a 1-D Tensor with spaced values within a given interval. Values are generated into the half-open interval [``start``, ``end``) with the ``step``. (the interval including ``start`` but excluding ``end``). @@ -789,18 +783,13 @@ def arange(start=0, end=None, step=1, dtype=None, name=None): dtype(str|np.dtype, optional): The data type of the output tensor. Supported data types: int32, int64, float32, float64. If ``dytpe`` is None, the data type is float32. Default is None. - name(str, optional): The default value is None. Normally there is no - need for user to set this property. For more information, please - refer to :ref:`api_guide_Name`. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A 1-D Tensor with values from the interval [``start``, ``end``) taken with common difference ``step`` beginning from ``start``. Its data type is set by ``dtype``. - Raises: - TypeError: If ``dtype`` is not int32, int64, float32, float64. - Examples: .. code-block:: python @@ -914,7 +903,7 @@ def _tril_triu_op(helper): def tril(x, diagonal=0, name=None): r""" - This op returns the lower triangular part of a matrix (2-D tensor) or batch + Returns the lower triangular part of a matrix (2-D tensor) or batch of matrices :attr:`x`, the other elements of the result tensor are set to 0. The lower triangular part of the matrix is defined as the elements on and below the diagonal. @@ -929,48 +918,42 @@ def tril(x, diagonal=0, name=None): the main diagonal. The main diagonal are the set of indices :math:`\{(i, i)\}` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where :math:`d_{1}, d_{2}` are the dimensions of the matrix. 
- name (str, optional): The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Results of lower triangular operation by the specified diagonal of input tensor x, it's data type is the same as x's Tensor. - Raises: - TypeError: diagonal is not a int type. - ValueError: dimension of :attr:`x` is less than 2. - Examples: .. code-block:: python - import numpy as np import paddle - data = np.arange(1, 13, dtype="int64").reshape(3,-1) - # array([[ 1, 2, 3, 4], - # [ 5, 6, 7, 8], - # [ 9, 10, 11, 12]]) - + data = paddle.arange(1, 13, dtype="int64").reshape([3,-1]) + # Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True, + # [[1 , 2 , 3 , 4 ], + # [5 , 6 , 7 , 8 ], + # [9 , 10, 11, 12]]) - x = paddle.to_tensor(data) - - tril1 = paddle.tensor.tril(x) - # array([[ 1, 0, 0, 0], - # [ 5, 6, 0, 0], - # [ 9, 10, 11, 0]]) + tril1 = paddle.tril(data) + # Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True, + # [[1 , 0 , 0 , 0 ], + # [5 , 6 , 0 , 0 ], + # [9 , 10, 11, 0 ]]) # example 2, positive diagonal value - tril2 = paddle.tensor.tril(x, diagonal=2) - # array([[ 1, 2, 3, 0], - # [ 5, 6, 7, 8], - # [ 9, 10, 11, 12]]) + tril2 = paddle.tril(data, diagonal=2) + # Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True, + # [[1 , 2 , 3 , 0 ], + # [5 , 6 , 7 , 8 ], + # [9 , 10, 11, 12]]) # example 3, negative diagonal value - tril3 = paddle.tensor.tril(x, diagonal=-1) - # array([[ 0, 0, 0, 0], - # [ 5, 0, 0, 0], - # [ 9, 10, 0, 0]]) - + tril3 = paddle.tril(data, diagonal=-1) + # Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True, + # [[0 , 0 , 0 , 0 ], + # [5 , 0 , 0 , 0 ], + # [9 , 10, 0 , 0 ]]) """ if in_dygraph_mode(): return _C_ops.final_state_tril_triu(x, diagonal, True) @@ -1342,7 +1325,7 @@ def diag(x, offset=0, padding_value=0, name=None): def empty(shape, dtype=None, name=None): """ - This Op returns a Tensor with uninitialized data which size is same as ``shape``. + Returns a Tensor with uninitialized data which size is same as ``shape``. Args: shape(list|tuple|Tensor): Shape of the Tensor to be created. @@ -1362,29 +1345,32 @@ def empty(shape, dtype=None, name=None): Examples: .. code-block:: python - import paddle - import numpy as np - - paddle.set_device("cpu") # and use cpu device + import paddle - # example 1: argument ``shape`` is a list which doesn't contain Tensor. - data1 = paddle.empty(shape=[2,3], dtype='float32') - #[[4.3612203e+27 1.8176809e+31 1.3555911e-19] # uninitialized - # [1.1699684e-19 1.3563156e-19 3.6408321e-11]] # uninitialized - - # example 2: argument ``shape`` is a Tensor, the data type must be int64 or int32. - shape_data = np.array([2, 3]).astype('int32') - shape = paddle.to_tensor(shape_data) - data2 = paddle.empty(shape=shape, dtype='float32') - #[[1.7192326e-37 4.8125365e-38 1.9866003e-36] # uninitialized - # [1.3284029e-40 7.1117408e-37 2.5353012e+30]] # uninitialized - - # example 3: argument ``shape`` is a list which contains Tensor. 
- dim2_data = np.array([3]).astype('int32') - dim2 = paddle.to_tensor(dim2_data) - data3 = paddle.empty(shape=[2, dim2], dtype='float32') - #[[1.1024214e+24 7.0379409e+22 6.5737699e-34] # uninitialized - # [7.5563101e+31 7.7130405e+31 2.8020654e+20]] # uninitialized + paddle.set_device("cpu") # and use cpu device + + # example 1: argument ``shape`` is a list which doesn't contain Tensor. + data1 = paddle.empty(shape=[2, 3], dtype='float32') + print(data1) + # Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True, + # [[0.00000000, 0. , 0.00000000], + # [0. , 0.29652897, 0.09356152]]) # uninitialized + + # example 2: argument ``shape`` is a Tensor, the data type must be int64 or int32. + shape_data = paddle.to_tensor([2, 3]).astype('int32') + data2 = paddle.empty(shape=shape_data, dtype='float32') + print(data2) + # Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True, + # [[-0.50543123, -0.09872390, -0.92634487], + # [-0.51007903, -0.02454148, 1.29315734]]) # uninitialized + + # example 3: argument ``shape`` is a list which contains Tensor. + dim2 = paddle.to_tensor([3]).astype('int32') + data3 = paddle.empty(shape=[2, dim2], dtype='float32') + print(data3) + # Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True, + # [[ 0.00000000, 0. , -0.92634487], + # [-0.51007903, -0.02454148, 1.29315734]]) # uninitialized """ if dtype is None: diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 2a77dbd1157..303a0438276 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -1492,10 +1492,12 @@ def bmm(x, y, name=None): y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) - #output size: (2, 2, 2) - #output value: - #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] - out_np = out.numpy() + # Tensor(shape=[2, 2, 2], dtype=float32, place=Place(cpu), stop_gradient=True, + # [[[6. , 6. ], + # [12., 12.]], + + # [[45., 45.], + # [60., 60.]]]) """ x_shape = x.shape @@ -1530,9 +1532,10 @@ def histogram(input, bins=100, min=0, max=0, name=None): Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor should be float32, float64, int32, int64. - bins (int): number of histogram bins - min (int): lower end of the range (inclusive) - max (int): upper end of the range (inclusive) + bins (int, optional): number of histogram bins. + min (int, optional): lower end of the range (inclusive). + max (int, optional): upper end of the range (inclusive). + name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None. Returns: Tensor: data type is int64, shape is (nbins,). 
@@ -1640,14 +1643,14 @@ def mv(x, vec, name=None): # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] - import numpy as np import paddle - x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") - x = paddle.to_tensor(x_data) - vec_data = np.array([3, 5, 1]) - vec = paddle.to_tensor(vec_data).astype("float64") + x = paddle.to_tensor([[2, 1, 3], [3, 0, 1]]).astype("float64") + vec = paddle.to_tensor([3, 5, 1]).astype("float64") out = paddle.mv(x, vec) + print(out) + # Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True, + # [14., 10.]) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index d99b9973b48..80a5f84824b 100755 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -275,9 +275,10 @@ def is_empty(x, name=None): def equal_all(x, y, name=None): """ - This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise. + Returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise. - **NOTICE**: The output of this OP has no gradient. + Note: + The output has no gradient. Args: x(Tensor): Tensor, data type is bool, float32, float64, int32, int64. @@ -402,7 +403,8 @@ def equal(x, y, name=None): This layer returns the truth value of :math:`x == y` elementwise. - **NOTICE**: The output of this OP has no gradient. + Note: + The output has no gradient. Args: x(Tensor): Tensor, data type is bool, float32, float64, int32, int64. @@ -459,9 +461,10 @@ def equal(x, y, name=None): @templatedoc() def greater_equal(x, y, name=None): """ - This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`. + Returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`. - **NOTICE**: The output of this OP has no gradient. + Note: + The output has no gradient. Args: x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64. @@ -469,7 +472,7 @@ def greater_equal(x, y, name=None): name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`. + Tensor: The output shape is same as input :attr:`x`. The output data type is bool. Examples: .. code-block:: python @@ -509,9 +512,10 @@ def greater_equal(x, y, name=None): @templatedoc() def greater_than(x, y, name=None): """ - This OP returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`. + Returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`. - **NOTICE**: The output of this OP has no gradient. + Note: + The output has no gradient. Args: x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64. @@ -519,7 +523,7 @@ def greater_than(x, y, name=None): name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x` . 
+ Tensor: The output shape is same as input :attr:`x`. The output data type is bool. Examples: .. code-block:: python @@ -558,9 +562,10 @@ def greater_than(x, y, name=None): @templatedoc() def less_equal(x, y, name=None): """ - This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`. + Returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`. - **NOTICE**: The output of this OP has no gradient. + Note: + The output has no gradient. Args: x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64. @@ -569,7 +574,7 @@ def less_equal(x, y, name=None): user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`. + Tensor: The output shape is same as input :attr:`x`. The output data type is bool. Examples: .. code-block:: python @@ -609,9 +614,10 @@ def less_equal(x, y, name=None): @templatedoc() def less_than(x, y, name=None): """ - This OP returns the truth value of :math:`x < y` elementwise, which is equivalent function to the overloaded operator `<`. + Returns the truth value of :math:`x < y` elementwise, which is equivalent function to the overloaded operator `<`. - **NOTICE**: The output of this OP has no gradient. + Note: + The output has no gradient. Args: x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64. @@ -620,7 +626,7 @@ def less_than(x, y, name=None): user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`. + Tensor: The output shape is same as input :attr:`x`. The output data type is bool. Examples: .. code-block:: python @@ -660,9 +666,10 @@ def less_than(x, y, name=None): @templatedoc() def not_equal(x, y, name=None): """ - This OP returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`. + Returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`. - **NOTICE**: The output of this OP has no gradient. + Note: + The output has no gradient. Args: x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64. @@ -671,7 +678,7 @@ def not_equal(x, y, name=None): user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`. + Tensor: The output shape is same as input :attr:`x`. The output data type is bool. Examples: .. code-block:: python diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 9a4025a768e..fd95d5de44e 100755 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -970,7 +970,7 @@ def tolist(x): def concat(x, axis=0, name=None): """ - This OP concatenates the input along the axis. + Concatenates the input along the axis. 
Args: x (list|tuple): ``x`` is a Tensor list or Tensor tuple which is with data type bool, float16, @@ -1330,13 +1330,11 @@ def rot90(x, k=1, axes=[0, 1], name=None): def flatten(x, start_axis=0, stop_axis=-1, name=None): r""" - **Flatten op** - Flattens a contiguous range of axes in a tensor according to start_axis and stop_axis. - Note that the output Tensor will share data with origin Tensor and doesn't have a - Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version, please - use `Tensor.clone` like ``flatten_clone_x = x.flatten().clone()``. + Note: + The output Tensor will share data with origin Tensor and doesn't have a Tensor copy in ``dygraph`` mode. + If you want to use the Tensor copy version, please use `Tensor.clone` like ``flatten_clone_x = x.flatten().clone()``. For Example: @@ -1371,8 +1369,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None): float64, int8, int32, int64, uint8. start_axis (int): the start axis to flatten stop_axis (int): the stop axis to flatten - name(str, Optional): For details, please refer to :ref:`api_guide_Name`. - Generally, no setting is required. Default: None. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor with the contents of the input tensor, with input \ @@ -1565,7 +1562,7 @@ def roll(x, shifts, axis=None, name=None): def stack(x, axis=0, name=None): """ - This OP stacks all the input tensors ``x`` along ``axis`` dimemsion. + Stacks all the input tensors ``x`` along ``axis`` dimension. All tensors must be of the same shape and same dtype. For example, given N tensors of shape [A, B], if ``axis == 0``, the shape of stacked @@ -1621,7 +1618,7 @@ def stack(x, axis=0, name=None): axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is ``[-(R+1), R+1)``, where ``R`` is the number of dimensions of the first input tensor ``x[0]``. If ``axis < 0``, ``axis = axis+R+1``. The default value of axis is 0. - name (str, optional): Please refer to :ref:`api_guide_Name`, Default None. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The stacked tensor with same data type as input. @@ -2433,10 +2430,10 @@ def unbind(input, axis=0): .. code-block:: python import paddle - import numpy as np + # input is a variable which shape is [3, 4, 5] - np_input = np.random.rand(3, 4, 5).astype('float32') - input = paddle.to_tensor(np_input) + input = paddle.rand([3, 4, 5]).astype('float32') + [x0, x1, x2] = paddle.unbind(input, axis=0) # x0.shape [4, 5] # x1.shape [4, 5] @@ -2446,7 +2443,6 @@ def unbind(input, axis=0): # x1.shape [3, 5] # x2.shape [3, 5] # x3.shape [3, 5] - """ if in_dygraph_mode(): return _C_ops.final_state_unbind(input, axis) @@ -2933,8 +2929,7 @@ def broadcast_to(x, shape, name=None): shape (list|tuple|Tensor): The result shape after broadcasting. The data type is int32. If shape is a list or tuple, all its elements should be integers or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32. The value -1 in shape means keeping the corresponding dimension unchanged. - name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . - + name (str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: N-D Tensor: A Tensor with the given shape. The data type is the same as ``x``. @@ -3097,7 +3092,7 @@ def expand(x, shape, name=None): def reshape(x, shape, name=None): """ - This operator changes the shape of ``x`` without changing its data. + Changes the shape of ``x`` without changing its data. Note that the output Tensor will share data with origin Tensor and doesn't have a Tensor copy in ``dygraph`` mode. @@ -3106,32 +3101,17 @@ def reshape(x, shape, name=None): Some tricks exist when specifying the target shape. - 1. -1 means the value of this dimension is inferred from the total element - number of x and remaining dimensions. Thus one and only one dimension can - be set -1. + - 1. -1 means the value of this dimension is inferred from the total element number of x and remaining dimensions. Thus one and only one dimension can be set -1. - 2. 0 means the actual dimension value is going to be copied from the - corresponding dimension of x. The index of 0s in shape can not exceed - the dimension of x. + - 2. 0 means the actual dimension value is going to be copied from the corresponding dimension of x. The index of 0s in shape can not exceed the dimension of x. Here are some examples to explain it. - 1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape - is [6, 8], the reshape operator will transform x into a 2-D tensor with - shape [6, 8] and leaving x's data unchanged. + - 1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape is [6, 8], the reshape operator will transform x into a 2-D tensor with shape [6, 8] and leaving x's data unchanged. - 2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape - specified is [2, 3, -1, 2], the reshape operator will transform x into a - 4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this - case, one dimension of the target shape is set to -1, the value of this - dimension is inferred from the total element number of x and remaining - dimensions. + - 2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape specified is [2, 3, -1, 2], the reshape operator will transform x into a 4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this case, one dimension of the target shape is set to -1, the value of this dimension is inferred from the total element number of x and remaining dimensions. - 3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape - is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor - with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case, - besides -1, 0 means the actual dimension value is going to be copied from - the corresponding dimension of x. + - 3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case, besides -1, 0 means the actual dimension value is going to be copied from the corresponding dimension of x. Args: x (Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool`` @@ -3363,8 +3343,7 @@ def gather_nd(x, index, name=None): x (Tensor): The input Tensor which it's data type should be bool, float32, float64, int32, int64. index (Tensor): The index input with rank > 1, index.shape[-1] <= input.rank. Its dtype should be int32, int64. - name(str, optional): The default value is None. 
Normally there is no need for user to set this property. - For more information, please refer to :ref:`api_guide_Name` . + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: output (Tensor): A tensor with the shape index.shape[:-1] + input.shape[index.shape[-1]:] diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 9e2384a8d9c..35a16aa5f47 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -292,6 +292,7 @@ def multiplex(inputs, index, name=None): :name: code-example1 import paddle + img1 = paddle.to_tensor([[1, 2], [3, 4]], dtype=paddle.float32) img2 = paddle.to_tensor([[5, 6], [7, 8]], dtype=paddle.float32) inputs = [img1, img2] @@ -498,6 +499,7 @@ def add(x, y, name=None): .. code-block:: python import paddle + x = paddle.to_tensor([2, 3, 4], 'float64') y = paddle.to_tensor([1, 5, 2], 'float64') z = paddle.add(x, y) @@ -539,8 +541,8 @@ def subtract(x, y, name=None): .. math:: out = x - y - **Note**: - ``paddle.subtract`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` . + Note: + ``paddle.subtract`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` . Args: x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64. @@ -554,35 +556,37 @@ def subtract(x, y, name=None): .. code-block:: python - import numpy as np import paddle x = paddle.to_tensor([[1, 2], [7, 8]]) y = paddle.to_tensor([[5, 6], [3, 4]]) res = paddle.subtract(x, y) print(res) - # [[-4, -4], - # [4, 4]] + # Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True, + # [[-4, -4], + # [ 4, 4]]) x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]]) y = paddle.to_tensor([1, 0, 4]) res = paddle.subtract(x, y) print(res) - # [[[ 0, 2, -1], - # [ 0, 2, -1]]] + # Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True, + # [[[ 0, 2, -1], + # [ 0, 2, -1]]]) - x = paddle.to_tensor([2, np.nan, 5], dtype='float32') - y = paddle.to_tensor([1, 4, np.nan], dtype='float32') + x = paddle.to_tensor([2, float('nan'), 5], dtype='float32') + y = paddle.to_tensor([1, 4, float('nan')], dtype='float32') res = paddle.subtract(x, y) print(res) - # [ 1., nan, nan] + # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, + # [1. , nan, nan]) - x = paddle.to_tensor([5, np.inf, -np.inf], dtype='float64') + x = paddle.to_tensor([5, float('inf'), -float('inf')], dtype='float64') y = paddle.to_tensor([1, 4, 5], dtype='float64') res = paddle.subtract(x, y) print(res) - # [ 4., inf., -inf.] - + # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True, + # [ 4. , inf., -inf.]) """ op_type = 'elementwise_sub' axis = -1 @@ -1083,9 +1087,6 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None): if `x.dtype='bool'`, `x.dtype='int32'`, it's data type is `'int64'`, otherwise it's data type is the same as `x`. - Raises: - TypeError: The type of :attr:`axis` must be int, list or tuple. - Examples: .. code-block:: python @@ -1571,7 +1572,7 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None): """ **addmm** - This operator is used to perform matrix multiplication for input $x$ and $y$. + Performs matrix multiplication for input $x$ and $y$. $input$ is added to the final result. 
The equation is: @@ -1584,12 +1585,12 @@ input (Tensor): The input Tensor to be added to the final result. x (Tensor): The first input Tensor for matrix multiplication. y (Tensor): The second input Tensor for matrix multiplication. - beta (float): Coefficient of $input$. - alpha (float): Coefficient of $x*y$. + beta (float, optional): Coefficient of $input$, default is 1. + alpha (float, optional): Coefficient of $x*y$, default is 1. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor: The output Tensor of addmm op. + Tensor: The output Tensor of addmm. Examples: .. code-block:: python @@ -1830,7 +1831,7 @@ def outer(x, y, name=None): def logsumexp(x, axis=None, keepdim=False, name=None): r""" - This OP calculates the log of the sum of exponentials of ``x`` along ``axis`` . + Calculates the log of the sum of exponentials of ``x`` along ``axis`` . .. math:: logsumexp(x) = \log\sum exp(x) @@ -2543,9 +2544,9 @@ def clip(x, min=None, max=None, name=None): Args: x (Tensor): An N-D Tensor with data type float32, float64, int32 or int64. - min (float|int|Tensor): The lower bound with type ``float`` , ``int`` or a ``Tensor`` + min (float|int|Tensor, optional): The lower bound with type ``float`` , ``int`` or a ``Tensor`` with shape [1] and type ``int32``, ``float32``, ``float64``. - max (float|int|Tensor): The upper bound with type ``float``, ``int`` or a ``Tensor`` + max (float|int|Tensor, optional): The upper bound with type ``float``, ``int`` or a ``Tensor`` with shape [1] and type ``int32``, ``float32``, ``float64``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. @@ -2661,7 +2662,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): """ **trace** - This OP computes the sum along diagonals of the input tensor x. + Computes the sum along diagonals of the input tensor x. If ``x`` is 2D, returns the sum of diagonal. @@ -2862,18 +2863,15 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None): def kron(x, y, name=None): """ -${comment} + ${comment} Args: - x (Tensor): the fist operand of kron op, data type: float16, float32, - float64, int32 or int64. - y (Tensor): the second operand of kron op, data type: float16, - float32, float64, int32 or int64. Its data type should be the same - with x. + x (Tensor): the first operand of kron op, data type: float16, float32, float64, int32 or int64. + y (Tensor): the second operand of kron op, data type: float16, float32, float64, int32 or int64. Its data type should be the same as x. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data is the same with x. + Tensor: The output of kron, data type: float16, float32, float64, int32 or int64. Its data is the same as x. Examples: .. code-block:: python @@ -3135,12 +3133,12 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None): multiply all elements of `x` and return a Tensor with a single element, otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`, the axis to reduce is :math:`x.ndim + axis[i]`. Default is None. + keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result + tensor will have one fewer dimension than the input unless `keepdim` is true. Default is False. dtype (str|np.dtype, optional): The desired date type of returned tensor, can be float32, float64, int32, int64. If specified, the input tensor is casted to dtype before operator performed. This is very useful for avoiding data type overflows. The default value is None, the dtype of output is the same as input Tensor `x`. - keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result - tensor will have one fewer dimension than the input unless `keepdim` is true. Default is False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: @@ -3224,7 +3222,7 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None): def sign(x, name=None): """ - This OP returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero. + Returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero. Args: x (Tensor): The input tensor. The data type can be float16, float32 or float64. @@ -3856,7 +3854,7 @@ def lerp_(x, y, weight, name=None): def erfinv(x, name=None): r""" - The inverse error function of x, . + The inverse error function of x. Equation: .. math:: diff --git a/python/paddle/tensor/ops.py b/python/paddle/tensor/ops.py index 9ee59c6cfd8..7626552a85d 100644 --- a/python/paddle/tensor/ops.py +++ b/python/paddle/tensor/ops.py @@ -506,25 +506,27 @@ def erf(x, name=None): erf.__doc__ = r""" :strong:`Erf Operator` -For more details, see [Error function](https://en.wikipedia.org/wiki/Error_function). +For more details, see `Error function `_. Equation: .. math:: - out = \\frac{2}{\\sqrt{\\pi}} \\int_{0}^{x}e^{- \\eta^{2}}d\\eta + out = \frac{2}{\sqrt{\pi}} \int_{0}^{x}e^{- \eta^{2}}d\eta Args: x (Tensor): The input tensor, it's data type should be float32, float64. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor: The output of Erf op, dtype: float32 or float64, the same as the input, shape: the same as the input. + Tensor: The output of Erf, dtype: float32 or float64, the same as the input, shape: the same as the input. Examples: .. code-block:: python import paddle + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.erf(x) print(out) diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py index b82f58ea3d0..1194d81a360 100644 --- a/python/paddle/tensor/random.py +++ b/python/paddle/tensor/random.py @@ -202,7 +202,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None): def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None): """ - This OP returns a Tensor filled with random values sampled from a Gaussian + Returns a Tensor filled with random values sampled from a Gaussian distribution, with ``shape`` and ``dtype``. Args: @@ -219,9 +219,7 @@ def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None): Supported data types: float32, float64. Default is None, use global default dtype (see ``get_default_dtype`` for details). - name (str, optional): The default value is None. Normally there is no - need for user to set this property. For more information, please - refer to :ref:`api_guide_Name`. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. 
Returns: Tensor: A Tensor filled with random values sampled from a Gaussian @@ -335,7 +333,7 @@ def standard_normal(shape, dtype=None, name=None): def randn(shape, dtype=None, name=None): """ - This OP returns a Tensor filled with random values sampled from a standard + Returns a Tensor filled with random values sampled from a standard normal distribution with mean 0 and standard deviation 1, with ``shape`` and ``dtype``. @@ -907,7 +905,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None): def randperm(n, dtype="int64", name=None): """ - This OP returns a 1-D Tensor filled with random permutation values from 0 + Returns a 1-D Tensor filled with random permutation values from 0 to n-1, with ``dtype``. Args: diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py index d86a6a3f627..f09a532cfd2 100644 --- a/python/paddle/tensor/search.py +++ b/python/paddle/tensor/search.py @@ -119,7 +119,7 @@ def argsort(x, axis=-1, descending=False, name=None): def argmax(x, axis=None, keepdim=False, dtype="int64", name=None): """ - This OP computes the indices of the max elements of the input tensor's + Computes the indices of the max elements of the input tensor's element along the provided axis. Args: @@ -130,23 +130,21 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None): as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index. keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False. dtype(str|np.dtype, optional): Data type of the output tensor which can - be int32, int64. The default value is 'int64', and it will + be int32, int64. The default value is ``int64`` , and it will return the int64 indices. - name(str, optional): The default value is None. Normally there is no - need for user to set this property. For more information, please - refer to :ref:`api_guide_Name`. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64` + Tensor, return the tensor of int32 if set :attr:`dtype` is int32, otherwise return the tensor of int64. Examples: .. code-block:: python import paddle - x = paddle.to_tensor([[5,8,9,5], - [0,0,1,7], - [6,9,2,4]]) + x = paddle.to_tensor([[5,8,9,5], + [0,0,1,7], + [6,9,2,4]]) out1 = paddle.argmax(x) print(out1) # 2 out2 = paddle.argmax(x, axis=0) diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py index 991b86fd47d..52ccc601009 100644 --- a/python/paddle/tensor/stat.py +++ b/python/paddle/tensor/stat.py @@ -118,30 +118,18 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None): Args: x (Tensor): The input Tensor with data type float32, float64. - axis (int|list|tuple, optional): The axis along which to perform - variance calculations. ``axis`` should be int, list(int) or - tuple(int). If ``axis`` is a list/tuple of dimension(s), variance - is calculated along all element(s) of ``axis`` . ``axis`` or - element(s) of ``axis`` should be in range [-D, D), where D is the - dimensions of ``x`` . If ``axis`` or element(s) of ``axis`` is less - than 0, it works the same way as :math:`axis + D` . If ``axis`` is - None, variance is calculated over all elements of ``x``. 
Default - is None. - unbiased (bool, optional): Whether to use the unbiased estimation. If - ``unbiased`` is True, the divisor used in the computation is - :math:`N - 1`, where :math:`N` represents the number of elements - along ``axis`` , otherwise the divisor is :math:`N`. Default is True. - keepdim (bool, optional): Whether to reserve the reduced dimension(s) - in the output Tensor. If ``keepdim`` is True, the dimensions of - the output Tensor is the same as ``x`` except in the reduced - dimensions(it is of size 1 in this case). Otherwise, the shape of - the output Tensor is squeezed in ``axis`` . Default is False. - name (str, optional): Name for the operation (optional, default is None). - For more information, please refer to :ref:`api_guide_Name`. + axis (int|list|tuple, optional): The axis along which to perform variance calculations. ``axis`` should be int, list(int) or tuple(int). + + - If ``axis`` is a list/tuple of dimension(s), variance is calculated along all element(s) of ``axis`` . ``axis`` or element(s) of ``axis`` should be in range [-D, D), where D is the dimensions of ``x`` . + - If ``axis`` or element(s) of ``axis`` is less than 0, it works the same way as :math:`axis + D` . + - If ``axis`` is None, variance is calculated over all elements of ``x``. Default is None. + + unbiased (bool, optional): Whether to use the unbiased estimation. If ``unbiased`` is True, the divisor used in the computation is :math:`N - 1`, where :math:`N` represents the number of elements along ``axis`` , otherwise the divisor is :math:`N`. Default is True. + keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the input unless keepdim is True. Default is False. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor, results of variance along ``axis`` of ``x``, with the same data - type as ``x``. + Tensor, results of variance along ``axis`` of ``x``, with the same data type as ``x``. Examples: .. code-block:: python @@ -223,7 +211,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None): def numel(x, name=None): """ Returns the number of elements for a tensor, which is a int64 Tensor with shape [1] in static mode - or a scalar value in imperative mode + or a scalar value in imperative mode. Args: x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64. @@ -280,21 +268,26 @@ def median(x, axis=None, keepdim=False, name=None): import paddle x = paddle.arange(12).reshape([3, 4]) - # x is [[0 , 1 , 2 , 3 ], - # [4 , 5 , 6 , 7 ], - # [8 , 9 , 10, 11]] + # Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True, + # [[0 , 1 , 2 , 3 ], + # [4 , 5 , 6 , 7 ], + # [8 , 9 , 10, 11]]) y1 = paddle.median(x) - # y1 is [5.5] + # Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True, + # [5.50000000]) y2 = paddle.median(x, axis=0) - # y2 is [4., 5., 6., 7.] 
+ # Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + # [4., 5., 6., 7.]) y3 = paddle.median(x, axis=1) - # y3 is [1.5, 5.5, 9.5] + # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, + # [1.50000000, 5.50000000, 9.50000000]) y4 = paddle.median(x, axis=0, keepdim=True) - # y4 is [[4., 5., 6., 7.]] + # Tensor(shape=[1, 4], dtype=float32, place=Place(cpu), stop_gradient=True, + # [[4., 5., 6., 7.]]) """ if not isinstance(x, Variable): diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py index 42d3bf9fca3..7935b4f2755 100644 --- a/python/paddle/tensor/to_string.py +++ b/python/paddle/tensor/to_string.py @@ -37,7 +37,6 @@ def set_printoptions(precision=None, sci_mode=None, linewidth=None): """Set the printing options for Tensor. - NOTE: The function is similar with numpy.set_printoptions() Args: precision (int, optional): Number of digits of the floating number, default 8. -- GitLab