From 94fdb8eb59afacf33d66ac0a9c2258f90054633a Mon Sep 17 00:00:00 2001
From: liuwei1031 <46661762+liuwei1031@users.noreply.github.com>
Date: Mon, 20 Apr 2020 11:12:21 +0800
Subject: [PATCH] tweak doc of dot and logsumexp, test=develop (#23925)

---
 python/paddle/tensor/linalg.py |  4 ++-
 python/paddle/tensor/math.py   | 62 +++++++++++++++++++++-------------
 2 files changed, 41 insertions(+), 25 deletions(-)

diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 70624b63b9f..76b459327cf 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -421,11 +421,13 @@ def dot(x, y, name=None):
     Only support 1-d Tensor(vector).
 
     Parameters:
-
     x(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
     y(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype soulde be ``float32``, ``float64``, ``int32``, ``int64``
     name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
 
+    Returns:
+        Variable: the calculated result Tensor/LoDTensor.
+
     Examples:
 
     .. code-block:: python
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index ca9ae1d3779..c4feb007a95 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1012,38 +1012,52 @@ def addmm(input, x, y, alpha=1.0, beta=1.0, name=None):
 
 def logsumexp(x, dim=None, keepdim=False, out=None, name=None):
     """
-This operator calculates the log of the sum of exponentials of the input Tensor.
+    This operator calculates the log of the sum of exponentials of the input Tensor.
 
-.. math::
-   logsumexp(x) = \log\sum exp(x)
+    .. math::
+        logsumexp(x) = \log\sum exp(x)
+
+
+    Parameters:
+        x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
+        dim (list|int, optional): The dimensions along which the sum is performed. If :attr:`None`,
+            sum all elements of :attr:`input` and return a Tensor variable with a single element,
+            otherwise must be in the range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
+            the dimension to reduce is :math:`rank + dim[i]`.
+        keep_dim (bool, optional): Whether to reserve the reduced dimension in the output Tensor.
+            The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim`
+            is true, default value is False.
+        out (Variable, optional): Enable user to explicitly specify an output variable to save the result.
+        name (str, optional): The default value is None. Normally there is no need for user to
+            set this property. For more information, please refer to :ref:`api_guide_Name`
+
+    Returns:
+        Variable: The calculated result Tensor/LoDTensor.
 
-Parameters:
-    x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
-    dim (list|int, optional): The dimensions along which the sum is performed. If :attr:`None`,
-        sum all elements of :attr:`input` and return a Tensor variable with a single element,
-        otherwise must be in the range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
-        the dimension to reduce is :math:`rank + dim[i]`.
-    keep_dim (bool, optional): Whether to reserve the reduced dimension in the output Tensor.
-        The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim`
-        is true, default value is False.
-    name (str, optional): The default value is None. Normally there is no need for user to
-        set this property. For more information, please refer to :ref:`api_guide_Name`
+    Examples:
+    .. code-block:: python
 
-Examples:
+        import paddle
+        import paddle.fluid as fluid
+        import numpy as np
 
-.. code-block:: python
+        with fluid.dygraph.guard():
+            np_x = np.random.uniform(0.1, 1, [10]).astype(np.float32)
+            x = fluid.dygraph.to_variable(np_x)
+            print(paddle.logsumexp(x).numpy())
 
-    import paddle
-    import paddle.fluid as fluid
-    import numpy as np
-
-    with fluid.dygraph.guard():
-        np_x = np.random.uniform(0.1, 1, [10]).astype(np.float32)
-        x = fluid.dygraph.to_variable(np_x)
-        print(paddle.logsumexp(x).numpy())
+    .. code-block:: python
+
+        import paddle
+        import paddle.fluid as fluid
+        import numpy as np
+        with fluid.dygraph.guard():
+            np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
+            x = fluid.dygraph.to_variable(np_x)
+            print(paddle.logsumexp(x, dim=1).numpy())
+            print(paddle.logsumexp(x, dim=[0, 2]).numpy())
 
     """
     op_type = 'logsumexp'
--
GitLab
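
As background for the semantics documented above (not part of the patch): logsumexp reduces log(sum(exp(x))) over the axes given by ``dim``, keeps the reduced axes only when ``keepdim`` is true, and is usually computed by subtracting the per-axis maximum first so exp() does not overflow. The sketch below is a minimal NumPy-only reference, assuming that interpretation; the helper name ``ref_logsumexp`` is invented for illustration and is not Paddle's implementation.

.. code-block:: python

    import numpy as np

    def ref_logsumexp(x, dim=None, keepdim=False):
        # Normalize `dim` to a tuple of axes; None means reduce over all axes.
        x = np.asarray(x)
        if dim is None:
            axes = tuple(range(x.ndim))
        elif isinstance(dim, int):
            axes = (dim,)
        else:
            axes = tuple(dim)
        # Subtract the per-axis max before exp() for numerical stability.
        m = np.max(x, axis=axes, keepdims=True)
        out = np.log(np.sum(np.exp(x - m), axis=axes, keepdims=True)) + m
        # Drop the reduced axes unless the caller asked to keep them.
        return out if keepdim else np.squeeze(out, axis=axes)

    x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
    print(ref_logsumexp(x))              # reduce over all elements -> scalar
    print(ref_logsumexp(x, dim=1))       # shape (2, 4)
    print(ref_logsumexp(x, dim=[0, 2]))  # shape (3,)

With ``keepdim=True`` the reduced axes are retained with size 1, which matches the ``keep_dim`` behavior described in the docstring.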