未验证 提交 bb5f8e35 编写于 作者: S ShenLiang 提交者: GitHub

fix doc of data,matmul,dot,cholesky,scatter,divide,remainder,inverse,sign (#28665)

上级 29b50507
...@@ -8545,7 +8545,8 @@ def scatter_nd_add(ref, index, updates, name=None): ...@@ -8545,7 +8545,8 @@ def scatter_nd_add(ref, index, updates, name=None):
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle
paddle.enable_static()
ref = fluid.data(name='ref', shape=[3, 5, 9, 10], dtype='float32') ref = fluid.data(name='ref', shape=[3, 5, 9, 10], dtype='float32')
index = fluid.data(name='index', shape=[3, 2], dtype='int32') index = fluid.data(name='index', shape=[3, 2], dtype='int32')
updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32') updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32')
......
...@@ -57,6 +57,7 @@ def data(name, shape, dtype=None, lod_level=0): ...@@ -57,6 +57,7 @@ def data(name, shape, dtype=None, lod_level=0):
import numpy as np import numpy as np
import paddle import paddle
paddle.enable_static()
# Creates a variable with fixed size [3, 2, 1] # Creates a variable with fixed size [3, 2, 1]
# User can only feed data of the same shape to x # User can only feed data of the same shape to x
......
...@@ -96,7 +96,6 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None): ...@@ -96,7 +96,6 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.disable_static()
# vector * vector # vector * vector
x_data = np.random.random([10]).astype(np.float32) x_data = np.random.random([10]).astype(np.float32)
y_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32)
...@@ -563,7 +562,7 @@ def dot(x, y, name=None): ...@@ -563,7 +562,7 @@ def dot(x, y, name=None):
name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
Returns: Returns:
Variable: the calculated result Tensor. Tensor: the calculated result Tensor.
Examples: Examples:
...@@ -572,13 +571,12 @@ def dot(x, y, name=None): ...@@ -572,13 +571,12 @@ def dot(x, y, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.disable_static()
x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
y_data = np.random.uniform(1, 3, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data) y = paddle.to_tensor(y_data)
z = paddle.dot(x, y) z = paddle.dot(x, y)
print(z.numpy()) print(z)
""" """
op_type = 'dot' op_type = 'dot'
...@@ -750,7 +748,7 @@ def cholesky(x, upper=False, name=None): ...@@ -750,7 +748,7 @@ def cholesky(x, upper=False, name=None):
:math:`L` is lower-triangular. :math:`L` is lower-triangular.
Args: Args:
x (Variable): The input tensor. Its shape should be `[*, M, M]`, x (Tensor): The input tensor. Its shape should be `[*, M, M]`,
where * is zero or more batch dimensions, and matrices on the where * is zero or more batch dimensions, and matrices on the
inner-most 2 dimensions all should be symmetric positive-definite. inner-most 2 dimensions all should be symmetric positive-definite.
Its data type should be float32 or float64. Its data type should be float32 or float64.
...@@ -758,7 +756,7 @@ def cholesky(x, upper=False, name=None): ...@@ -758,7 +756,7 @@ def cholesky(x, upper=False, name=None):
triangular matrices. Default: False. triangular matrices. Default: False.
Returns: Returns:
Variable: A Tensor with same shape and data type as `x`. It represents \ Tensor: A Tensor with same shape and data type as `x`. It represents \
triangular matrices generated by Cholesky decomposition. triangular matrices generated by Cholesky decomposition.
Examples: Examples:
...@@ -767,13 +765,12 @@ def cholesky(x, upper=False, name=None): ...@@ -767,13 +765,12 @@ def cholesky(x, upper=False, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.disable_static()
a = np.random.rand(3, 3) a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0]) a_t = np.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03 x_data = np.matmul(a, a_t) + 1e-03
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
out = paddle.cholesky(x, upper=False) out = paddle.cholesky(x, upper=False)
print(out.numpy()) print(out)
# [[1.190523 0. 0. ] # [[1.190523 0. 0. ]
# [0.9906703 0.27676893 0. ] # [0.9906703 0.27676893 0. ]
# [1.25450498 0.05600871 0.06400121]] # [1.25450498 0.05600871 0.06400121]]
......
...@@ -862,6 +862,7 @@ def scatter(x, index, updates, overwrite=True, name=None): ...@@ -862,6 +862,7 @@ def scatter(x, index, updates, overwrite=True, name=None):
Output is obtained by updating the input on selected indices based on updates. Output is obtained by updating the input on selected indices based on updates.
.. code-block:: python .. code-block:: python
import numpy as np import numpy as np
#input: #input:
x = np.array([[1, 1], [2, 2], [3, 3]]) x = np.array([[1, 1], [2, 2], [3, 3]])
...@@ -902,7 +903,6 @@ def scatter(x, index, updates, overwrite=True, name=None): ...@@ -902,7 +903,6 @@ def scatter(x, index, updates, overwrite=True, name=None):
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32') x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
index = paddle.to_tensor([2, 1, 0, 1], dtype='int64') index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
......
...@@ -312,12 +312,10 @@ def divide(x, y, name=None): ...@@ -312,12 +312,10 @@ def divide(x, y, name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([2, 3, 4], dtype='float64') x = paddle.to_tensor([2, 3, 4], dtype='float64')
y = paddle.to_tensor([1, 5, 2], dtype='float64') y = paddle.to_tensor([1, 5, 2], dtype='float64')
z = paddle.divide(x, y) z = paddle.divide(x, y)
print(z.numpy()) # [2., 0.6, 2.] print(z) # [2., 0.6, 2.]
""" """
op_type = 'elementwise_div' op_type = 'elementwise_div'
...@@ -354,12 +352,10 @@ def floor_divide(x, y, name=None): ...@@ -354,12 +352,10 @@ def floor_divide(x, y, name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([2, 3, 8, 7]) x = paddle.to_tensor([2, 3, 8, 7])
y = paddle.to_tensor([1, 5, 3, 3]) y = paddle.to_tensor([1, 5, 3, 3])
z = paddle.floor_divide(x, y) z = paddle.floor_divide(x, y)
print(z.numpy()) # [2, 0, 2, 2] print(z) # [2, 0, 2, 2]
""" """
op_type = 'elementwise_floordiv' op_type = 'elementwise_floordiv'
...@@ -376,10 +372,11 @@ def remainder(x, y, name=None): ...@@ -376,10 +372,11 @@ def remainder(x, y, name=None):
Mod two tensors element-wise. The equation is: Mod two tensors element-wise. The equation is:
.. math:: .. math::
out = x \% y out = x \% y
**Note**: **Note**:
``paddle.mod`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` . ``paddle.remainder`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
Args: Args:
x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64. x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
...@@ -397,7 +394,7 @@ def remainder(x, y, name=None): ...@@ -397,7 +394,7 @@ def remainder(x, y, name=None):
x = paddle.to_tensor([2, 3, 8, 7]) x = paddle.to_tensor([2, 3, 8, 7])
y = paddle.to_tensor([1, 5, 3, 3]) y = paddle.to_tensor([1, 5, 3, 3])
z = paddle.mod(x, y) z = paddle.remainder(x, y)
print(z) # [0, 3, 2, 1] print(z) # [0, 3, 2, 1]
""" """
...@@ -1037,7 +1034,7 @@ def inverse(x, name=None): ...@@ -1037,7 +1034,7 @@ def inverse(x, name=None):
(2-D Tensor) or batches of square matrices. (2-D Tensor) or batches of square matrices.
Args: Args:
x (Variable): The input tensor. The last two x (Tensor): The input tensor. The last two
dimensions should be equal. When the number of dimensions is dimensions should be equal. When the number of dimensions is
greater than 2, it is treated as batches of square matrix. The data greater than 2, it is treated as batches of square matrix. The data
type can be float32 and float64. type can be float32 and float64.
...@@ -1046,14 +1043,13 @@ def inverse(x, name=None): ...@@ -1046,14 +1043,13 @@ def inverse(x, name=None):
please refer to :ref:`api_guide_Name` please refer to :ref:`api_guide_Name`
Returns: Returns:
Variable: A Tensor holds the inverse of x. The shape and data type Tensor: A Tensor holds the inverse of x. The shape and data type
is the same as x. is the same as x.
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32') mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32')
inv = paddle.inverse(mat) inv = paddle.inverse(mat)
...@@ -1915,7 +1911,6 @@ def sign(x, name=None): ...@@ -1915,7 +1911,6 @@ def sign(x, name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([3.0, 0.0, -2.0, 1.7], dtype='float32') x = paddle.to_tensor([3.0, 0.0, -2.0, 1.7], dtype='float32')
out = paddle.sign(x=x) out = paddle.sign(x=x)
print(out) # [1.0, 0.0, -1.0, 1.0] print(out) # [1.0, 0.0, -1.0, 1.0]
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册