diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 121ec47d947f34daa4387819131a80fc10d6a563..030f2f26514ea06abb6b95325db8092627f915d8 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -8545,7 +8545,8 @@ def scatter_nd_add(ref, index, updates, name=None):
         .. code-block:: python
 
             import paddle.fluid as fluid
-
+            import paddle
+            paddle.enable_static()
             ref = fluid.data(name='ref', shape=[3, 5, 9, 10], dtype='float32')
             index = fluid.data(name='index', shape=[3, 2], dtype='int32')
             updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32')
diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py
index d7a3cfcdb92debe0447cb4054478729e92dbab32..f05051d3e68281129d01b595f9cf2621666ac41e 100644
--- a/python/paddle/static/input.py
+++ b/python/paddle/static/input.py
@@ -57,6 +57,7 @@ def data(name, shape, dtype=None, lod_level=0):
 
           import numpy as np
           import paddle
+          paddle.enable_static()
 
           # Creates a variable with fixed size [3, 2, 1]
           # User can only feed data of the same shape to x
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index b1c0f0b446a3c727d29dbe551c38166137804951..d8d625c4a5cc5c52b86823cb892620d5c6a5040c 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -96,7 +96,6 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
             import paddle
             import numpy as np
 
-            paddle.disable_static()
             # vector * vector
             x_data = np.random.random([10]).astype(np.float32)
             y_data = np.random.random([10]).astype(np.float32)
@@ -563,7 +562,7 @@ def dot(x, y, name=None):
         name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
 
     Returns:
-        Variable: the calculated result Tensor.
+        Tensor: the calculated result Tensor.
 
     Examples:
 
@@ -572,13 +571,12 @@ def dot(x, y, name=None):
            import paddle
            import numpy as np
 
-           paddle.disable_static()
            x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
            y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
            x = paddle.to_tensor(x_data)
            y = paddle.to_tensor(y_data)
            z = paddle.dot(x, y)
-           print(z.numpy())
+           print(z)
 
     """
     op_type = 'dot'
@@ -750,7 +748,7 @@ def cholesky(x, upper=False, name=None):
         :math:`L` is lower-triangular.
 
     Args:
-        x (Variable): The input tensor. Its shape should be `[*, M, M]`,
+        x (Tensor): The input tensor. Its shape should be `[*, M, M]`,
             where * is zero or more batch dimensions, and matrices on the
             inner-most 2 dimensions all should be symmetric positive-definite.
             Its data type should be float32 or float64.
@@ -758,7 +756,7 @@ def cholesky(x, upper=False, name=None):
             triangular matrices. Default: False.
 
     Returns:
-        Variable: A Tensor with same shape and data type as `x`. It represents \
+        Tensor: A Tensor with same shape and data type as `x`. It represents \
         triangular matrices generated by Cholesky decomposition.
 
     Examples:
@@ -767,13 +765,12 @@ def cholesky(x, upper=False, name=None):
            import paddle
            import numpy as np
 
-           paddle.disable_static()
           a = np.random.rand(3, 3)
            a_t = np.transpose(a, [1, 0])
            x_data = np.matmul(a, a_t) + 1e-03
            x = paddle.to_tensor(x_data)
            out = paddle.cholesky(x, upper=False)
-           print(out.numpy())
+           print(out)
            # [[1.190523   0.          0.        ]
            #  [0.9906703  0.27676893  0.        ]
            #  [1.25450498 0.05600871  0.06400121]]
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 7ea8a9286c34e3365d2a03e39fa1825c2fab0780..b062a847d19f98be0b51f31bc3d1e09e82f5e18f 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -862,6 +862,7 @@ def scatter(x, index, updates, overwrite=True, name=None):
     Output is obtained by updating the input on selected indices based on updates.
 
     .. code-block:: python
+        import numpy as np
 
         #input:
         x = np.array([[1, 1], [2, 2], [3, 3]])
@@ -902,7 +903,6 @@ def scatter(x, index, updates, overwrite=True, name=None):
         .. code-block:: python
 
             import paddle
-            paddle.disable_static()
 
             x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
             index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 3a5dcd02fd786594ba1d858eee4d5e220becaada..eb11336327c82b305563ad44648ba6fd1bb8db91 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -312,12 +312,10 @@ def divide(x, y, name=None):
 
             import paddle
 
-            paddle.disable_static()
-
             x = paddle.to_tensor([2, 3, 4], dtype='float64')
             y = paddle.to_tensor([1, 5, 2], dtype='float64')
             z = paddle.divide(x, y)
-            print(z.numpy()) # [2., 0.6, 2.]
+            print(z) # [2., 0.6, 2.]
 
     """
     op_type = 'elementwise_div'
@@ -354,12 +352,10 @@ def floor_divide(x, y, name=None):
 
             import paddle
 
-            paddle.disable_static()
-
             x = paddle.to_tensor([2, 3, 8, 7])
             y = paddle.to_tensor([1, 5, 3, 3])
             z = paddle.floor_divide(x, y)
-            print(z.numpy()) # [2, 0, 2, 2]
+            print(z) # [2, 0, 2, 2]
 
     """
     op_type = 'elementwise_floordiv'
@@ -376,10 +372,11 @@ def remainder(x, y, name=None):
     Mod two tensors element-wise. The equation is:
 
     .. math::
+
         out = x \% y
 
     **Note**:
-    ``paddle.mod`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
+    ``paddle.remainder`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
 
     Args:
         x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
@@ -397,7 +394,7 @@ def remainder(x, y, name=None):
 
             x = paddle.to_tensor([2, 3, 8, 7])
             y = paddle.to_tensor([1, 5, 3, 3])
-            z = paddle.mod(x, y)
+            z = paddle.remainder(x, y)
             print(z) # [0, 3, 2, 1]
 
     """
@@ -1037,7 +1034,7 @@ def inverse(x, name=None):
     (2-D Tensor) or batches of square matrices.
 
     Args:
-        x (Variable): The input tensor. The last two
+        x (Tensor): The input tensor. The last two
             dimensions should be equal. When the number of dimensions is
             greater than 2, it is treated as batches of square matrix. The data
             type can be float32 and float64.
@@ -1046,14 +1043,13 @@ def inverse(x, name=None):
             please refer to :ref:`api_guide_Name`
 
     Returns:
-        Variable: A Tensor holds the inverse of x. The shape and data type
+        Tensor: A Tensor holds the inverse of x. The shape and data type
             is the same as x.
 
     Examples:
         .. code-block:: python
 
             import paddle
-            paddle.disable_static()
             mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32')
             inv = paddle.inverse(mat)
 
@@ -1915,7 +1911,6 @@ def sign(x, name=None):
 
             import paddle
 
-            paddle.disable_static()
            x = paddle.to_tensor([3.0, 0.0, -2.0, 1.7], dtype='float32')
             out = paddle.sign(x=x)
             print(out) # [1.0, 0.0, -1.0, 1.0]
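
For a quick sanity check of the updated dynamic-graph examples above, a minimal standalone script could look like the sketch below. It assumes a Paddle 2.0+ install where dynamic mode is the default; it is illustrative only and not part of the patch.

    import paddle

    # Dynamic-graph mode is the default in 2.0+, so paddle.disable_static() is no longer needed.
    x = paddle.to_tensor([2, 3, 8, 7])
    y = paddle.to_tensor([1, 5, 3, 3])

    print(paddle.floor_divide(x, y))  # values: [2, 0, 2, 2]
    print(paddle.remainder(x, y))     # values: [0, 3, 2, 1]
    print(paddle.sign(paddle.to_tensor([3.0, 0.0, -2.0, 1.7])))  # values: [1., 0., -1., 1.]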