diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 1eea810163bfdbb4d2445bd66edc068e89052fc3..2e5b76564568281cd668cf6fe3e2ef397674acab 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -12175,13 +12175,10 @@ def logical_and(x, y, out=None, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - x_data = np.array([True], dtype=np.bool) - y_data = np.array([True, False, True, False], dtype=np.bool) - x = paddle.to_tensor(x_data) - y = paddle.to_tensor(y_data) + x = paddle.to_tensor([True]) + y = paddle.to_tensor([True, False, True, False]) res = paddle.logical_and(x, y) print(res.numpy()) # [True False True False] """ @@ -12294,11 +12291,9 @@ def logical_not(x, out=None, name=None): Examples: .. code-block:: python import paddle - import numpy as np paddle.disable_static() - x_data = np.array([True, False, True, False], dtype=np.bool) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([True, False, True, False]) res = paddle.logical_not(x) print(res.numpy()) # [False True False True] """ diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index 84cacea6ba5723f8a06fc87fa9c59d96f802e65a..1efae3ddf1f3422a53f69c4b5b8eeec6183fae96 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -86,13 +86,11 @@ add_sample_code(globals()["sigmoid"], r""" Examples: .. code-block:: python - import numpy as np import paddle import paddle.nn.functional as F paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = F.sigmoid(x) print(out.numpy()) # [0.40131234 0.450166 0.52497919 0.57444252] @@ -103,13 +101,11 @@ add_sample_code(globals()["logsigmoid"], r""" Examples: .. 
code-block:: python - import numpy as np import paddle import paddle.nn.functional as F paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = F.logsigmoid(x) print(out.numpy()) # [-0.91301525 -0.79813887 -0.64439666 -0.55435524] @@ -120,12 +116,10 @@ add_sample_code(globals()["exp"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.exp(x) print(out.numpy()) # [0.67032005 0.81873075 1.10517092 1.34985881] @@ -136,12 +130,10 @@ add_sample_code(globals()["tanh"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.tanh(x) print(out.numpy()) # [-0.37994896 -0.19737532 0.09966799 0.29131261] @@ -152,12 +144,10 @@ add_sample_code(globals()["atan"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.atan(x) print(out.numpy()) # [-0.38050638 -0.19739556 0.09966865 0.29145679] @@ -170,11 +160,10 @@ Examples: import paddle import paddle.nn.functional as F - import numpy as np paddle.disable_static() - x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739] """) @@ -183,12 +172,10 @@ add_sample_code(globals()["sqrt"], r""" Examples: .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([0.1, 0.2, 0.3, 0.4]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4]) out = paddle.sqrt(x) print(out.numpy()) # [0.31622777 0.4472136 0.54772256 0.63245553] @@ -199,12 +186,10 @@ add_sample_code(globals()["rsqrt"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([0.1, 0.2, 0.3, 0.4]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4]) out = paddle.rsqrt(x) print(out.numpy()) # [3.16227766 2.23606798 1.82574186 1.58113883] @@ -215,12 +200,10 @@ add_sample_code(globals()["abs"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.abs(x) print(out.numpy()) # [0.4 0.2 0.1 0.3] @@ -231,12 +214,10 @@ add_sample_code(globals()["ceil"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.ceil(x) print(out.numpy()) # [-0. -0. 1. 1.] @@ -247,12 +228,10 @@ add_sample_code(globals()["floor"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.floor(x) print(out.numpy()) # [-1. -1. 0. 0.] @@ -263,12 +242,10 @@ add_sample_code(globals()["cos"], r""" Examples: .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.cos(x) print(out.numpy()) # [0.92106099 0.98006658 0.99500417 0.95533649] @@ -279,12 +256,10 @@ add_sample_code(globals()["acos"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.acos(x) print(out.numpy()) # [1.98231317 1.77215425 1.47062891 1.26610367] @@ -295,12 +270,10 @@ add_sample_code(globals()["sin"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.sin(x) print(out.numpy()) # [-0.38941834 -0.19866933 0.09983342 0.29552021] @@ -311,12 +284,10 @@ add_sample_code(globals()["asin"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.asin(x) print(out.numpy()) # [-0.41151685 -0.20135792 0.10016742 0.30469265] @@ -327,12 +298,10 @@ add_sample_code(globals()["cosh"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.cosh(x) print(out.numpy()) # [1.08107237 1.02006676 1.00500417 1.04533851] @@ -343,12 +312,10 @@ add_sample_code(globals()["sinh"], r""" Examples: .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.sinh(x) print(out.numpy()) # [-0.41075233 -0.201336 0.10016675 0.30452029] @@ -359,12 +326,10 @@ add_sample_code(globals()["round"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.5, -0.2, 0.6, 1.5]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5]) out = paddle.round(x) print(out.numpy()) # [-1. -0. 1. 2.] @@ -375,12 +340,10 @@ add_sample_code(globals()["reciprocal"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.reciprocal(x) print(out.numpy()) # [-2.5 -5. 10. 3.33333333] @@ -391,12 +354,10 @@ add_sample_code(globals()["square"], r""" Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.square(x) print(out.numpy()) # [0.16 0.04 0.01 0.09] @@ -409,11 +370,10 @@ Examples: import paddle import paddle.nn.functional as F - import numpy as np paddle.disable_static() - x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355] """) @@ -424,11 +384,10 @@ Examples: import paddle import paddle.nn.functional as F - import numpy as np paddle.disable_static() - x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769] """) @@ -761,11 +720,9 @@ Examples: .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_tensor(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.erf(x) print(out.numpy()) # [-0.42839236 -0.22270259 0.11246292 0.32862676] diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index d2ddee654f4d04de152d15130ba53c424af3e5b2..3d5894064c44cb72259472fc638d46b67c5703fc 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -138,13 +138,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean', .. code-block:: python import paddle - import numpy as np - input_data = np.array([0.5, 0.6, 0.7]).astype("float32") - label_data = np.array([1.0, 0.0, 1.0]).astype("float32") paddle.disable_static() - input = paddle.to_tensor(input_data) - label = paddle.to_tensor(label_data) + input = paddle.to_tensor([0.5, 0.6, 0.7], 'float32') + label = paddle.to_tensor([1.0, 0.0, 1.0], 'float32') output = paddle.nn.functional.binary_cross_entropy(input, label) print(output.numpy()) # [0.65537095] @@ -277,8 +274,8 @@ def binary_cross_entropy_with_logits(logit, import paddle paddle.disable_static() - logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32") - label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32") + logit = paddle.to_tensor([5.0, 1.0, 3.0]) + label = paddle.to_tensor([1.0, 0.0, 1.0]) output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label) print(output.numpy()) # [0.45618808] @@ -569,13 +566,10 @@ def l1_loss(input, label, reduction='mean', name=None): Examples: .. 
code-block:: python import paddle - import numpy as np paddle.disable_static() - input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32") - label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32") - input = paddle.to_tensor(input_data) - label = paddle.to_tensor(label_data) + input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]]) + label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]]) l1_loss = paddle.nn.functional.l1_loss(input, label) print(l1_loss.numpy()) @@ -868,7 +862,7 @@ def mse_loss(input, label, reduction='mean', name=None): Examples: .. code-block:: python - import numpy as np + import paddle @@ -878,8 +872,6 @@ def mse_loss(input, label, reduction='mean', name=None): input = paddle.data(name="input", shape=[1]) label = paddle.data(name="label", shape=[1]) place = paddle.CPUPlace() - input_data = np.array([1.5]).astype("float32") - label_data = np.array([1.7]).astype("float32") output = mse_loss(input,label) exe = paddle.static.Executor(place) @@ -894,8 +886,8 @@ def mse_loss(input, label, reduction='mean', name=None): # dynamic graph mode paddle.disable_static() - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor(1.5) + label = paddle.to_tensor(1.7) output = mse_loss(input, label) print(output.numpy()) # [0.04000002] diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index aa00710081d2e5aabfef66827fa649d2fb7bb96f..0f8306987c3f32a5d50082ba4c1bd590f4364b65 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -366,11 +366,10 @@ def ones_like(x, dtype=None, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - x = paddle.to_tensor(np.array([1,2,3], dtype='float32')) + x = paddle.to_tensor([1., 2., 3.]) out1 = paddle.zeros_like(x) # [1., 1., 1.] out2 = paddle.zeros_like(x, dtype='int32') # [1, 1, 1] @@ -453,11 +452,10 @@ def zeros_like(x, dtype=None, name=None): .. 
code-block:: python import paddle - import numpy as np paddle.disable_static() - x = paddle.to_tensor(np.array([1,2,3], dtype='float32')) + x = paddle.to_tensor([1., 2., 3.]) out1 = paddle.zeros_like(x) # [0., 0., 0.] out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0] @@ -619,7 +617,6 @@ def arange(start=0, end=None, step=1, dtype=None, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() @@ -633,7 +630,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None): out3 = paddle.arange(4.999, dtype='float32') # [0., 1., 2., 3., 4.] - start_var = paddle.to_tensor(np.array([3])) + start_var = paddle.to_tensor([3]) out4 = paddle.arange(start_var, 7) # [3, 4, 5, 6] @@ -725,7 +722,7 @@ def tril(x, diagonal=0, name=None): paddle.disable_static() - x = paddle.to_variable(data) + x = paddle.to_tensor(data) tril1 = paddle.tensor.tril(x) # array([[ 1, 0, 0, 0], @@ -797,7 +794,7 @@ def triu(x, diagonal=0, name=None): paddle.disable_static() # example 1, default diagonal - x = paddle.to_variable(data) + x = paddle.to_tensor(data) triu1 = paddle.tensor.triu(x) # array([[ 1, 2, 3, 4], # [ 0, 6, 7, 8], diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index b5b528325cd9f52a8b61ef21df0095c41da5a8ed..7ddda5091a0a260f56b29bcedfdcb0786e82ddd6 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -810,7 +810,7 @@ def cholesky(x, upper=False, name=None): a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 - x = paddle.to_variable(x_data) + x = paddle.to_tensor(x_data) out = paddle.cholesky(x, upper=False) print(out.numpy()) # [[1.190523 0. 0. 
] @@ -855,15 +855,16 @@ def bmm(x, y, name=None): Examples: import paddle - # In imperative mode: - # size input1: (2, 2, 3) and input2: (2, 3, 2) - input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]]) - input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) - paddle.disable_static() - - x = paddle.to_variable(input1) - y = paddle.to_variable(input2) + + # In imperative mode: + # size x: (2, 2, 3) and y: (2, 3, 2) + x = paddle.to_tensor([[[1.0, 1.0, 1.0], + [2.0, 2.0, 2.0]], + [[3.0, 3.0, 3.0], + [4.0, 4.0, 4.0]]]) + y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], + [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: @@ -924,10 +925,8 @@ def histogram(input, bins=100, min=0, max=0): Code Example 2: .. code-block:: python import paddle - import numpy as np paddle.disable_static(paddle.CPUPlace()) - inputs_np = np.array([1, 2, 1]).astype(np.float) - inputs = paddle.to_variable(inputs_np) + inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] paddle.enable_static() diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index 36b558d597c1ce1333a8f1eec54e2fd2813625e3..5fd714421c8ed14820738543a1824c779296d7c3 100644 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -71,13 +71,12 @@ def equal_all(x, y, name=None): Examples: .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - x = paddle.to_variable(np.array([1, 2, 3])) - y = paddle.to_variable(np.array([1, 2, 3])) - z = paddle.to_variable(np.array([1, 4, 3])) + x = paddle.to_tensor([1, 2, 3]) + y = paddle.to_tensor([1, 2, 3]) + z = paddle.to_tensor([1, 4, 3]) result1 = paddle.equal_all(x, y) print(result1.numpy()) # result1 = [True ] result2 = paddle.equal_all(x, z) @@ -120,14 +119,11 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - np_x = np.array([10000., 1e-07]).astype("float32") - np_y = np.array([10000.1, 1e-08]).astype("float32") - x = paddle.to_tensor(np_x) - y = paddle.to_tensor(np_y) + x = paddle.to_tensor([10000., 1e-07]) + y = paddle.to_tensor([10000.1, 1e-08]) result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan") np_result1 = result1.numpy() @@ -137,10 +133,8 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): np_result2 = result2.numpy() # [False] - np_x = np.array([1.0, float('nan')]).astype("float32") - np_y = np.array([1.0, float('nan')]).astype("float32") - x = paddle.to_tensor(np_x) - y = paddle.to_tensor(np_y) + x = paddle.to_tensor([1.0, float('nan')]) + y = paddle.to_tensor([1.0, float('nan')]) result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan") np_result1 = result1.numpy() @@ -195,12 +189,11 @@ def equal(x, y, name=None): Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x = paddle.to_variable(np.array([1, 2, 3])) - y = paddle.to_variable(np.array([1, 3, 2])) + x = paddle.to_tensor([1, 2, 3]) + y = paddle.to_tensor([1, 3, 2]) result1 = paddle.equal(x, y) print(result1.numpy()) # result1 = [True False False] """ @@ -227,12 +220,11 @@ def greater_equal(x, y, name=None): Examples: .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - x = paddle.to_variable(np.array([1, 2, 3])) - y = paddle.to_variable(np.array([1, 3, 2])) + x = paddle.to_tensor([1, 2, 3]) + y = paddle.to_tensor([1, 3, 2]) result1 = paddle.greater_equal(x, y) print(result1.numpy()) # result1 = [True False True] """ @@ -259,12 +251,11 @@ def greater_than(x, y, name=None): Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x = paddle.to_variable(np.array([1, 2, 3])) - y = paddle.to_variable(np.array([1, 3, 2])) + x = paddle.to_tensor([1, 2, 3]) + y = paddle.to_tensor([1, 3, 2]) result1 = paddle.greater_than(x, y) print(result1.numpy()) # result1 = [False False True] """ @@ -292,12 +283,11 @@ def less_equal(x, y, name=None): Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x = paddle.to_variable(np.array([1, 2, 3])) - y = paddle.to_variable(np.array([1, 3, 2])) + x = paddle.to_tensor([1, 2, 3]) + y = paddle.to_tensor([1, 3, 2]) result1 = paddle.less_equal(x, y) print(result1.numpy()) # result1 = [True True False] """ @@ -325,12 +315,11 @@ def less_than(x, y, name=None): Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x = paddle.to_variable(np.array([1, 2, 3])) - y = paddle.to_variable(np.array([1, 3, 2])) + x = paddle.to_tensor([1, 2, 3]) + y = paddle.to_tensor([1, 3, 2]) result1 = paddle.less_than(x, y) print(result1.numpy()) # result1 = [False True False] """ @@ -358,12 +347,12 @@ def not_equal(x, y, name=None): Examples: .. 
code-block:: python - import numpy as np + import paddle paddle.disable_static() - x = paddle.to_variable(np.array([1, 2, 3])) - y = paddle.to_variable(np.array([1, 3, 2])) + x = paddle.to_tensor([1, 2, 3]) + y = paddle.to_tensor([1, 3, 2]) result1 = paddle.not_equal(x, y) print(result1.numpy()) # result1 = [False True True] """ diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 040af773184291aaa69382877ba71397be04944d..e2d413d40d86d7259395cb331abc8468c53d5d4c 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -98,18 +98,14 @@ def concat(x, axis=0, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() # Now we are in imperative mode - in1 = np.array([[1, 2, 3], - [4, 5, 6]]) - in2 = np.array([[11, 12, 13], - [14, 15, 16]]) - in3 = np.array([[21, 22], - [23, 24]]) - x1 = paddle.to_tensor(in1) - x2 = paddle.to_tensor(in2) - x3 = paddle.to_tensor(in3) + x1 = paddle.to_tensor([[1, 2, 3], + [4, 5, 6]]) + x2 = paddle.to_tensor([[11, 12, 13], + [14, 15, 16]]) + x3 = paddle.to_tensor([[21, 22], + [23, 24]]) zero = paddle.full(shape=[1], dtype='int32', fill_value=0) # When the axis is negative, the real axis is (axis + Rank(x)) # As follow, axis is -1, Rank(x) is 2, the real axis is 1 @@ -158,7 +154,7 @@ def flip(x, axis, name=None): image_shape=(3, 2, 2) x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape) x = x.astype('float32') - img = paddle.to_variable(x) + img = paddle.to_tensor(x) out = paddle.flip(img, [0,1]) print(out) # [[[10,11][8, 9]],[[6, 7],[4, 5]] [[2, 3],[0, 1]]] @@ -250,7 +246,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None): x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3]).reshape(image_shape) / 100. 
x = x.astype('float32') - img = paddle.to_variable(x) + img = paddle.to_tensor(x) out = paddle.flatten(img, start_axis=1, stop_axis=2) # out shape is [2, 12, 4] """ @@ -315,15 +311,13 @@ def roll(x, shifts, axis=None, name=None): Examples: .. code-block:: python - import numpy as np import paddle import paddle.fluid as fluid - data = np.array([[1.0, 2.0, 3.0], - [4.0, 5.0, 6.0], - [7.0, 8.0, 9.0]]) paddle.disable_static() - x = paddle.to_variable(data) + x = paddle.to_tensor([[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + [7.0, 8.0, 9.0]]) out_z1 = paddle.roll(x, shifts=1) print(out_z1.numpy()) #[[9. 1. 2.] @@ -447,8 +441,7 @@ def stack(x, axis=0, name=None): .. code-block:: python import paddle - import numpy as np - + paddle.disable_static() x1 = paddle.to_tensor([[1.0, 2.0]]) x2 = paddle.to_tensor([[3.0, 4.0]]) @@ -632,12 +625,10 @@ def unique(x, Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([2, 3, 3, 1, 5, 3]) - x = paddle.to_tensor(x_data) + x = paddle.to_tensor([2, 3, 3, 1, 5, 3]) unique = paddle.unique(x) np_unique = unique.numpy() # [1 2 3 5] _, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True) @@ -645,8 +636,7 @@ def unique(x, np_inverse = inverse.numpy() # [1 2 2 0 3 2] np_counts = counts.numpy() # [1 1 3 1] - x_data = np.array([[2, 1, 3], [3, 0, 1], [2, 1, 3]]) - x = paddle.to_tensor(x_data) + x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]]) unique = paddle.unique(x) np_unique = unique.numpy() # [0 1 2 3] @@ -815,14 +805,11 @@ def gather(x, index, axis=None, name=None): .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - input_1 = np.array([[1,2],[3,4],[5,6]]) - index_1 = np.array([0,1]) - input = paddle.to_tensor(input_1) - index = paddle.to_tensor(index_1) + input = paddle.to_tensor([[1,2],[3,4],[5,6]]) + index = paddle.to_tensor([0,1]) output = paddle.gather(input, index, axis=0) # expected output: [[1,2],[3,4]] """ @@ -958,16 +945,11 @@ def scatter(x, index, updates, overwrite=True, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) - index_data = np.array([2, 1, 0, 1]).astype(np.int64) - updates_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32) - - x = paddle.to_tensor(x_data) - index = paddle.to_tensor(index_data) - updates = paddle.to_tensor(updates_data) + x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32') + index = paddle.to_tensor([2, 1, 0, 1], dtype='int64') + updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32') output1 = paddle.scatter(x, index, updates, overwrite=False) # [[3., 3.], @@ -1074,11 +1056,9 @@ def tile(x, repeat_times, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - np_data = np.array([1, 2, 3]).astype('int32') - data = paddle.to_tensor(np_data) + data = paddle.to_tensor([1, 2, 3], dtype='int32') out = paddle.tile(data, repeat_times=[2, 1]) np_out = out.numpy() # [[1, 2, 3], [1, 2, 3]] @@ -1087,8 +1067,7 @@ def tile(x, repeat_times, name=None): np_out = out.numpy() # [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]] - np_repeat_times = np.array([2, 1]).astype("int32") - repeat_times = paddle.to_tensor(np_repeat_times) + repeat_times = paddle.to_tensor([2, 1], dtype='int32') out = paddle.tile(data, repeat_times=repeat_times) np_out = out.numpy() # [[1, 2, 3], [1, 2, 3]] @@ -1156,15 +1135,12 @@ def expand_as(x, y, name=None): Examples: .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - np_data_x = np.array([1, 2, 3]).astype('int32') - np_data_y = np.array([[1, 2, 3], [4, 5, 6]]).astype('int32') - data_x = paddle.to_tensor(np_data_x) - data_y = paddle.to_tensor(np_data_y) + data_x = paddle.to_tensor([1, 2, 3], 'int32') + data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32') out = paddle.expand_as(data_x, data_y) np_out = out.numpy() # [[1, 2, 3], [1, 2, 3]] @@ -1212,12 +1188,10 @@ def expand(x, shape, name=None): Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - np_data = np.array([1, 2, 3]).astype('int32') - data = paddle.to_tensor(np_data) + data = paddle.to_tensor([1, 2, 3], dtype='int32') out = paddle.expand(data, shape=[2, 3]) out = out.numpy() # [[1, 2, 3], [1, 2, 3]] @@ -1416,14 +1390,11 @@ def gather_nd(x, index, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - np_x = np.array([[[1, 2], [3, 4], [5, 6]], - [[7, 8], [9, 10], [11, 12]]]) - np_index = [[0, 1]] - x = paddle.to_tensor(np_x) - index = paddle.to_tensor(np_index) + x = paddle.to_tensor([[[1, 2], [3, 4], [5, 6]], + [[7, 8], [9, 10], [11, 12]]]) + index = paddle.to_tensor([[0, 1]]) output = paddle.gather_nd(x, index) #[[3, 4]] diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index b6314ef1ba37937a39073ec68cf1cf540b27bf64..0b72c91c83984bc41669fb9f1a6afad7bd166fda 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -174,14 +174,12 @@ def pow(x, y, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() # example 1: y is a float - x_data = np.array([1, 2, 3]) + x = paddle.to_tensor([1, 2, 3]) y = 2 - x = paddle.to_tensor(x_data) res = paddle.pow(x, y) print(res.numpy()) # [1 4 9] @@ -291,13 +289,10 @@ Examples: .. 
code-block:: python import paddle - import numpy as np paddle.disable_static() - np_x = np.array([2, 3, 4]).astype('float64') - np_y = np.array([1, 5, 2]).astype('float64') - x = paddle.to_variable(np_x) - y = paddle.to_variable(np_y) + x = paddle.to_tensor([2, 3, 4], 'float64') + y = paddle.to_tensor([1, 5, 2], 'float64') z = paddle.add(x, y) np_z = z.numpy() print(np_z) # [3., 8., 6. ] @@ -335,14 +330,11 @@ def divide(x, y, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - np_x = np.array([2, 3, 4]).astype('float64') - np_y = np.array([1, 5, 2]).astype('float64') - x = paddle.to_tensor(np_x) - y = paddle.to_tensor(np_y) + x = paddle.to_tensor([2, 3, 4], dtype='float64') + y = paddle.to_tensor([1, 5, 2], dtype='float64') z = paddle.divide(x, y) print(z.numpy()) # [2., 0.6, 2.] @@ -440,14 +432,11 @@ def floor_divide(x, y, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - np_x = np.array([2, 3, 8, 7]) - np_y = np.array([1, 5, 3, 3]) - x = paddle.to_tensor(np_x) - y = paddle.to_tensor(np_y) + x = paddle.to_tensor([2, 3, 8, 7]) + y = paddle.to_tensor([1, 5, 3, 3]) z = paddle.floor_divide(x, y) print(z.numpy()) # [2, 0, 2, 2] @@ -530,14 +519,11 @@ def remainder(x, y, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - np_x = np.array([2, 3, 8, 7]) - np_y = np.array([1, 5, 3, 3]) - x = paddle.to_tensor(np_x) - y = paddle.to_tensor(np_y) + x = paddle.to_tensor([2, 3, 8, 7]) + y = paddle.to_tensor([1, 5, 3, 3]) z = paddle.remainder(x, y) print(z.numpy()) # [0, 3, 2, 1] @@ -612,20 +598,15 @@ def multiply(x, y, axis=-1, name=None): .. 
code-block:: python import paddle - import numpy as np paddle.disable_static() - x_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - y_data = np.array([[5, 6], [7, 8]], dtype=np.float32) - x = paddle.to_tensor(x_data) - y = paddle.to_tensor(y_data) + x = paddle.to_tensor([[1, 2], [3, 4]]) + y = paddle.to_tensor([[5, 6], [7, 8]]) res = paddle.multiply(x, y) print(res.numpy()) # [[5, 12], [21, 32]] - x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32) - y_data = np.array([1, 2], dtype=np.float32) - x = paddle.to_tensor(x_data) - y = paddle.to_tensor(y_data) + x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]]) + y = paddle.to_tensor([1, 2]) res = paddle.multiply(x, y, axis=1) print(res.numpy()) # [[[1, 2, 3], [2, 4, 6]]] @@ -654,36 +635,28 @@ Examples: paddle.disable_static() - x_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - y_data = np.array([[5, 6], [7, 8]], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32') res = paddle.maximum(x, y) print(res.numpy()) #[[5. 6.] # [7. 8.]] - x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32) - y_data = np.array([1, 2], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32') + y = paddle.to_tensor([1, 2], dtype='float32') res = paddle.maximum(x, y, axis=1) print(res.numpy()) #[[[1. 2. 3.] # [2. 2. 3.]]] - x_data = np.array([2, 3, 5], dtype=np.float32) - y_data = np.array([1, 4, np.nan], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([2, 3, 5], dtype='float32') + y = paddle.to_tensor([1, 4, np.nan], dtype='float32') res = paddle.maximum(x, y) print(res.numpy()) #[ 2. 4. 
nan] - x_data = np.array([5, 3, np.inf], dtype=np.float32) - y_data = np.array([1, 4, 5], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([5, 3, np.inf], dtype='float32') + y = paddle.to_tensor([1, 4, 5], dtype='float32') res = paddle.maximum(x, y) print(res.numpy()) #[ 5. 4. inf] @@ -703,38 +676,31 @@ Examples: import paddle import numpy as np + paddle.disable_static() - x_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - y_data = np.array([[5, 6], [7, 8]], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32') res = paddle.minimum(x, y) print(res.numpy()) #[[1. 2.] # [3. 4.]] - x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32) - y_data = np.array([1, 2], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32') + y = paddle.to_tensor([1, 2], dtype='float32') res = paddle.minimum(x, y, axis=1) print(res.numpy()) #[[[1. 1. 1.] # [2. 2. 2.]]] - x_data = np.array([2, 3, 5], dtype=np.float32) - y_data = np.array([1, 4, np.nan], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([2, 3, 5], dtype='float32') + y = paddle.to_tensor([1, 4, np.nan], dtype='float32') res = paddle.minimum(x, y) print(res.numpy()) #[ 1. 3. nan] - x_data = np.array([5, 3, np.inf], dtype=np.float32) - y_data = np.array([1, 4, 5], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([5, 3, np.inf], dtype='float32') + y = paddle.to_tensor([1, 4, 5], dtype='float32') res = paddle.minimum(x, y) print(res.numpy()) #[1. 3. 5.] @@ -800,27 +766,26 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None): Examples: .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - # x is a Tensor variable with following elements: + # x is a Tensor with following elements: # [[0.2, 0.3, 0.5, 0.9] # [0.1, 0.2, 0.6, 0.7]] # Each example is followed by the corresponding output tensor. - x_data = np.array([[0.2, 0.3, 0.5, 0.9],[0.1, 0.2, 0.6, 0.7]]).astype('float32') - x = paddle.to_variable(x_data) + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) out1 = paddle.sum(x) # [3.5] out2 = paddle.sum(x, axis=0) # [0.3, 0.5, 1.1, 1.6] out3 = paddle.sum(x, axis=-1) # [1.9, 1.6] out4 = paddle.sum(x, axis=1, keepdim=True) # [[1.9], [1.6]] - # y is a Tensor variable with shape [2, 2, 2] and elements as below: + # y is a Tensor with shape [2, 2, 2] and elements as below: # [[[1, 2], [3, 4]], # [[5, 6], [7, 8]]] # Each example is followed by the corresponding output tensor. - y_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype('float32') - y = paddle.to_variable(y_data) + y = paddle.to_tensor([[[1, 2], [3, 4]], + [[5, 6], [7, 8]]]) out5 = paddle.sum(y, axis=[1, 2]) # [10, 26] out6 = paddle.sum(y, axis=[0, 1]) # [16, 20] """ @@ -1121,9 +1086,9 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None): paddle.disable_static() - x = paddle.to_variable(data_x) - y = paddle.to_variable(data_y) - input = paddle.to_variable(data_input) + x = paddle.to_tensor(data_x) + y = paddle.to_tensor(data_y) + input = paddle.to_tensor(data_input) out = paddle.tensor.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 ) @@ -1204,12 +1169,10 @@ def logsumexp(x, axis=None, keepdim=False, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - x = np.array([[-1.5, 0., 2.], [3., 1.2, -2.4]]) - x = paddle.to_tensor(x) + x = paddle.to_tensor([[-1.5, 0., 2.], [3., 1.2, -2.4]]) out1 = paddle.logsumexp(x) # [3.4691226] out2 = paddle.logsumexp(x, 1) # [2.15317821, 3.15684602] @@ -1260,12 +1223,10 @@ def inverse(x, name=None): Examples: .. 
code-block:: python - import numpy as np import paddle - - mat_np = np.array([[2, 0], [0, 2]]).astype("float32") paddle.disable_static() - mat = paddle.to_variable(mat_np) + + mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32') inv = paddle.inverse(mat) print(inv) # [[0.5, 0], [0, 0.5]] @@ -1316,16 +1277,15 @@ def max(x, axis=None, keepdim=False, name=None): Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() # data_x is a variable with shape [2, 4] # the axis is a int element - data_x = np.array([[0.2, 0.3, 0.5, 0.9], - [0.1, 0.2, 0.6, 0.7]]) - x = paddle.to_variable(data_x) + + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) result1 = paddle.max(x) print(result1.numpy()) #[0.9] @@ -1342,9 +1302,9 @@ def max(x, axis=None, keepdim=False, name=None): # data_y is a variable with shape [2, 2, 2] # the axis is list - data_y = np.array([[[1.0, 2.0], [3.0, 4.0]], - [[5.0, 6.0], [7.0, 8.0]]]) - y = paddle.to_variable(data_y) + + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) result5 = paddle.max(y, axis=[1, 2]) print(result5.numpy()) #[4. 8.] @@ -1411,16 +1371,14 @@ def min(x, axis=None, keepdim=False, name=None): Examples: .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - # data_x is a variable with shape [2, 4] + # x is a tensor with shape [2, 4] # the axis is a int element - data_x = np.array([[0.2, 0.3, 0.5, 0.9], - [0.1, 0.2, 0.6, 0.7]]) - x = paddle.to_variable(data_x) + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) result1 = paddle.min(x) print(result1.numpy()) #[0.1] @@ -1435,11 +1393,10 @@ def min(x, axis=None, keepdim=False, name=None): #[[0.2] # [0.1]] - # data_y is a variable with shape [2, 2, 2] + # y is a variable with shape [2, 2, 2] # the axis is list - data_y = np.array([[[1.0, 2.0], [3.0, 4.0]], - [[5.0, 6.0], [7.0, 8.0]]]) - y = paddle.to_variable(data_y) + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) result5 = paddle.min(y, axis=[1, 2]) print(result5.numpy()) #[1. 5.] @@ -1596,11 +1553,9 @@ def clip(x, min=None, max=None, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - x = np.array([[1.2,3.5], [4.5,6.4]]).astype('float32') - x1 = paddle.to_variable(x) + x1 = paddle.to_tensor([[1.2, 3.5], [4.5, 6.4]], 'float32') out1 = paddle.clip(x1, min=3.5, max=5.0) out2 = paddle.clip(x1, min=2.5) print(out1.numpy()) @@ -1701,9 +1656,9 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): paddle.disable_static() - case1 = paddle.to_variable(case1) - case2 = paddle.to_variable(case2) - case3 = paddle.to_variable(case3) + case1 = paddle.to_tensor(case1) + case2 = paddle.to_tensor(case2) + case3 = paddle.to_tensor(case3) data1 = paddle.trace(case1) # data1.shape = [1] data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3] data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5] @@ -1894,10 +1849,8 @@ def isfinite(x, name=None): .. 
code-block:: python import paddle - import numpy as np paddle.disable_static() - x_np = np.array([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) - x = paddle.to_tensor(x_np) + x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) out = paddle.tensor.isfinite(x) print(out.numpy()) # [False True True False True False False] """ @@ -1925,10 +1878,8 @@ def isinf(x, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - x_np = np.array([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) - x = paddle.to_tensor(x_np) + x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) out = paddle.tensor.isinf(x) print(out.numpy()) # [ True False False True False False False] """ @@ -1956,10 +1907,8 @@ def isnan(x, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - x_np = np.array([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) - x = paddle.to_tensor(x_np) + x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) out = paddle.tensor.isnan(x) print(out.numpy()) # [False False False False False True True] """ @@ -2002,14 +1951,12 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None): .. 
code-block:: python import paddle - import numpy as np paddle.disable_static() # the axis is a int element - data_x = np.array([[0.2, 0.3, 0.5, 0.9], - [0.1, 0.2, 0.6, 0.7]]).astype(np.float32) - x = paddle.to_tensor(data_x) + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) out1 = paddle.prod(x) print(out1.numpy()) # [0.0002268] @@ -2035,9 +1982,8 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None): # int64 # the axis is list - data_y = np.array([[[1.0, 2.0], [3.0, 4.0]], - [[5.0, 6.0], [7.0, 8.0]]]) - y = paddle.to_tensor(data_y) + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) out6 = paddle.prod(y, [0, 1]) print(out6.numpy()) # [105. 384.] @@ -2070,12 +2016,10 @@ def sign(x, name=None): Examples: .. code-block:: python - import numpy as np import paddle - data = np.array([3.0, 0.0, -2.0, 1.7], dtype='float32') paddle.disable_static() - x = paddle.to_tensor(data) + x = paddle.to_tensor([3.0, 0.0, -2.0, 1.7], dtype='float32') out = paddle.sign(x=x) print(out) # [1.0, 0.0, -1.0, 1.0] """ @@ -2110,12 +2054,9 @@ def tanh(x, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() - - x_data = np.array([-0.4, -0.2, 0.1, 0.3]) - x = paddle.to_tensor(x_data) + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) out = paddle.tanh(x) print(out.numpy()) # [-0.37994896 -0.19737532 0.09966799 0.29131261] diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py index 19ac4afcbec8acd384072aef8fb615f80d9de16a..b38a1d0f5b7e92b0eac907170aad76a2b5c69bc1 100644 --- a/python/paddle/tensor/random.py +++ b/python/paddle/tensor/random.py @@ -60,7 +60,6 @@ def bernoulli(x, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() @@ -188,7 +187,6 @@ def standard_normal(shape, dtype=None, name=None): .. 
code-block:: python import paddle - import numpy as np paddle.disable_static() @@ -209,8 +207,9 @@ def standard_normal(shape, dtype=None, name=None): # [ 0.8086993 , 0.6868893 ]]] # random # example 3: attr shape is a Tensor, the data type must be int64 or int32. - shape_tensor = paddle.to_tensor(np.array([2, 3])) - out3 = paddle.standard_normal(shape_tensor) + shape_tensor = paddle.to_tensor([2, 3]) + result_3 = paddle.standard_normal(shape_tensor) + # [[-2.878077 , 0.17099959, 0.05111201] # random # [-0.3761474, -1.044801 , 1.1870178 ]] # random @@ -258,7 +257,6 @@ def normal(mean=0.0, std=1.0, shape=None, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() @@ -266,11 +264,11 @@ def normal(mean=0.0, std=1.0, shape=None, name=None): # [[ 0.17501129 0.32364586 1.561118 ] # random # [-1.7232178 1.1545963 -0.76156676]] # random - mean_tensor = paddle.to_tensor(np.array([1.0, 2.0, 3.0])) + mean_tensor = paddle.to_tensor([1.0, 2.0, 3.0]) out2 = paddle.normal(mean=mean_tensor) # [ 0.18644847 -1.19434458 3.93694787] # random - std_tensor = paddle.to_tensor(np.array([1.0, 2.0, 3.0])) + std_tensor = paddle.to_tensor([1.0, 2.0, 3.0]) out3 = paddle.normal(mean=mean_tensor, std=std_tensor) # [1.00780561 3.78457445 5.81058198] # random @@ -357,7 +355,6 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() @@ -379,8 +376,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): # example 3: # attr shape is a Tensor, the data type must be int64 or int32. - shape = np.array([2, 3]) - shape_tensor = paddle.to_tensor(shape) + shape_tensor = paddle.to_tensor([2, 3]) result_3 = paddle.tensor.random.uniform(shape_tensor) # if shape_tensor's value is [2, 3] # result_3 is: @@ -454,7 +450,6 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None): .. 
code-block:: python import paddle - import numpy as np paddle.disable_static() @@ -473,8 +468,10 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None): # example 3: # attr shape is a Tensor - shape_tensor = paddle.to_tensor(np.array([3])) - out3 = paddle.randint(low=-5, high=5, shape=shape_tensor) + + shape_tensor = paddle.to_tensor(3) + result_3 = paddle.randint(low=-5, high=5, shape=shape_tensor) + # [-2, 2, 3] # random # example 4: @@ -604,7 +601,6 @@ def rand(shape, dtype=None, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() # example 1: attr shape is a list which doesn't contain Tensor. @@ -624,8 +620,9 @@ def rand(shape, dtype=None, name=None): # [0.870881 , 0.2984597 ]]] # random # example 3: attr shape is a Tensor, the data type must be int64 or int32. - shape_tensor = paddle.to_tensor(np.array([2, 3])) - out2 = paddle.rand(shape_tensor) + shape_tensor = paddle.to_tensor([2, 3]) + result_3 = paddle.rand(shape_tensor) + # [[0.22920267, 0.841956 , 0.05981819], # random # [0.4836288 , 0.24573246, 0.7516129 ]] # random diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py index 552da3401c61d9c046c29bc86b429a8ae1242fa5..9d720ac20aab93cd8ce598760ca6fc00f095c2a0 100644 --- a/python/paddle/tensor/search.py +++ b/python/paddle/tensor/search.py @@ -66,16 +66,15 @@ def argsort(x, axis=-1, descending=False, name=None): Examples: .. 
code-block:: python import paddle - import numpy as np paddle.disable_static() - input_array = np.array([[[5,8,9,5], - [0,0,1,7], - [6,9,2,4]], - [[5,2,4,2], - [4,7,7,9], - [1,7,0,6]]]).astype(np.float32) - x = paddle.to_variable(input_array) + x = paddle.to_tensor([[[5,8,9,5], + [0,0,1,7], + [6,9,2,4]], + [[5,2,4,2], + [4,7,7,9], + [1,7,0,6]]], + dtype='float32') out1 = paddle.argsort(x=x, axis=-1) out2 = paddle.argsort(x=x, axis=0) out3 = paddle.argsort(x=x, axis=1) @@ -148,14 +147,12 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None): Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - data = np.array([[5,8,9,5], - [0,0,1,7], - [6,9,2,4]]) - x = paddle.to_variable(data) + x = paddle.to_tensor([[5,8,9,5], + [0,0,1,7], + [6,9,2,4]]) out1 = paddle.argmax(x) print(out1.numpy()) # 2 out2 = paddle.argmax(x, axis=1) @@ -222,14 +219,12 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None): Examples: .. code-block:: python - import numpy as np import paddle paddle.disable_static() - data = np.array([[5,8,9,5], - [0,0,1,7], - [6,9,2,4]]) - x = paddle.to_variable(data) + x = paddle.to_tensor([[5,8,9,5], + [0,0,1,7], + [6,9,2,4]]) out1 = paddle.argmin(x) print(out1.numpy()) # 4 out2 = paddle.argmin(x, axis=1) @@ -300,16 +295,12 @@ def index_select(x, index, axis=0, name=None): .. code-block:: python import paddle - import numpy as np paddle.disable_static() # Now we are in imperative mode - data = np.array([[1.0, 2.0, 3.0, 4.0], - [5.0, 6.0, 7.0, 8.0], - [9.0, 10.0, 11.0, 12.0]]) - data_index = np.array([0, 1, 1]).astype('int32') - - x = paddle.to_tensor(data) - index = paddle.to_tensor(data_index) + x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0]]) + index = paddle.to_tensor([0, 1, 1], dtype='int32') out_z1 = paddle.index_select(x=x, index=index) #[[1. 2. 3. 4.] # [5. 6. 7. 8.] @@ -363,48 +354,44 @@ def nonzero(input, as_tuple=False): Examples: .. 
code-block:: python import paddle - import paddle.fluid as fluid - import numpy as np - - data1 = np.array([[1.0, 0.0, 0.0], - [0.0, 2.0, 0.0], - [0.0, 0.0, 3.0]]) - data2 = np.array([0.0, 1.0, 0.0, 3.0]) - data3 = np.array([0.0, 0.0, 0.0]) - with fluid.dygraph.guard(): - x1 = fluid.dygraph.to_variable(data1) - x2 = fluid.dygraph.to_variable(data2) - x3 = fluid.dygraph.to_variable(data3) - out_z1 = paddle.nonzero(x1) - print(out_z1.numpy()) - #[[0 0] - # [1 1] - # [2 2]] - out_z1_tuple = paddle.nonzero(x1, as_tuple=True) - for out in out_z1_tuple: - print(out.numpy()) - #[[0] - # [1] - # [2]] - #[[0] - # [1] - # [2]] - out_z2 = paddle.nonzero(x2) - print(out_z2.numpy()) - #[[1] - # [3]] - out_z2_tuple = paddle.nonzero(x2, as_tuple=True) - for out in out_z2_tuple: - print(out.numpy()) - #[[1] - # [3]] - out_z3 = paddle.nonzero(x3) - print(out_z3.numpy()) - #[] - out_z3_tuple = paddle.nonzero(x3, as_tuple=True) - for out in out_z3_tuple: - print(out.numpy()) - #[] + + paddle.disable_static() + + x1 = paddle.to_tensor([[1.0, 0.0, 0.0], + [0.0, 2.0, 0.0], + [0.0, 0.0, 3.0]]) + x2 = paddle.to_tensor([0.0, 1.0, 0.0, 3.0]) + x3 = paddle.to_tensor([0.0, 0.0, 0.0]) + out_z1 = paddle.nonzero(x1) + print(out_z1.numpy()) + #[[0 0] + # [1 1] + # [2 2]] + out_z1_tuple = paddle.nonzero(x1, as_tuple=True) + for out in out_z1_tuple: + print(out.numpy()) + #[[0] + # [1] + # [2]] + #[[0] + # [1] + # [2]] + out_z2 = paddle.nonzero(x2) + print(out_z2.numpy()) + #[[1] + # [3]] + out_z2_tuple = paddle.nonzero(x2, as_tuple=True) + for out in out_z2_tuple: + print(out.numpy()) + #[[1] + # [3]] + out_z3 = paddle.nonzero(x3) + print(out_z3.numpy()) + #[] + out_z3_tuple = paddle.nonzero(x3, as_tuple=True) + for out in out_z3_tuple: + print(out.numpy()) + #[] """ list_out = [] shape = input.shape @@ -451,16 +438,15 @@ def sort(x, axis=-1, descending=False, name=None): Examples: .. 
code-block:: python import paddle - import numpy as np paddle.disable_static() - input_array = np.array([[[5,8,9,5], - [0,0,1,7], - [6,9,2,4]], - [[5,2,4,2], - [4,7,7,9], - [1,7,0,6]]]).astype(np.float32) - x = paddle.to_variable(input_array) + x = paddle.to_tensor([[[5,8,9,5], + [0,0,1,7], + [6,9,2,4]], + [[5,2,4,2], + [4,7,7,9], + [1,7,0,6]]], + dtype='float32') out1 = paddle.sort(x=x, axis=-1) out2 = paddle.sort(x=x, axis=0) out3 = paddle.sort(x=x, axis=1) @@ -536,16 +522,11 @@ def where(condition, x, y, name=None): .. code-block:: python import paddle - import numpy as np - import paddle.fluid as fluid - - x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float32") - y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float32") - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(x_i) - y = fluid.dygraph.to_variable(y_i) - out = paddle.where(x>1, x, y) + paddle.disable_static() + x = paddle.to_tensor([0.9383, 0.1983, 3.2, 1.2]) + y = paddle.to_tensor([1.0, 1.0, 1.0, 1.0]) + out = paddle.where(x>1, x, y) print(out.numpy()) #out: [1.0, 1.0, 3.2, 1.2] @@ -622,50 +603,41 @@ def index_sample(x, index): .. code-block:: python import paddle - import paddle.fluid as fluid - import numpy as np - - data = np.array([[1.0, 2.0, 3.0, 4.0], - [5.0, 6.0, 7.0, 8.0], - [9.0, 10.0, 11.0, 12.0]]).astype('float32') - - data_index = np.array([[0, 1, 2], - [1, 2, 3], - [0, 0, 0]]).astype('int32') - - target_data = np.array([[100, 200, 300, 400], - [500, 600, 700, 800], - [900, 1000, 1100, 1200]]).astype('int32') - - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(data) - index = fluid.dygraph.to_variable(data_index) - target = fluid.dygraph.to_variable(target_data) - - out_z1 = paddle.index_sample(x, index) - print(out_z1.numpy()) - #[[1. 2. 3.] - # [6. 7. 8.] - # [9. 9. 
9.]] - - # Use the index of the maximum value by topk op - # get the value of the element of the corresponding index in other tensors - top_value, top_index = fluid.layers.topk(x, k=2) - out_z2 = paddle.index_sample(target, top_index) - print(top_value.numpy()) - #[[ 4. 3.] - # [ 8. 7.] - # [12. 11.]] - - print(top_index.numpy()) - #[[3 2] - # [3 2] - # [3 2]] - - print(out_z2.numpy()) - #[[ 400 300] - # [ 800 700] - # [1200 1100]] + + paddle.disable_static() + x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0]], dtype='float32') + index = paddle.to_tensor([[0, 1, 2], + [1, 2, 3], + [0, 0, 0]], dtype='int32') + target = paddle.to_tensor([[100, 200, 300, 400], + [500, 600, 700, 800], + [900, 1000, 1100, 1200]], dtype='int32') + out_z1 = paddle.index_sample(x, index) + print(out_z1.numpy()) + #[[1. 2. 3.] + # [6. 7. 8.] + # [9. 9. 9.]] + + # Use the index of the maximum value by topk op + # get the value of the element of the corresponding index in other tensors + top_value, top_index = paddle.topk(x, k=2) + out_z2 = paddle.index_sample(target, top_index) + print(top_value.numpy()) + #[[ 4. 3.] + # [ 8. 7.] + # [12. 11.]] + + print(top_index.numpy()) + #[[3 2] + # [3 2] + # [3 2]] + + print(out_z2.numpy()) + #[[ 400 300] + # [ 800 700] + # [1200 1100]] """ @@ -707,18 +679,15 @@ def masked_select(x, mask, name=None): .. 
code-block:: python import paddle - import numpy as np - + paddle.disable_static() - data = np.array([[1.0, 2.0, 3.0, 4.0], - [5.0, 6.0, 7.0, 8.0], - [9.0, 10.0, 11.0, 12.0]]).astype('float32') - - mask_data = np.array([[True, False, False, False], - [True, True, False, False], - [True, False, False, False]]).astype('bool') - x = paddle.to_tensor(data) - mask = paddle.to_tensor(mask_data) + + x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0]]) + mask = paddle.to_tensor([[True, False, False, False], + [True, True, False, False], + [True, False, False, False]]) out = paddle.masked_select(x, mask) #[1.0 5.0 6.0 9.0] """ @@ -763,20 +732,17 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None): .. code-block:: python - import numpy as np import paddle paddle.disable_static() - data_1 = np.array([1, 4, 5, 7]) - tensor_1 = paddle.to_tensor(data_1) + tensor_1 = paddle.to_tensor([1, 4, 5, 7]) value_1, indices_1 = paddle.topk(tensor_1, k=1) print(value_1.numpy()) # [7] print(indices_1.numpy()) # [3] - data_2 = np.array([[1, 4, 5, 7], [2, 6, 2, 5]]) - tensor_2 = paddle.to_tensor(data_2) + tensor_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]]) value_2, indices_2 = paddle.topk(tensor_2, k=1) print(value_2.numpy()) # [[7]