diff --git a/doc/fluid/api_cn/layers_cn/abs_cn.rst b/doc/fluid/api_cn/layers_cn/abs_cn.rst index 3c0cdf4f06dd720c7c1281ede892b01e2089521c..755477bb7a34a13226d90a7a2c421af3eb792bcf 100644 --- a/doc/fluid/api_cn/layers_cn/abs_cn.rst +++ b/doc/fluid/api_cn/layers_cn/abs_cn.rst @@ -29,11 +29,9 @@ abs .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([-1, -2, -3, -4]).astype(np.float32) - x = paddle.to_variable(x_data) - res = paddle.abs(x) - print(res.numpy()) - # [1, 2, 3, 4] + + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.abs(x) + print(out.numpy()) + # [0.4 0.2 0.1 0.3] diff --git a/doc/fluid/api_cn/layers_cn/acos_cn.rst b/doc/fluid/api_cn/layers_cn/acos_cn.rst index dad19ff258cbf0b89b6d45fd86eb7cc69c730636..288c3121081ac22005f6bd0926cd6ed42d4675a0 100644 --- a/doc/fluid/api_cn/layers_cn/acos_cn.rst +++ b/doc/fluid/api_cn/layers_cn/acos_cn.rst @@ -30,11 +30,9 @@ arccosine函数。 .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32) - x = paddle.to_variable(x_data) - res = paddle.acos(x) - print(res.numpy()) - # [2.5293, 1.0573, 2.2711, 1.5336] + + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.acos(x) + print(out.numpy()) + # [1.98231317 1.77215425 1.47062891 1.26610367] diff --git a/doc/fluid/api_cn/layers_cn/asin_cn.rst b/doc/fluid/api_cn/layers_cn/asin_cn.rst index 16ee357e985a6dce813cf2569685b7cc48351363..7960b807a60d25f2f4bfb7e3b46695f99e706eac 100644 --- a/doc/fluid/api_cn/layers_cn/asin_cn.rst +++ b/doc/fluid/api_cn/layers_cn/asin_cn.rst @@ -29,11 +29,9 @@ arcsine函数。 .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32) - x = paddle.to_variable(x_data) - res = paddle.asin(x) - print(res.numpy()) - # [-0.9585, 0.5135, -0.7003, 0.0372] + + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.asin(x) + print(out.numpy()) + # [-0.41151685 -0.20135792 0.10016742 0.30469265] diff --git a/doc/fluid/api_cn/layers_cn/atan_cn.rst b/doc/fluid/api_cn/layers_cn/atan_cn.rst index eec8000171fe3192fa6f4267b7a955bbebff5b9a..2b5b11b6f9ffa00fc6bb09520713b22439fea4cf 100644 --- a/doc/fluid/api_cn/layers_cn/atan_cn.rst +++ b/doc/fluid/api_cn/layers_cn/atan_cn.rst @@ -29,11 +29,9 @@ arctangent函数。 .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32) - x = paddle.to_variable(x_data) - res = paddle.atan(x) - print(res.numpy()) - # [-0.6858, 0.4566, -0.5724, 0.0371] + + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.atan(x) + print(out.numpy()) + # [-0.38050638 -0.19739556 0.09966865 0.29145679] diff --git a/doc/fluid/api_cn/layers_cn/ceil_cn.rst b/doc/fluid/api_cn/layers_cn/ceil_cn.rst index 5564c641ecc9b722cc60e3fc85913c941542eb88..2ee8e634f28e5b154ba365b5f91a519a0b8758f1 100644 --- a/doc/fluid/api_cn/layers_cn/ceil_cn.rst +++ b/doc/fluid/api_cn/layers_cn/ceil_cn.rst @@ -31,12 +31,9 @@ ceil .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([[-1.5,6],[1,15.6]]).astype(np.float32) - x = paddle.to_variable(x_data) - res = paddle.ceil(x) - print(res.numpy()) - # [[-1. 6.] - # [ 1. 16.]] + + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.ceil(x) + print(out.numpy()) + # [-0. -0. 1. 1.] 
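Every hunk above (and most of the hunks below) applies the same mechanical migration: the deprecated ``paddle.to_variable`` + NumPy pattern is replaced by a single ``paddle.to_tensor`` call, which accepts plain Python lists as well as ndarrays and takes an explicit ``dtype``. A minimal before/after sketch of the pattern, assuming a Paddle 2.0-beta environment with dynamic graph mode active:

.. code-block:: python

    import paddle

    # Before (pattern removed by these hunks):
    #     x = paddle.to_variable(np.array([-0.4, -0.2, 0.1, 0.3]).astype(np.float32))
    # After: to_tensor accepts a plain Python list plus an explicit dtype.
    x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3], dtype='float32')
    out = paddle.abs(x)
    print(out.numpy())
    # [0.4 0.2 0.1 0.3]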
diff --git a/doc/fluid/api_cn/layers_cn/cos_cn.rst b/doc/fluid/api_cn/layers_cn/cos_cn.rst index 0b0d1cd6b8aeb296e385d5499f6718f40afc6829..7d0727576e0351584d8d2bd5fc7e0fa2f9f32546 100644 --- a/doc/fluid/api_cn/layers_cn/cos_cn.rst +++ b/doc/fluid/api_cn/layers_cn/cos_cn.rst @@ -32,12 +32,9 @@ cos .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([[-1,np.pi],[1,15.6]]).astype(np.float32) - x = paddle.to_variable(x_data) - res = paddle.cos(x) - print(res.numpy()) - # [[ 0.54030231 -1. ] - # [ 0.54030231 -0.99417763]] + + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.cos(x) + print(out.numpy()) + # [0.92106099 0.98006658 0.99500417 0.95533649] diff --git a/doc/fluid/api_cn/nn_cn/MaxPool1d_cn.rst b/doc/fluid/api_cn/nn_cn/MaxPool1d_cn.rst index cd6c92f9a064e442786f394ad3f61ccaed648611..e6cddc468a33abc836c54038ed9f8b1c78dc7b3a 100755 --- a/doc/fluid/api_cn/nn_cn/MaxPool1d_cn.rst +++ b/doc/fluid/api_cn/nn_cn/MaxPool1d_cn.rst @@ -52,7 +52,7 @@ MaxPool1d import numpy as np paddle.disable_static() - data = paddle.to_variable(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) MaxPool1d = nn.layer.MaxPool1d(kernel_size=2, stride=2, padding=0) pool_out = MaxPool1d(data) # pool_out shape: [1, 3, 16] diff --git a/doc/fluid/api_cn/nn_cn/PairwiseDistance_cn.rst b/doc/fluid/api_cn/nn_cn/PairwiseDistance_cn.rst index 94e1d04bbe23d9c5e49277902c9fd206a6e02c12..8f9e38dfe6b9b550189d78d53be8fb85dd7a5f03 100644 --- a/doc/fluid/api_cn/nn_cn/PairwiseDistance_cn.rst +++ b/doc/fluid/api_cn/nn_cn/PairwiseDistance_cn.rst @@ -30,12 +30,9 @@ PairwiseDistance .. code-block:: python import paddle - import numpy as np paddle.disable_static() - x_np = np.array([[1., 3.], [3., 5.]]).astype(np.float64) - y_np = np.array([[5., 6.], [7., 8.]]).astype(np.float64) - x = paddle.to_variable(x_np) - y = paddle.to_variable(y_np) + x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype='float64') + y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype='float64') dist = paddle.nn.PairwiseDistance() distance = dist(x, y) print(distance.numpy()) # [5. 5.] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Hardshrink_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Hardshrink_cn.rst index 61ba40a8f6e7b9b1a4f09fda94c26ea6b9c34830..0dea4122ddfa5349bb64cedb9a1583afaf680f0c 100644 --- a/doc/fluid/api_cn/nn_cn/activation_cn/Hardshrink_cn.rst +++ b/doc/fluid/api_cn/nn_cn/activation_cn/Hardshrink_cn.rst @@ -38,6 +38,6 @@ Hardshrink激活层 paddle.disable_static() - x = paddle.to_variable(np.array([-1, 0.3, 2.5])) + x = paddle.to_tensor([-1, 0.3, 2.5]) m = paddle.nn.Hardshrink() out = m(x) # [-1., 0., 2.5] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/l1_loss_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/l1_loss_cn.rst index 77c9a232a61d8f922a3cfd70c3e95508249f199b..7e2e408c23fde0f605b44f3b68a394fe74d35a0a 100644 --- a/doc/fluid/api_cn/nn_cn/functional_cn/l1_loss_cn.rst +++ b/doc/fluid/api_cn/nn_cn/functional_cn/l1_loss_cn.rst @@ -40,14 +40,11 @@ l1_loss .. 
code-block:: python - import numpy as np - import paddle - + import paddle paddle.disable_static() - input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32") - label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32") - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + + input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]]) + label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]]) l1_loss = paddle.nn.functional.l1_loss(input, label) print(l1_loss.numpy()) diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/margin_ranking_loss_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/margin_ranking_loss_cn.rst index edc1d7c7d1ffe659255e1f92a6a43e0d78af1bcf..febba0d453d55964da83b2ef54e47df456b147e9 100644 --- a/doc/fluid/api_cn/nn_cn/functional_cn/margin_ranking_loss_cn.rst +++ b/doc/fluid/api_cn/nn_cn/functional_cn/margin_ranking_loss_cn.rst @@ -40,13 +40,11 @@ Tensor, 如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'`` ,则形状 .. code-block:: python - import numpy as np - import paddle - + import paddle paddle.disable_static() - input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype('float32')) - other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype('float32')) - label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype('float32')) + input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32') + label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32') loss = paddle.nn.functional.margin_ranking_loss(input, other, label) print(loss.numpy()) # [0.75] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/mse_loss_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/mse_loss_cn.rst index 99041e8ac0bf5c0f5558c096d6e152b8b62b9094..b5213836569abff9e4f78e04f603f1b727183467 100644 --- a/doc/fluid/api_cn/nn_cn/functional_cn/mse_loss_cn.rst +++ b/doc/fluid/api_cn/nn_cn/functional_cn/mse_loss_cn.rst @@ -58,8 +58,8 @@ mse_loss # [array([0.04000002], dtype=float32)] # dynamic graph mode paddle.disable_static() - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) output = mse_loss(input, label) print(output.numpy()) # [0.04000002] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/nll_loss_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/nll_loss_cn.rst index 59a1c6355e304b7ac6d9dca44031408bda008f78..4840c999342c5e17715105ea5963dfff7b89112d 100644 --- a/doc/fluid/api_cn/nn_cn/functional_cn/nll_loss_cn.rst +++ b/doc/fluid/api_cn/nn_cn/functional_cn/nll_loss_cn.rst @@ -38,8 +38,8 @@ nll_loss place = paddle.CPUPlace() paddle.disable_static(place) - input = paddle.to_variable(input_np) + input = paddle.to_tensor(input_np) log_out = log_softmax(input) - label = paddle.to_variable(label_np) + label = paddle.to_tensor(label_np) result = nll_loss(log_out, label) print(result.numpy()) # [1.0720209] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/normalize_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/normalize_cn.rst index 04b5b185be37be17039652abf3e7c854b925d644..703a22ddef7b99fe18a9e8d52fb34747ed6cc106 100644 --- a/doc/fluid/api_cn/nn_cn/functional_cn/normalize_cn.rst +++ b/doc/fluid/api_cn/nn_cn/functional_cn/normalize_cn.rst @@ -42,7 +42,7 @@ normalize paddle.disable_static() x = np.arange(6, dtype=np.float32).reshape(2,3) - x = paddle.to_variable(x) + x = paddle.to_tensor(x) y = F.normalize(x) print(y.numpy()) # [[0. 
0.4472136 0.8944272 ] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/sigmoid_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/sigmoid_cn.rst index d5db5aa06ea6a8de5923d8c397c8745965ff18db..d58730bb9732ac4edb8fc2bfeeab60ae3ea5fafd 100644 --- a/doc/fluid/api_cn/nn_cn/functional_cn/sigmoid_cn.rst +++ b/doc/fluid/api_cn/nn_cn/functional_cn/sigmoid_cn.rst @@ -24,12 +24,10 @@ sigmoid 激活函数。 ::::::::: .. code-block:: python - import numpy as np import paddle import paddle.nn.functional as F paddle.disable_static() - input_data = np.array([1.0, 2.0, 3.0, 4.0]).astype('float32') - x = paddle.to_variable(input_data) + x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) output = F.sigmoid(x) print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376] diff --git a/doc/fluid/api_cn/nn_cn/hardshrink_cn.rst b/doc/fluid/api_cn/nn_cn/hardshrink_cn.rst index 66c492e749c5992f8f86bd06c6d521330b67c7ac..f849d89d5d77b1dbd8c7056333755413ef1e59d9 100644 --- a/doc/fluid/api_cn/nn_cn/hardshrink_cn.rst +++ b/doc/fluid/api_cn/nn_cn/hardshrink_cn.rst @@ -34,9 +34,7 @@ hardshrink激活层。计算公式如下: import paddle import paddle.nn.functional as F - import numpy as np paddle.disable_static() - - x = paddle.to_variable(np.array([-1, 0.3, 2.5])) + x = paddle.to_tensor([-1, 0.3, 2.5]) out = F.hardshrink(x) # [-1., 0., 2.5] diff --git a/doc/fluid/api_cn/nn_cn/layer_cn/Sigmoid_cn.rst b/doc/fluid/api_cn/nn_cn/layer_cn/Sigmoid_cn.rst index 560bc22dd57ddd2d4c1e60b0a7e238e169bc49ea..ad3d29982aa4276beacf09c57f723f82201e86b7 100644 --- a/doc/fluid/api_cn/nn_cn/layer_cn/Sigmoid_cn.rst +++ b/doc/fluid/api_cn/nn_cn/layer_cn/Sigmoid_cn.rst @@ -29,12 +29,10 @@ Sigmoid .. code-block:: python - import numpy as np import paddle paddle.disable_static() - input_data = np.array([1.0, 2.0, 3.0, 4.0]).astype('float32') m = paddle.nn.Sigmoid() - x = paddle.to_variable(input_data) + x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) output = m(x) print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376 diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/L1Loss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/L1Loss_cn.rst index f5a41b3a80888c195da84a470a45c87d70b08ed3..050a1c0b19073d294bb519a447b0ec2f284bae57 100644 --- a/doc/fluid/api_cn/nn_cn/loss_cn/L1Loss_cn.rst +++ b/doc/fluid/api_cn/nn_cn/loss_cn/L1Loss_cn.rst @@ -39,14 +39,11 @@ L1Loss .. 
code-block:: python - import numpy as np import paddle paddle.disable_static() - input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32") - label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32") - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]]) + label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]]) l1_loss = paddle.nn.loss.L1Loss() output = l1_loss(input, label) diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/MSELoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/MSELoss_cn.rst index 3ddeca33f5034d569fdf9362ceb83e05f1b35943..d147671a0d703dbbc4e57bed5d335c5b13ebd43c 100644 --- a/doc/fluid/api_cn/nn_cn/loss_cn/MSELoss_cn.rst +++ b/doc/fluid/api_cn/nn_cn/loss_cn/MSELoss_cn.rst @@ -63,8 +63,8 @@ MSELoss # dynamic graph mode paddle.disable_static() - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) output = mse_loss(input, label) print(output.numpy()) # [0.04000002] diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/MarginRankingLoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/MarginRankingLoss_cn.rst index ab7cd2175d25e3f3e724c3417e1b02db2158a895..e82ea4b0bca147c1439e3bef4330321a410ddcc1 100644 --- a/doc/fluid/api_cn/nn_cn/loss_cn/MarginRankingLoss_cn.rst +++ b/doc/fluid/api_cn/nn_cn/loss_cn/MarginRankingLoss_cn.rst @@ -46,15 +46,12 @@ MarginRankingLoss .. code-block:: python - - import numpy as np import paddle - paddle.disable_static() - input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype("float32")) - other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype("float32")) - label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype("float32")) + input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32') + label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32') margin_rank_loss = paddle.nn.MarginRankingLoss() loss = margin_rank_loss(input, other, label) print(loss.numpy()) # [0.75] diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/NLLLoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/NLLLoss_cn.rst index f2b1559091cc5c6f211a4fa64f2eaa77e869fc3a..93172ab232dc3384e931743394f2f5d72bdc5990 100644 --- a/doc/fluid/api_cn/nn_cn/loss_cn/NLLLoss_cn.rst +++ b/doc/fluid/api_cn/nn_cn/loss_cn/NLLLoss_cn.rst @@ -63,8 +63,8 @@ NLLLoss place = paddle.CPUPlace() paddle.disable_static(place) - input = paddle.to_variable(input_np) + input = paddle.to_tensor(input_np) log_out = log_softmax(input) - label = paddle.to_variable(label_np) + label = paddle.to_tensor(label_np) result = nll_loss(log_out, label) print(result.numpy()) # [1.0720209] diff --git a/doc/fluid/api_cn/tensor_cn/dot_cn.rst b/doc/fluid/api_cn/tensor_cn/dot_cn.rst index 242ea893d7b1ac3337e42d764aba4becdb9122d6..5914eb30d807572d0cc439690ea834c89a11a355 100644 --- a/doc/fluid/api_cn/tensor_cn/dot_cn.rst +++ b/doc/fluid/api_cn/tensor_cn/dot_cn.rst @@ -31,7 +31,7 @@ dot paddle.disable_static() x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z.numpy()) diff --git a/doc/fluid/api_cn/tensor_cn/gather_cn.rst b/doc/fluid/api_cn/tensor_cn/gather_cn.rst index 7cda6107ba2e08103d49dc65a5059341a024a0fd..ed1288b473a546be541f7ac8284d207f5324723a 100644 --- 
a/doc/fluid/api_cn/tensor_cn/gather_cn.rst +++ b/doc/fluid/api_cn/tensor_cn/gather_cn.rst @@ -1,4 +1,5 @@ .. _cn_api_paddle_tensor_gather + gather ------------------------------- diff --git a/doc/fluid/api_cn/tensor_cn/max_cn.rst b/doc/fluid/api_cn/tensor_cn/max_cn.rst index e3f1620fa1064f0db21f5fc308cd31521da95354..e0a3520d5490dd79e937172842d6546fe2904cd1 100644 --- a/doc/fluid/api_cn/tensor_cn/max_cn.rst +++ b/doc/fluid/api_cn/tensor_cn/max_cn.rst @@ -34,9 +34,8 @@ max # data_x is a variable with shape [2, 4] # the axis is a int element - data_x = np.array([[0.2, 0.3, 0.5, 0.9], - [0.1, 0.2, 0.6, 0.7]]) - x = paddle.to_variable(data_x) + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) result1 = paddle.max(x) print(result1.numpy()) #[0.9] @@ -53,9 +52,8 @@ max # data_y is a variable with shape [2, 2, 2] # the axis is list - data_y = np.array([[[1.0, 2.0], [3.0, 4.0]], - [[5.0, 6.0], [7.0, 8.0]]]) - y = paddle.to_variable(data_y) + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) result5 = paddle.max(y, axis=[1, 2]) print(result5.numpy()) #[4. 8.] diff --git a/doc/fluid/api_cn/tensor_cn/maximum_cn.rst b/doc/fluid/api_cn/tensor_cn/maximum_cn.rst index 3db694f291098d8ed4c8d2bd08bd4a56adb49f81..bad60bbd1d2efadbfd46e257665757523c1e4a1e 100644 --- a/doc/fluid/api_cn/tensor_cn/maximum_cn.rst +++ b/doc/fluid/api_cn/tensor_cn/maximum_cn.rst @@ -58,39 +58,30 @@ maximum import paddle import numpy as np - paddle.disable_static() - x_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - y_data = np.array([[5, 6], [7, 8]], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[1, 2], [3, 4]]) + y = paddle.to_tensor([[5, 6], [7, 8]]) res = paddle.maximum(x, y) print(res.numpy()) #[[5. 6.] # [7. 8.]] - x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32) - y_data = np.array([1, 2], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]]) + y = paddle.to_tensor([1, 2]) res = paddle.maximum(x, y, axis=1) print(res.numpy()) #[[[1. 2. 3.] # [2. 2. 3.]]] - x_data = np.array([2, 3, 5], dtype=np.float32) - y_data = np.array([1, 4, np.nan], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([2, 3, 5], dtype='float32') + y = paddle.to_tensor([1, 4, np.nan], dtype='float32') res = paddle.maximum(x, y) print(res.numpy()) #[ 2. 4. nan] - x_data = np.array([5, 3, np.inf], dtype=np.float32) - y_data = np.array([1, 4, 5], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([5, 3, np.inf], dtype='float32') + y = paddle.to_tensor([1, 4, 5], dtype='float32') res = paddle.maximum(x, y) print(res.numpy()) #[ 5. 4. inf] diff --git a/doc/fluid/api_cn/tensor_cn/min_cn.rst b/doc/fluid/api_cn/tensor_cn/min_cn.rst index 7231c1b20519c2fb807a05d6d35354177763830e..d3034d0d0054712bb80de2b2de69e7aa0a75ae18 100644 --- a/doc/fluid/api_cn/tensor_cn/min_cn.rst +++ b/doc/fluid/api_cn/tensor_cn/min_cn.rst @@ -26,16 +26,12 @@ min :::::::::: .. 
code-block:: python - import numpy as np import paddle - paddle.disable_static() - # data_x is a variable with shape [2, 4] # the axis is a int element - data_x = np.array([[0.2, 0.3, 0.5, 0.9], - [0.1, 0.2, 0.6, 0.7]]) - x = paddle.to_variable(data_x) + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) result1 = paddle.min(x) print(result1.numpy()) #[0.1] @@ -50,11 +46,9 @@ min #[[0.2] # [0.1]] - # data_y is a variable with shape [2, 2, 2] # the axis is list - data_y = np.array([[[1.0, 2.0], [3.0, 4.0]], - [[5.0, 6.0], [7.0, 8.0]]]) - y = paddle.to_variable(data_y) + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) result5 = paddle.min(y, axis=[1, 2]) print(result5.numpy()) #[1. 5.] diff --git a/doc/fluid/api_cn/tensor_cn/minimum_cn.rst b/doc/fluid/api_cn/tensor_cn/minimum_cn.rst index 1d04313385ff36c3104ec9fd173939bcae8e5c6f..1a8fda137aed190fdc730fa0ff07a1b21536a154 100644 --- a/doc/fluid/api_cn/tensor_cn/minimum_cn.rst +++ b/doc/fluid/api_cn/tensor_cn/minimum_cn.rst @@ -61,36 +61,28 @@ minimum import numpy as np paddle.disable_static() - x_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - y_data = np.array([[5, 6], [7, 8]], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32') res = paddle.minimum(x, y) print(res.numpy()) #[[1. 2.] # [3. 4.]] - x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32) - y_data = np.array([1, 2], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32') + y = paddle.to_tensor([1, 2], dtype='float32') res = paddle.minimum(x, y, axis=1) print(res.numpy()) #[[[1. 1. 1.] # [2. 2. 2.]]] - x_data = np.array([2, 3, 5], dtype=np.float32) - y_data = np.array([1, 4, np.nan], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([2, 3, 5], dtype='float32') + y = paddle.to_tensor([1, 4, np.nan], dtype='float32') res = paddle.minimum(x, y) print(res.numpy()) #[ 1. 3. nan] - x_data = np.array([5, 3, np.inf], dtype=np.float32) - y_data = np.array([1, 4, 5], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([5, 3, np.inf], dtype='float32') + y = paddle.to_tensor([1, 4, 5], dtype='float32') res = paddle.minimum(x, y) print(res.numpy()) #[1. 3. 5.] diff --git a/doc/fluid/api_cn/tensor_cn/sqrt_cn.rst b/doc/fluid/api_cn/tensor_cn/sqrt_cn.rst index ce74caa93efb368672577ff6665878b66183073c..18067626a15099296f4580d110863fc7f639f907 100644 --- a/doc/fluid/api_cn/tensor_cn/sqrt_cn.rst +++ b/doc/fluid/api_cn/tensor_cn/sqrt_cn.rst @@ -30,11 +30,9 @@ sqrt .. code-block:: python - import numpy as np import paddle paddle.disable_static() - x_data = np.array([0.1, 0.2, 0.3, 0.4]) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4]) out = paddle.sqrt(x) print(out.numpy()) # [0.31622777 0.4472136 0.54772256 0.63245553] diff --git a/doc/fluid/api_cn/tensor_cn/stack_cn.rst b/doc/fluid/api_cn/tensor_cn/stack_cn.rst index 3016b30fdb675a45d80aadaf662f8677209dddbb..33953b9821290741b713d1a2eedcee1432522075 100644 --- a/doc/fluid/api_cn/tensor_cn/stack_cn.rst +++ b/doc/fluid/api_cn/tensor_cn/stack_cn.rst @@ -67,16 +67,10 @@ stack .. 
code-block:: python import paddle - import numpy as np - - data1 = np.array([[1.0, 2.0]]) - data2 = np.array([[3.0, 4.0]]) - data3 = np.array([[5.0, 6.0]]) - paddle.disable_static() - x1 = paddle.to_variable(data1) - x2 = paddle.to_variable(data2) - x3 = paddle.to_variable(data3) + x1 = paddle.to_tensor([[1.0, 2.0]]) + x2 = paddle.to_tensor([[3.0, 4.0]]) + x3 = paddle.to_tensor([[5.0, 6.0]]) out = paddle.stack([x1, x2, x3], axis=0) print(out.shape) # [3, 1, 2] diff --git a/doc/paddle/api/alias_api_mapping b/doc/paddle/api/alias_api_mapping index 5e25d480bfd46c6ca1077599c769c70986303a63..ffc564cb08cf16eb49e4220cb1fa30916007031a 100644 --- a/doc/paddle/api/alias_api_mapping +++ b/doc/paddle/api/alias_api_mapping @@ -150,6 +150,7 @@ paddle.fluid.layers.bilinear_tensor_product paddle.static.nn.bilinear_tensor_pro paddle.fluid.framework.name_scope paddle.static.name_scope paddle.fluid.layers.is_empty paddle.is_empty,paddle.tensor.is_empty,paddle.tensor.logic.is_empty paddle.tensor.math.multiply paddle.multiply,paddle.tensor.multiply +paddle.tensor.creation.Tensor paddle.Tensor paddle.tensor.creation.to_tensor paddle.to_tensor,paddle.tensor.to_tensor paddle.fluid.initializer.Normal paddle.nn.initializer.Normal paddle.nn.layer.common.AlphaDropout paddle.nn.AlphaDropout,paddle.nn.layer.AlphaDropout @@ -378,7 +379,6 @@ paddle.tensor.manipulation.concat paddle.concat,paddle.tensor.concat paddle.tensor.stat.std paddle.std,paddle.tensor.std paddle.fluid.layers.dice_loss paddle.nn.functional.dice_loss,paddle.nn.functional.loss.dice_loss paddle.nn.functional.loss.binary_cross_entropy paddle.nn.functional.binary_cross_entropy -paddle.fluid.dygraph.base.to_variable paddle.to_variable,paddle.framework.to_variable paddle.fluid.dygraph.Linear paddle.nn.Linear,paddle.nn.layer.Linear,paddle.nn.layer.common.Linear paddle.fluid.layers.box_clip paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip paddle.nn.layer.activation.ReLU6 paddle.nn.ReLU6 diff --git a/doc/paddle/api/api_label b/doc/paddle/api/api_label index e6fe930c1d46b0a85a9865fd6bb0d5965b9a2146..57126c7d0779bee63ca708225893dc8dffc17c03 100644 --- a/doc/paddle/api/api_label +++ b/doc/paddle/api/api_label @@ -1,3 +1,4 @@ +to_tensor .. _api_paddle_to_tensor: train .. _api_paddle_dataset_wmt14_train: roi_pool .. _api_paddle_fluid_layers_roi_pool: expand .. _api_paddle_fluid_layers_expand: diff --git a/doc/paddle/api/paddle/fluid/layers/abs_cn.rst b/doc/paddle/api/paddle/fluid/layers/abs_cn.rst index 77eee850b290a80c994605271785e18711fbfc99..b294f3de09c7f329163e873c68975c90d294f3de 100644 --- a/doc/paddle/api/paddle/fluid/layers/abs_cn.rst +++ b/doc/paddle/api/paddle/fluid/layers/abs_cn.rst @@ -26,11 +26,8 @@ abs .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([-1, -2, -3, -4]).astype(np.float32) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-1, -2, -3, -4], dtype='float32') res = paddle.abs(x) print(res.numpy()) # [1, 2, 3, 4] diff --git a/doc/paddle/api/paddle/fluid/layers/acos_cn.rst b/doc/paddle/api/paddle/fluid/layers/acos_cn.rst index 89a82aa604a85637f1b6af6834824289c41e5e1f..99798c8653d79ebc48dac88b153056eadf058c75 100644 --- a/doc/paddle/api/paddle/fluid/layers/acos_cn.rst +++ b/doc/paddle/api/paddle/fluid/layers/acos_cn.rst @@ -27,11 +27,8 @@ arccosine函数。 .. 
code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.8183, 0.4912, -0.6444, 0.0371]) res = paddle.acos(x) print(res.numpy()) # [2.5293, 1.0573, 2.2711, 1.5336] diff --git a/doc/paddle/api/paddle/fluid/layers/asin_cn.rst b/doc/paddle/api/paddle/fluid/layers/asin_cn.rst index cc39e0355867c61ce1e5eb616039e2b88b5cab80..2b39afaa2a83424c3b435dbc53d8213195c72c00 100644 --- a/doc/paddle/api/paddle/fluid/layers/asin_cn.rst +++ b/doc/paddle/api/paddle/fluid/layers/asin_cn.rst @@ -26,11 +26,8 @@ arcsine函数。 .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.8183, 0.4912, -0.6444, 0.0371]) res = paddle.asin(x) print(res.numpy()) # [-0.9585, 0.5135, -0.7003, 0.0372] diff --git a/doc/paddle/api/paddle/fluid/layers/atan_cn.rst b/doc/paddle/api/paddle/fluid/layers/atan_cn.rst index e357b1264002a783ff4e8b45687d806d0e189cdc..ce1e632e48961f668b4c5c5fa53a5ec70cea0730 100644 --- a/doc/paddle/api/paddle/fluid/layers/atan_cn.rst +++ b/doc/paddle/api/paddle/fluid/layers/atan_cn.rst @@ -26,11 +26,8 @@ arctangent函数。 .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([-0.8183, 0.4912, -0.6444, 0.0371]) res = paddle.atan(x) print(res.numpy()) # [-0.6858, 0.4566, -0.5724, 0.0371] diff --git a/doc/paddle/api/paddle/fluid/layers/ceil_cn.rst b/doc/paddle/api/paddle/fluid/layers/ceil_cn.rst index 215099171ecffc0a5b76584a3690563801c0ae48..a3cdb47c9c1d4ec4a101d22075a7af67b032d9a7 100644 --- a/doc/paddle/api/paddle/fluid/layers/ceil_cn.rst +++ b/doc/paddle/api/paddle/fluid/layers/ceil_cn.rst @@ -28,11 +28,8 @@ ceil .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - x_data = np.array([[-1.5,6],[1,15.6]]).astype(np.float32) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([[-1.5,6], [1,15.6]]) res = paddle.ceil(x) print(res.numpy()) # [[-1. 6.] diff --git a/doc/paddle/api/paddle/fluid/layers/cos_cn.rst b/doc/paddle/api/paddle/fluid/layers/cos_cn.rst index 1fb232861bd1bdd6e9342af09ff510fc61322be0..35c6061757e614ac91c4e7ff3f8bdbd016b93e9b 100644 --- a/doc/paddle/api/paddle/fluid/layers/cos_cn.rst +++ b/doc/paddle/api/paddle/fluid/layers/cos_cn.rst @@ -30,10 +30,8 @@ cos import paddle import numpy as np - paddle.disable_static() - x_data = np.array([[-1,np.pi],[1,15.6]]).astype(np.float32) - x = paddle.to_variable(x_data) + x = paddle.to_tensor([[-1, np.pi], [1, 15.6]], dtype='float32') res = paddle.cos(x) print(res.numpy()) # [[ 0.54030231 -1. 
] diff --git a/doc/paddle/api/paddle/nn/functional/activation/hardshrink_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/hardshrink_cn.rst index 66c492e749c5992f8f86bd06c6d521330b67c7ac..e63411ffcfb57ded805757e3230ba0372a6ea638 100644 --- a/doc/paddle/api/paddle/nn/functional/activation/hardshrink_cn.rst +++ b/doc/paddle/api/paddle/nn/functional/activation/hardshrink_cn.rst @@ -34,9 +34,7 @@ hardshrink激活层。计算公式如下: import paddle import paddle.nn.functional as F - import numpy as np - paddle.disable_static() - x = paddle.to_variable(np.array([-1, 0.3, 2.5])) + x = paddle.to_tensor([-1, 0.3, 2.5]) out = F.hardshrink(x) # [-1., 0., 2.5] diff --git a/doc/paddle/api/paddle/nn/functional/loss/l1_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/l1_loss_cn.rst index 77c9a232a61d8f922a3cfd70c3e95508249f199b..ff4fc9bf4c99a86a56c7ee3ff1dbb5da4042bbfe 100644 --- a/doc/paddle/api/paddle/nn/functional/loss/l1_loss_cn.rst +++ b/doc/paddle/api/paddle/nn/functional/loss/l1_loss_cn.rst @@ -40,14 +40,10 @@ l1_loss .. code-block:: python - import numpy as np import paddle - paddle.disable_static() - input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32") - label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32") - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]]) + label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]]) l1_loss = paddle.nn.functional.l1_loss(input, label) print(l1_loss.numpy()) diff --git a/doc/paddle/api/paddle/nn/functional/loss/margin_ranking_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/margin_ranking_loss_cn.rst index edc1d7c7d1ffe659255e1f92a6a43e0d78af1bcf..7cb375833858922cc173322895b2a22120918ba2 100644 --- a/doc/paddle/api/paddle/nn/functional/loss/margin_ranking_loss_cn.rst +++ b/doc/paddle/api/paddle/nn/functional/loss/margin_ranking_loss_cn.rst @@ -40,13 +40,11 @@ Tensor, 如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'`` ,则形状 .. 
code-block:: python - import numpy as np import paddle - paddle.disable_static() - input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype('float32')) - other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype('float32')) - label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype('float32')) + input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32') + label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32') loss = paddle.nn.functional.margin_ranking_loss(input, other, label) print(loss.numpy()) # [0.75] diff --git a/doc/paddle/api/paddle/nn/functional/loss/mse_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/mse_loss_cn.rst index 99041e8ac0bf5c0f5558c096d6e152b8b62b9094..b5213836569abff9e4f78e04f603f1b727183467 100644 --- a/doc/paddle/api/paddle/nn/functional/loss/mse_loss_cn.rst +++ b/doc/paddle/api/paddle/nn/functional/loss/mse_loss_cn.rst @@ -58,8 +58,8 @@ mse_loss # [array([0.04000002], dtype=float32)] # dynamic graph mode paddle.disable_static() - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) output = mse_loss(input, label) print(output.numpy()) # [0.04000002] diff --git a/doc/paddle/api/paddle/nn/functional/loss/nll_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/nll_loss_cn.rst index 59a1c6355e304b7ac6d9dca44031408bda008f78..4840c999342c5e17715105ea5963dfff7b89112d 100644 --- a/doc/paddle/api/paddle/nn/functional/loss/nll_loss_cn.rst +++ b/doc/paddle/api/paddle/nn/functional/loss/nll_loss_cn.rst @@ -38,8 +38,8 @@ nll_loss place = paddle.CPUPlace() paddle.disable_static(place) - input = paddle.to_variable(input_np) + input = paddle.to_tensor(input_np) log_out = log_softmax(input) - label = paddle.to_variable(label_np) + label = paddle.to_tensor(label_np) result = nll_loss(log_out, label) print(result.numpy()) # [1.0720209] diff --git a/doc/paddle/api/paddle/nn/functional/norm/normalize_cn.rst b/doc/paddle/api/paddle/nn/functional/norm/normalize_cn.rst index 04b5b185be37be17039652abf3e7c854b925d644..703a22ddef7b99fe18a9e8d52fb34747ed6cc106 100644 --- a/doc/paddle/api/paddle/nn/functional/norm/normalize_cn.rst +++ b/doc/paddle/api/paddle/nn/functional/norm/normalize_cn.rst @@ -42,7 +42,7 @@ normalize paddle.disable_static() x = np.arange(6, dtype=np.float32).reshape(2,3) - x = paddle.to_variable(x) + x = paddle.to_tensor(x) y = F.normalize(x) print(y.numpy()) # [[0. 0.4472136 0.8944272 ] diff --git a/doc/paddle/api/paddle/nn/layer/activation/Hardshrink_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Hardshrink_cn.rst index 61ba40a8f6e7b9b1a4f09fda94c26ea6b9c34830..124b6f3ea93642ecab7a935e5eb16adc05ba9a13 100644 --- a/doc/paddle/api/paddle/nn/layer/activation/Hardshrink_cn.rst +++ b/doc/paddle/api/paddle/nn/layer/activation/Hardshrink_cn.rst @@ -34,10 +34,7 @@ Hardshrink激活层 .. code-block:: python import paddle - import numpy as np - paddle.disable_static() - - x = paddle.to_variable(np.array([-1, 0.3, 2.5])) + x = paddle.to_tensor([-1, 0.3, 2.5]) m = paddle.nn.Hardshrink() out = m(x) # [-1., 0., 2.5] diff --git a/doc/paddle/api/paddle/nn/layer/activation/Sigmoid_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Sigmoid_cn.rst index 560bc22dd57ddd2d4c1e60b0a7e238e169bc49ea..10698265ff6a4f4a149ec37f0bb198d51e2d402a 100644 --- a/doc/paddle/api/paddle/nn/layer/activation/Sigmoid_cn.rst +++ b/doc/paddle/api/paddle/nn/layer/activation/Sigmoid_cn.rst @@ -29,12 +29,9 @@ Sigmoid .. 
code-block:: python - import numpy as np import paddle - paddle.disable_static() - input_data = np.array([1.0, 2.0, 3.0, 4.0]).astype('float32') m = paddle.nn.Sigmoid() - x = paddle.to_variable(input_data) + x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) output = m(x) print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376 diff --git a/doc/paddle/api/paddle/nn/layer/distance/PairwiseDistance_cn.rst b/doc/paddle/api/paddle/nn/layer/distance/PairwiseDistance_cn.rst index 94e1d04bbe23d9c5e49277902c9fd206a6e02c12..8f9e38dfe6b9b550189d78d53be8fb85dd7a5f03 100644 --- a/doc/paddle/api/paddle/nn/layer/distance/PairwiseDistance_cn.rst +++ b/doc/paddle/api/paddle/nn/layer/distance/PairwiseDistance_cn.rst @@ -30,12 +30,9 @@ PairwiseDistance .. code-block:: python import paddle - import numpy as np paddle.disable_static() - x_np = np.array([[1., 3.], [3., 5.]]).astype(np.float64) - y_np = np.array([[5., 6.], [7., 8.]]).astype(np.float64) - x = paddle.to_variable(x_np) - y = paddle.to_variable(y_np) + x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype='float64') + y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype='float64') dist = paddle.nn.PairwiseDistance() distance = dist(x, y) print(distance.numpy()) # [5. 5.] diff --git a/doc/paddle/api/paddle/nn/layer/loss/L1Loss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/L1Loss_cn.rst index f5a41b3a80888c195da84a470a45c87d70b08ed3..050a1c0b19073d294bb519a447b0ec2f284bae57 100644 --- a/doc/paddle/api/paddle/nn/layer/loss/L1Loss_cn.rst +++ b/doc/paddle/api/paddle/nn/layer/loss/L1Loss_cn.rst @@ -39,14 +39,11 @@ L1Loss .. code-block:: python - import numpy as np import paddle paddle.disable_static() - input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32") - label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32") - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]]) + label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]]) l1_loss = paddle.nn.loss.L1Loss() output = l1_loss(input, label) diff --git a/doc/paddle/api/paddle/nn/layer/loss/MSELoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/MSELoss_cn.rst index 3ddeca33f5034d569fdf9362ceb83e05f1b35943..d147671a0d703dbbc4e57bed5d335c5b13ebd43c 100644 --- a/doc/paddle/api/paddle/nn/layer/loss/MSELoss_cn.rst +++ b/doc/paddle/api/paddle/nn/layer/loss/MSELoss_cn.rst @@ -63,8 +63,8 @@ MSELoss # dynamic graph mode paddle.disable_static() - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) output = mse_loss(input, label) print(output.numpy()) # [0.04000002] diff --git a/doc/paddle/api/paddle/nn/layer/loss/MarginRankingLoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/MarginRankingLoss_cn.rst index ab7cd2175d25e3f3e724c3417e1b02db2158a895..4d840b1751cd3146415ee075ac7a3838f47fbe87 100644 --- a/doc/paddle/api/paddle/nn/layer/loss/MarginRankingLoss_cn.rst +++ b/doc/paddle/api/paddle/nn/layer/loss/MarginRankingLoss_cn.rst @@ -46,15 +46,11 @@ MarginRankingLoss .. 
code-block:: python - - import numpy as np - import paddle - + import paddle paddle.disable_static() - - input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype("float32")) - other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype("float32")) - label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype("float32")) + input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32') + label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32') margin_rank_loss = paddle.nn.MarginRankingLoss() loss = margin_rank_loss(input, other, label) print(loss.numpy()) # [0.75] diff --git a/doc/paddle/api/paddle/nn/layer/loss/NLLLoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/NLLLoss_cn.rst index f2b1559091cc5c6f211a4fa64f2eaa77e869fc3a..30d19fedfc3983d05b96fb9deabe5e444448d71a 100644 --- a/doc/paddle/api/paddle/nn/layer/loss/NLLLoss_cn.rst +++ b/doc/paddle/api/paddle/nn/layer/loss/NLLLoss_cn.rst @@ -55,16 +55,16 @@ NLLLoss log_softmax = paddle.nn.LogSoftmax(axis=1) input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ], - [0.53331435, 0.07999352, 0.8549948 ], - [0.25879037, 0.39530203, 0.698465 ], - [0.73427284, 0.63575995, 0.18827209], - [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32) + [0.53331435, 0.07999352, 0.8549948 ], + [0.25879037, 0.39530203, 0.698465 ], + [0.73427284, 0.63575995, 0.18827209], + [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32) label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64) place = paddle.CPUPlace() paddle.disable_static(place) - input = paddle.to_variable(input_np) + input = paddle.to_tensor(input_np) log_out = log_softmax(input) - label = paddle.to_variable(label_np) + label = paddle.to_tensor(label_np) result = nll_loss(log_out, label) print(result.numpy()) # [1.0720209] diff --git a/doc/paddle/api/paddle/nn/layer/pooling/MaxPool1d_cn.rst b/doc/paddle/api/paddle/nn/layer/pooling/MaxPool1d_cn.rst index cd6c92f9a064e442786f394ad3f61ccaed648611..e6cddc468a33abc836c54038ed9f8b1c78dc7b3a 100755 --- a/doc/paddle/api/paddle/nn/layer/pooling/MaxPool1d_cn.rst +++ b/doc/paddle/api/paddle/nn/layer/pooling/MaxPool1d_cn.rst @@ -52,7 +52,7 @@ MaxPool1d import numpy as np paddle.disable_static() - data = paddle.to_variable(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) MaxPool1d = nn.layer.MaxPool1d(kernel_size=2, stride=2, padding=0) pool_out = MaxPool1d(data) # pool_out shape: [1, 3, 16] diff --git a/doc/paddle/api/paddle/tensor/creation/Tensor_cn.rst b/doc/paddle/api/paddle/tensor/creation/Tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f6e15aee81bf14e1f299ebb22319065628f8ace7 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/Tensor_cn.rst @@ -0,0 +1,1426 @@ +.. _cn_api_paddle_Tensor: + +Tensor +------------------------------- + +.. py:class:: paddle.Tensor + +``Tensor`` 是Paddle中最为基础的数据结构,有几种创建Tensor的不同方式: + +- 用预先存在的 ``data`` 数据创建1个Tensor,请参考 :ref:`cn_api_paddle_to_tensor` +- 创建一个指定 ``shape`` 的Tensor,请参考 :ref:`cn_api_tensor_ones` 、 :ref:`cn_api_tensor_zeros`、 :ref:`cn_api_tensor_full` +- 创建一个与其他Tensor具有相同 ``shape`` 与 ``dtype`` 的Tensor,请参考 :ref:`cn_api_tensor_ones_like` 、 :ref:`cn_api_tensor_zeros_like` 、 :ref:`cn_api_tensor_full_like` + +.. py:attribute:: dtype + +查看一个Tensor的数据类型,支持:'bool','float16','float32','float64','uint8','int8','int16','int32','int64' 类型。 + +**代码示例** + + .. 
code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([1.0, 2.0, 3.0]) + print("tensor's dtype is: {}".format(x.dtype)) + +.. py:attribute:: grad + +查看一个Tensor的梯度,数据类型为numpy\.ndarray。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False) + y = paddle.to_tensor([4.0, 5.0, 6.0], stop_gradient=False) + z = x * y + z.backward() + print("tensor's grad is: {}".format(x.grad)) + +.. py:attribute:: name + +查看一个Tensor的name,Tensor的name是其唯一标识符,为Python的字符串类型。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor name: ", paddle.to_tensor(1).name) + # Tensor name: generated_tensor_0 + +.. py:attribute:: ndim + +查看一个Tensor的维度,也称作rank。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor's number of dimensions: ", paddle.to_tensor([[1, 2], [3, 4]]).ndim) + # Tensor's number of dimensions: 2 + +.. py:attribute:: persistable + +查看一个Tensor的persistable属性,该属性为True时表示持久性变量,持久性变量在每次迭代之后都不会删除。模型参数、学习率等Tensor,都是 +持久性变量。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Whether Tensor is persistable: ", paddle.to_tensor(1).persistable) + # Whether Tensor is persistable: False + + +.. py:attribute:: place + +查看一个Tensor的设备位置,Tensor可能的设备位置有三种:CPU/GPU/固定内存,其中固定内存也称为不可分页内存或锁页内存, +其与GPU之间具有更高的读写效率,并且支持异步传输,这对网络整体性能会有进一步提升,但其缺点是分配空间过多时可能会降低主机系统的性能, +因为其减少了用于存储虚拟内存数据的可分页内存。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + cpu_tensor = paddle.to_tensor(1, place=paddle.CPUPlace()) + print(cpu_tensor.place) + +.. py:attribute:: shape + +查看一个Tensor的shape,shape是Tensor的一个重要的概念,其描述了tensor在每个维度上的元素数量。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor's shape: ", paddle.to_tensor([[1, 2], [3, 4]]).shape) + # Tensor's shape: [2, 2] + +.. py:attribute:: stop_gradient + +查看一个Tensor是否计算并传播梯度,如果stop_gradient为True,则该Tensor不会计算梯度,并会阻绝Autograd的梯度传播。 +反之,则会计算梯度并传播梯度。用户自行创建的Tensor,默认是True,模型参数的stop_gradient都为False。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor's stop_gradient: ", paddle.to_tensor([[1, 2], [3, 4]]).stop_gradient) + # Tensor's stop_gradient: True + +.. py:method:: abs(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_abs` + +.. py:method:: acos(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_acos` + +.. py:method:: add(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_add` + +.. py:method:: addcmul(tensor1, tensor2, value=1.0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_addcmul` + +.. py:method:: addmm(x, y, beta=1.0, alpha=1.0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_addmm` + +.. py:method:: allclose(y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_allclose` + +.. py:method:: argmax(axis=None, keepdim=False, dtype=int64, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_argmax` + +.. py:method:: argmin(axis=None, keepdim=False, dtype=int64, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_argmin` + +.. py:method:: argsort(axis=-1, descending=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_argsort`
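+
+下面给出一个简要示例(假设运行于动态图模式),演示如何以成员方法的方式链式调用本页列出的运算;成员方法与同名的 ``paddle.*`` 函数功能一致:
+
+**代码示例**
+    .. code-block:: python
+
+        import paddle
+        paddle.disable_static()
+        x = paddle.to_tensor([-1.0, 2.0, -3.0])
+        y = paddle.to_tensor([1.0, 1.0, 1.0])
+        # x.abs() 等价于 paddle.abs(x);成员方法返回新的Tensor,因此可以链式调用
+        out = x.abs().add(y)
+        print(out.numpy())  # [2. 3. 4.]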
+.. py:method:: asin(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_asin` + +.. py:method:: astype(dtype) + +将Tensor的类型转换为 ``dtype`` ,并返回一个新的Tensor。 + +参数: + - **dtype** (str) - 转换后的dtype,支持'bool','float16','float32','float64','int8','int16', + 'int32','int64','uint8'。 + +返回:类型转换后的新的Tensor + +返回类型:Tensor + +**代码示例** + .. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor(1.0) + print("original tensor's dtype is: {}".format(x.dtype)) + print("new tensor's dtype is: {}".format(x.astype('float64').dtype)) + +.. py:method:: atan(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_atan` + +.. py:method:: backward(retain_graph=False) + +从当前Tensor开始计算反向的神经网络,传导并计算计算图中Tensor的梯度。 + +参数: + - **retain_graph** (bool, optional) - 如果为False,反向计算图将被释放。如果在backward()之后继续添加OP, + 需要设置为True,此时之前的反向计算图会保留。将其设置为False会更加节省内存。默认值:False。 + +返回:无 + +**代码示例** + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x = np.ones([2, 2], np.float32) + inputs = [] + for _ in range(10): + tmp = paddle.to_tensor(x) + # If we don't set tmp's stop_gradient to False, every path to loss will have no gradient, + # since nothing on those paths requires a gradient. + tmp.stop_gradient=False + inputs.append(tmp) + ret = paddle.sums(inputs) + loss = paddle.reduce_sum(ret) + loss.backward() + +.. py:method:: bmm(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_bmm` + +.. py:method:: broadcast_to(shape, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_broadcast_to` + +.. py:method:: cast(dtype) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_cast` + +.. py:method:: ceil(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_ceil` + +.. py:method:: cholesky(upper=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cholesky` + +.. py:method:: chunk(chunks, axis=0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_chunk` + + +.. py:method:: clear_gradient() + +清除当前Tensor的梯度。 + +返回:无 + +**代码示例** + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + x = np.ones([2, 2], np.float32) + inputs2 = [] + for _ in range(10): + tmp = paddle.to_tensor(x) + tmp.stop_gradient=False + inputs2.append(tmp) + ret2 = paddle.sums(inputs2) + loss2 = paddle.reduce_sum(ret2) + loss2.backward() + print(loss2.gradient()) + loss2.clear_gradient() + print("After clear {}".format(loss2.gradient())) + + +.. py:method:: clip(min=None, max=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_clip` + +.. py:method:: concat(axis=0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_concat` + +.. py:method:: cos(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_cos` + +.. py:method:: cosh(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_cosh` + +.. py:method:: cross(y, axis=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_linalg_cross` + +.. py:method:: cumsum(axis=None, dtype=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_cumsum` + +.. py:method:: detach() + +返回一个新的Tensor,从当前计算图分离。 + +返回:与当前计算图分离的Tensor。 + +**代码示例** +
.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32') + linear = paddle.nn.Linear(32, 64) + data = paddle.to_tensor(data) + x = linear(data) + y = x.detach() + +.. py:method:: dim() + +查看一个Tensor的维度,也称作rank。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor's number of dimensions: ", paddle.to_tensor([[1, 2], [3, 4]]).dim()) + # Tensor's number of dimensions: 2 + +.. py:method:: dist(y, p=2) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_linalg_dist` + +.. py:method:: divide(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_divide` + +.. py:method:: dot(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_linalg_dot` + +.. py:method:: elementwise_add(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_add` + +.. py:method:: elementwise_div(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_div` + +.. py:method:: elementwise_floordiv(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_floordiv` + +.. py:method:: elementwise_mod(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_mod` + +.. py:method:: elementwise_pow(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_pow` + +.. py:method:: elementwise_sub(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_sub` + +.. py:method:: elementwise_sum(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_elementwise_sum` + +.. py:method:: equal(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_equal` + +.. py:method:: equal_all(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_equal_all` + +.. py:method:: erf(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_erf` + +.. py:method:: exp(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_exp` + +.. py:method:: expand(shape, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_expand` + +.. py:method:: expand_as(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_expand_as` + +.. py:method:: flatten(start_axis=0, stop_axis=-1, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_flatten` + +.. py:method:: flip(axis, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_flip` + +.. py:method:: floor(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_floor` + +.. py:method:: floor_divide(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_floor_divide` + +.. py:method:: floor_mod(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_remainder` + +.. py:method:: gather(index, axis=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_gather` + +.. py:method:: gather_nd(index, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_gather_nd` + +.. py:method:: gradient() + +与 ``Tensor.grad`` 相同,查看一个Tensor的梯度,数据类型为numpy\.ndarray。 + +返回:该Tensor的梯度 + +返回类型:numpy\.ndarray + +**代码示例** + .. 
code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False) + y = paddle.to_tensor([4.0, 5.0, 6.0], stop_gradient=False) + z = x * y + z.backward() + print("tensor's grad is: {}".format(x.grad)) + +.. py:method:: greater_equal(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_greater_equal` + +.. py:method:: greater_than(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_greater_than` + +.. py:method:: has_inf() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_has_inf` + +.. py:method:: has_nan() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_has_nan` + +.. py:method:: histogram(bins=100, min=0, max=0) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_histogram` + +.. py:method:: increment(value=1.0, in_place=True) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_increment` + +.. py:method:: index_sample(index) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_search_index_sample` + +.. py:method:: index_select(index, axis=0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_search_index_select` + +.. py:method:: inverse(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_inverse` + +.. py:method:: is_empty(cond=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_is_empty` + +.. py:method:: isfinite(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_isfinite` + +.. py:method:: isinf(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_isinf` + +.. py:method:: isnan(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_isnan` + +.. py:method:: kron(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_kron` + +.. py:method:: less_equal(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_less_equal` + +.. py:method:: less_than(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_less_than` + +.. py:method:: log(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_log` + +.. py:method:: log1p(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_log1p` + +.. py:method:: logical_and(y, out=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_logical_and` + +.. py:method:: logical_not(out=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_logical_not` + +.. py:method:: logical_or(y, out=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_logical_or` + +.. py:method:: logical_xor(y, out=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_logical_xor` + +.. py:method:: logsigmoid() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_logsigmoid` + +.. py:method:: logsumexp(axis=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_math_logsumexp` + +.. py:method:: masked_select(mask, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_masked_select` + +.. py:method:: matmul(y, transpose_x=False, transpose_y=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_matmul` + +.. py:method:: max(axis=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_max` + +.. 
py:method:: maximum(y, axis=-1, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_maximum` + +.. py:method:: mean(axis=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_mean` + +.. py:method:: min(axis=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_min` + +.. py:method:: minimum(y, axis=-1, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_minimum` + +.. py:method:: mm(mat2, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_mm` + +.. py:method:: mod(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_remainder` + +.. py:method:: multiplex(index) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_multiplex` + +.. py:method:: multiply(y, axis=-1, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_multiply` + +.. py:method:: ndimension() + +查看一个Tensor的维度,也称作rank。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor's number of dimensions: ", paddle.to_tensor([[1, 2], [3, 4]]).ndimension()) + # Tensor's number of dimensions: 2 + +.. py:method:: nonzero(as_tuple=False) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_search_nonzero` + +.. py:method:: norm(p=fro, axis=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_norm` + +.. py:method:: not_equal(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_not_equal` + +.. py:method:: numel(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_numel` + +.. py:method:: numpy() + +将当前Tensor转化为numpy\.ndarray。 + +返回:Tensor转化成的numpy\.ndarray。 + +返回类型:numpy\.ndarray + +**代码示例** + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32') + linear = paddle.nn.Linear(32, 64) + data = paddle.to_tensor(data) + x = linear(data) + print(x.numpy()) + +.. py:method:: pow(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_pow` + +.. py:method:: prod(axis=None, keepdim=False, dtype=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_prod` + +.. py:method:: rank() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_rank` + +.. py:method:: reciprocal(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reciprocal` + +.. py:method:: reduce_all(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_all` + +.. py:method:: reduce_any(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_any` + +.. py:method:: reduce_max(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_max` + +.. py:method:: reduce_mean(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_mean` + +.. py:method:: reduce_min(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_min` + +.. py:method:: reduce_prod(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_prod` + +.. py:method:: reduce_sum(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_sum`
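+
+下面给出一个简要示例(假设运行于动态图模式),演示上文 ``max`` 、 ``min`` 等归约类成员方法的用法,其行为与对应的 ``paddle.max`` 、 ``paddle.min`` 函数一致:
+
+**代码示例**
+    .. code-block:: python
+
+        import paddle
+        paddle.disable_static()
+        x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
+        # 不指定 axis 时,对全部元素做归约
+        print(x.max().numpy())        # [4.]
+        # 指定 axis 时,沿该维度做归约
+        print(x.min(axis=0).numpy())  # [1. 2.]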
py:method:: remainder(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_remainder` + +.. py:method:: reshape(shape, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reshape` + +.. py:method:: reverse(axis, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reverse` + +.. py:method:: roll(shifts, axis=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_manipulation_roll` + +.. py:method:: round(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_round` + +.. py:method:: rsqrt(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_rsqrt` + +.. py:method:: scale(scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_scale` + +.. py:method:: scatter(index, updates, overwrite=True, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_scatter` + +.. py:method:: scatter_nd(updates, shape, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_scatter_nd` + +.. py:method:: scatter_nd_add(index, updates, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_scatter_nd_add` + +.. py:method:: set_value(value) + +设置当前Tensor的值。 + +参数: + - **value** (Tensor|np.ndarray) - 需要被设置的值,类型为Tensor或者numpy\.array。 + +**代码示例** + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + data = np.ones([3, 1024], dtype='float32') + linear = paddle.nn.Linear(1024, 4) + input = paddle.to_tensor(data) + linear(input) # call with default weight + custom_weight = np.random.randn(1024, 4).astype("float32") + linear.weight.set_value(custom_weight) # change existing weight + out = linear(input) # call with different weight + +返回:计算后的Tensor + +.. py:method:: shard_index(index_num, nshards, shard_id, ignore_value=-1) + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_shard_index` + +.. py:method:: sigmoid() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_sigmoid` + +.. py:method:: sign(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_sign` + +.. py:method:: sin(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_sin` + +.. py:method:: sinh(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_sinh` + +.. py:method:: size() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_size` + +.. py:method:: slice(axes, starts, ends) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_slice` + +.. py:method:: softplus() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_softplus` + +.. py:method:: softsign() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_softsign` + +.. py:method:: sort(axis=-1, descending=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_sort` + +.. py:method:: split(num_or_sections, axis=0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_split` + +.. py:method:: sqrt(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_sqrt` + +.. py:method:: square(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_square` + +.. py:method:: squeeze(axis=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_squeeze` + +.. py:method:: stack(axis=0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_stack` + +.. 
py:method:: stanh(scale_a=0.67, scale_b=1.7159, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_stanh` + +.. py:method:: std(axis=None, unbiased=True, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_std` + +.. py:method:: strided_slice(axes, starts, ends, strides) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_strided_slice` + +.. py:method:: sum(axis=None, dtype=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_sum` + +.. py:method:: sums(out=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_sums` + +.. py:method:: t(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_t` + +.. py:method:: tanh(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_tanh` + +.. py:method:: tanh_shrink() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_tanh_shrink` + +.. py:method:: tile(repeat_times, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_tile` + +.. py:method:: topk(k, axis=None, largest=True, sorted=True, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_topk` + +.. py:method:: trace(offset=0, axis1=0, axis2=1, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_trace` + +.. py:method:: transpose(perm, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_transpose` + +.. py:method:: unbind(axis=0) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_unbind` + +.. py:method:: unique(return_index=False, return_inverse=False, return_counts=False, axis=None, dtype=int64, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_unique` + +.. py:method:: unique_with_counts(dtype=int32) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_unique_with_counts` + +.. py:method:: unsqueeze(axis, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_unsqueeze` + +.. py:method:: unstack(axis=0, num=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_unstack` + +.. py:method:: var(axis=None, unbiased=True, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_var` + +.. py:method:: where(x, y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_where` \ No newline at end of file diff --git a/doc/paddle/api/paddle/tensor/creation/to_tensor_cn.rst b/doc/paddle/api/paddle/tensor/creation/to_tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..24fff2ad786ff669677646d0279c4de3f60f09d9 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/to_tensor_cn.rst @@ -0,0 +1,96 @@ +.. _cn_api_paddle_to_tensor: + +to_tensor +------------------------------- + + +.. 
py:function:: paddle.to_tensor(data, dtype=None, place=None, stop_gradient=True) + +该API通过已知的 ``data`` 来创建一个 tensor,tensor类型为 ``paddle.Tensor`` 或 ``paddle.ComplexTensor`` 。 +``data`` 可以是 scalar,tuple,list,numpy\.ndarray,paddle\.Tensor,paddle\.ComplexTensor。 + +如果 ``data`` 已经是一个tensor,且 ``dtype`` 、 ``place`` 没有发生变化,将不会发生 tensor 的拷贝并返回原来的 tensor。 +否则会创建一个新的tensor,且不保留原来计算图。 + +``ComplexTensor`` 是Paddle特有的数据类型。对于 ``ComplexTensor`` ``x`` , ``x.real`` 表示实部,``x.imag`` 表示虚部。 + +参数: + - **data** (scalar|tuple|list|ndarray|Tensor|ComplexTensor) - 初始化tensor的数据,可以是 + scalar,list,tuple,numpy\.ndarray,paddle\.Tensor,paddle\.ComplexTensor类型。 + - **dtype** (str, optional) - 创建tensor的数据类型,可以是 'bool' ,'float16','float32', + 'float64' ,'int8','int16','int32','int64','uint8'。如果创建的是 ``ComplexTensor`` , + 则dtype还可以是 'complex64','complex128'。默认值为None,如果 ``data`` 为python浮点类型,则从 + :ref:`cn_api_paddle_framework_get_default_dtype` 获取类型,如果 ``data`` 为其他类型, + 则会自动推导类型。 + - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace, optional) - 创建tensor的设备位置,可以是 + CPUPlace, CUDAPinnedPlace, CUDAPlace。默认值为None,使用全局的place。 + - **stop_gradient** (bool, optional) - 是否阻断Autograd的梯度传导。默认值为True,此时不进行梯度传导。 + +返回:通过 ``data`` 创建的 tensor。其类型为 ``paddle.Tensor`` 或 ``paddle.ComplexTensor`` + +抛出异常: + - ``TypeError``: 当 ``data`` 不是 scalar,list,tuple,numpy.ndarray,paddle.Tensor或paddle.ComplexTensor类型时 + - ``ValueError``: 当 ``data`` 是包含不等长子序列的tuple或list时, 例如[[1, 2], [3, 4, 5]] + - ``TypeError``: 当 ``dtype`` 不是 bool,float16,float32,float64,int8,int16,int32,int64,uint8,complex64,complex128时 + - ``ValueError``: 当 ``place`` 不是 paddle.CPUPlace,paddle.CUDAPinnedPlace,paddle.CUDAPlace时 + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + type(paddle.to_tensor(1)) + # <class 'paddle.Tensor'> + + paddle.to_tensor(1) + # Tensor: generated_tensor_0 + # - place: CUDAPlace(0) # allocate on global default place + # - shape: [1] + # - layout: NCHW + # - dtype: int64_t + # - data: [1] + + x = paddle.to_tensor(1) + paddle.to_tensor(x, dtype='int32', place=paddle.CPUPlace()) # A new tensor will be constructed due to different dtype or place + # Tensor: generated_tensor_01 + # - place: CPUPlace + # - shape: [1] + # - layout: NCHW + # - dtype: int + # - data: [1] + + paddle.to_tensor((1.1, 2.2), place=paddle.CUDAPinnedPlace()) + # Tensor: generated_tensor_1 + # - place: CUDAPinnedPlace + # - shape: [2] + # - layout: NCHW + # - dtype: double + # - data: [1.1 2.2] + + paddle.to_tensor([[0.1, 0.2], [0.3, 0.4]], place=paddle.CUDAPlace(0), stop_gradient=False) + # Tensor: generated_tensor_2 + # - place: CUDAPlace(0) + # - shape: [2, 2] + # - layout: NCHW + # - dtype: double + # - data: [0.1 0.2 0.3 0.4] + + type(paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64')) + # <class 'paddle.ComplexTensor'> + + paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64') + # ComplexTensor[real]: generated_tensor_0.real + # - place: CUDAPlace(0) + # - shape: [2, 2] + # - layout: NCHW + # - dtype: float + # - data: [1 2 3 4] + # ComplexTensor[imag]: generated_tensor_0.imag + # - place: CUDAPlace(0) + # - shape: [2, 2] + # - layout: NCHW + # - dtype: float + # - data: [1 0 2 0] \ No newline at end of file diff --git a/doc/paddle/api/paddle/tensor/linalg/dot_cn.rst b/doc/paddle/api/paddle/tensor/linalg/dot_cn.rst index 242ea893d7b1ac3337e42d764aba4becdb9122d6..5914eb30d807572d0cc439690ea834c89a11a355 100644 --- a/doc/paddle/api/paddle/tensor/linalg/dot_cn.rst +++ b/doc/paddle/api/paddle/tensor/linalg/dot_cn.rst @@ -31,7 +31,7 @@ dot paddle.disable_static()
x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z.numpy()) diff --git a/doc/paddle/api/paddle/tensor/manipulation/stack_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/stack_cn.rst index 3016b30fdb675a45d80aadaf662f8677209dddbb..33953b9821290741b713d1a2eedcee1432522075 100644 --- a/doc/paddle/api/paddle/tensor/manipulation/stack_cn.rst +++ b/doc/paddle/api/paddle/tensor/manipulation/stack_cn.rst @@ -67,16 +67,10 @@ stack .. code-block:: python import paddle - import numpy as np - - data1 = np.array([[1.0, 2.0]]) - data2 = np.array([[3.0, 4.0]]) - data3 = np.array([[5.0, 6.0]]) - paddle.disable_static() - x1 = paddle.to_variable(data1) - x2 = paddle.to_variable(data2) - x3 = paddle.to_variable(data3) + x1 = paddle.to_tensor([[1.0, 2.0]]) + x2 = paddle.to_tensor([[3.0, 4.0]]) + x3 = paddle.to_tensor([[5.0, 6.0]]) out = paddle.stack([x1, x2, x3], axis=0) print(out.shape) # [3, 1, 2] diff --git a/doc/paddle/api/paddle/tensor/math/max_cn.rst b/doc/paddle/api/paddle/tensor/math/max_cn.rst index b8210b9100124e91a25d2c72bf4b6b588dd6082e..62a805fd95b96081c75982bfb3cc8df44881729a 100644 --- a/doc/paddle/api/paddle/tensor/math/max_cn.rst +++ b/doc/paddle/api/paddle/tensor/math/max_cn.rst @@ -25,16 +25,12 @@ max .. code-block:: python - import numpy as np import paddle - paddle.disable_static() - # data_x is a variable with shape [2, 4] # the axis is a int element - data_x = np.array([[0.2, 0.3, 0.5, 0.9], - [0.1, 0.2, 0.6, 0.7]]) - x = paddle.to_variable(data_x) + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) result1 = paddle.max(x) print(result1.numpy()) #[0.9] @@ -49,11 +45,9 @@ max #[[0.9] # [0.7]] - # data_y is a variable with shape [2, 2, 2] # the axis is list - data_y = np.array([[[1.0, 2.0], [3.0, 4.0]], - [[5.0, 6.0], [7.0, 8.0]]]) - y = paddle.to_variable(data_y) + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) result5 = paddle.max(y, axis=[1, 2]) print(result5.numpy()) #[4. 8.] diff --git a/doc/paddle/api/paddle/tensor/math/maximum_cn.rst b/doc/paddle/api/paddle/tensor/math/maximum_cn.rst index f002922cbdd13d73a168de006544475e328e52f6..14d43ff8600ce6cc5c91158f278afbe1a203fb68 100644 --- a/doc/paddle/api/paddle/tensor/math/maximum_cn.rst +++ b/doc/paddle/api/paddle/tensor/math/maximum_cn.rst @@ -56,39 +56,30 @@ maximum import paddle import numpy as np - paddle.disable_static() - x_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - y_data = np.array([[5, 6], [7, 8]], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[1, 2], [3, 4]]) + y = paddle.to_tensor([[5, 6], [7, 8]]) res = paddle.maximum(x, y) print(res.numpy()) #[[5. 6.] # [7. 8.]] - x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32) - y_data = np.array([1, 2], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]]) + y = paddle.to_tensor([1, 2]) res = paddle.maximum(x, y, axis=1) print(res.numpy()) #[[[1. 2. 3.] # [2. 2.
3.]]] - x_data = np.array([2, 3, 5], dtype=np.float32) - y_data = np.array([1, 4, np.nan], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([2, 3, 5], dtype='float32') + y = paddle.to_tensor([1, 4, np.nan], dtype='float32') res = paddle.maximum(x, y) print(res.numpy()) #[ 2. 4. nan] - x_data = np.array([5, 3, np.inf], dtype=np.float32) - y_data = np.array([1, 4, 5], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([5, 3, np.inf], dtype='float32') + y = paddle.to_tensor([1, 4, 5], dtype='float32') res = paddle.maximum(x, y) print(res.numpy()) #[ 5. 4. inf] diff --git a/doc/paddle/api/paddle/tensor/math/min_cn.rst b/doc/paddle/api/paddle/tensor/math/min_cn.rst index 13a5db955ee5f290af64beb990699cd4549f4a0e..a242a37cff72a3738759e445f47fe2915b758ae6 100644 --- a/doc/paddle/api/paddle/tensor/math/min_cn.rst +++ b/doc/paddle/api/paddle/tensor/math/min_cn.rst @@ -24,16 +24,12 @@ min :::::::::: .. code-block:: python - import numpy as np import paddle - paddle.disable_static() - # data_x is a variable with shape [2, 4] # the axis is a int element - data_x = np.array([[0.2, 0.3, 0.5, 0.9], - [0.1, 0.2, 0.6, 0.7]]) - x = paddle.to_variable(data_x) + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) result1 = paddle.min(x) print(result1.numpy()) #[0.1] @@ -48,11 +44,9 @@ min #[[0.2] # [0.1]] - # data_y is a variable with shape [2, 2, 2] # the axis is list - data_y = np.array([[[1.0, 2.0], [3.0, 4.0]], - [[5.0, 6.0], [7.0, 8.0]]]) - y = paddle.to_variable(data_y) + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) result5 = paddle.min(y, axis=[1, 2]) print(result5.numpy()) #[1. 5.] diff --git a/doc/paddle/api/paddle/tensor/math/minimum_cn.rst b/doc/paddle/api/paddle/tensor/math/minimum_cn.rst index c4772e1690471e1ca65dadaf25201bd6841b05e2..6fdf6dda658f1d30601d7605dfb4154b9cc7de06 100644 --- a/doc/paddle/api/paddle/tensor/math/minimum_cn.rst +++ b/doc/paddle/api/paddle/tensor/math/minimum_cn.rst @@ -56,39 +56,30 @@ minimum .. code-block:: python import paddle import numpy as np paddle.disable_static() - x_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - y_data = np.array([[5, 6], [7, 8]], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32') res = paddle.minimum(x, y) print(res.numpy()) #[[1. 2.] # [3. 4.]] - x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32) - y_data = np.array([1, 2], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32') + y = paddle.to_tensor([1, 2], dtype='float32') res = paddle.minimum(x, y, axis=1) print(res.numpy()) #[[[1. 1. 1.] # [2. 2. 2.]]] - x_data = np.array([2, 3, 5], dtype=np.float32) - y_data = np.array([1, 4, np.nan], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([2, 3, 5], dtype='float32') + y = paddle.to_tensor([1, 4, np.nan], dtype='float32') res = paddle.minimum(x, y) print(res.numpy()) #[ 1. 3.
nan] - x_data = np.array([5, 3, np.inf], dtype=np.float32) - y_data = np.array([1, 4, 5], dtype=np.float32) - x = paddle.to_variable(x_data) - y = paddle.to_variable(y_data) + x = paddle.to_tensor([5, 3, np.inf], dtype='float32') + y = paddle.to_tensor([1, 4, 5], dtype='float32') res = paddle.minimum(x, y) print(res.numpy()) #[1. 3. 5.] diff --git a/doc/paddle/guides/index_cn.rst b/doc/paddle/guides/index_cn.rst index a674139300998a0967c45f6e7d3b3ddb15162181..22c01a2181ef8af898663ddcf6af1708ee994c5b 100644 --- a/doc/paddle/guides/index_cn.rst +++ b/doc/paddle/guides/index_cn.rst @@ -8,6 +8,8 @@ PaddlePaddle (PArallel Distributed Deep LEarning)是一个易用、高效、灵 让我们从学习PaddlePaddle基本概念这里开始: + +- `Tensor概念介绍 `_ :介绍飞桨中数据的表示方式。 - `版本迁移 <./migration_cn.html>`_:介绍 Paddle 1 到Paddle 2的变化与Paddle1to2转换工具的使用。 - `动态图转静态图 <./dygraph_to_static/index_cn.html>`_:介绍 Paddle 动态图转静态图的方法 - `模型存储与载入 <./model_save_load_cn.html>`_:介绍 Paddle 模型与参数存储载入的方法 @@ -16,6 +18,7 @@ PaddlePaddle (PArallel Distributed Deep LEarning)是一个易用、高效、灵 .. toctree:: :hidden: + + tensor_introduction.md migration_cn.rst dygraph_to_static/index_cn.rst model_save_load_cn.rst diff --git a/doc/paddle/guides/index_en.rst b/doc/paddle/guides/index_en.rst index ff7bad5895f4e52c5e42de52d5c646ae1675aa92..e9e751e1d81cc817dcc7b67bd54c491e114e83fb 100644 --- a/doc/paddle/guides/index_en.rst +++ b/doc/paddle/guides/index_en.rst @@ -9,11 +9,13 @@ Please refer to `PaddlePaddle Github `_ Let's start with studying basic concept of PaddlePaddle: +- `Introduction to Tensor `_ : Introduction of Tensor, which is the representation of data in Paddle. - `migration tools <./migration_en.html>`_:how to use migration tools to upgrade your code. - `dynamic to static <./dygraph_to_static/index_en.html>`_:how to convert your model from dynamic graph to static graph. .. toctree:: :hidden: + tensor_introduction_en.md migration_en.rst dynamic_to_static/index_en.rst diff --git a/doc/paddle/guides/tensor_introduction.md b/doc/paddle/guides/tensor_introduction.md new file mode 100644 index 0000000000000000000000000000000000000000..ace9c23cb495415aa058ed7e7ef706bb923feb60 --- /dev/null +++ b/doc/paddle/guides/tensor_introduction.md @@ -0,0 +1,497 @@ + + +# Tensor概念介绍 + +飞桨(PaddlePaddle,以下简称Paddle)和其他深度学习框架一样,使用**Tensor**来表示数据,在神经网络中传递的数据均为**Tensor**。 + +可以将**Tensor**理解为多维数组,其可以具有任意多的维度,不同**Tensor**可以有不同的**数据类型** (dtype) 和**形状** (shape)。 + +同一**Tensor**中所有元素的dtype均相同。如果你对 [Numpy](https://www.paddlepaddle.org.cn/tutorials/projectdetail/590690) 熟悉,**Tensor**是类似于 **Numpy array** 的概念。 + +### 目录 + +* [Tensor的创建](#1) +* [Tensor的shape](#2) +* [Tensor其他属性](#3) +* [Tensor的操作](#4)
+ + +---------- + +## Tensor的创建
+ +首先,让我们开始创建一个 **Tensor** : + +### 1. 创建类似于vector的**1-D Tensor**,其rank为1 +```python +import paddle + +# 可通过dtype来指定Tensor数据类型,否则会创建float32类型的Tensor +rank_1_tensor = paddle.to_tensor([2.0, 3.0, 4.0], dtype='float64') +print(rank_1_tensor) +``` + +```text +Tensor: generated_tensor_1 + - place: CUDAPlace(0) + - shape: [3] + - layout: NCHW + - dtype: double + - data: [2.0, 3.0, 4.0] +``` +特殊地,如果仅输入单个scalar类型数据(例如float/int/bool类型的单个元素),则会创建shape为[1]的**Tensor** +```python +paddle.to_tensor(2) +paddle.to_tensor([2]) +``` +上述两种创建方式完全一致,shape均为[1],输出如下: +```text +Tensor: generated_tensor_0 + - place: CUDAPlace(0) + - shape: [1] + - layout: NCHW + - dtype: int32_t + - data: [2] +``` + +### 2. 创建类似于matrix的**2-D Tensor**,其rank为2 +```python +rank_2_tensor = paddle.to_tensor([[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0]]) +print(rank_2_tensor) +``` +```text +Tensor: generated_tensor_2 + - place: CUDAPlace(0) + - shape: [2, 3] + - layout: NCHW + - dtype: double + - data: [1.0 2.0 3.0 4.0 5.0 6.0] +``` + +### 3. 同样地,还可以创建rank为3、4...N等更复杂的多维Tensor +```python +# Tensor可以有任意数量的轴(也称为维度) +rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5], + [6, 7, 8, 9, 10]], + [[11, 12, 13, 14, 15], + [16, 17, 18, 19, 20]]]) +print(rank_3_tensor) +``` +```text +Tensor: generated_tensor_3 + - place: CUDAPlace(0) + - shape: [2, 2, 5] + - layout: NCHW + - dtype: double + - data: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20] +``` +上述不同rank的**Tensor**可以可视化地表示为:
+*图1 不同rank的Tensor可视化表示*
+ + +你可以通过 Tensor.numpy() 方法方便地将 **Tensor** 转化为 **Numpy array**: +```python +print(rank_2_tensor.numpy()) +``` +```text +array([[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0]], dtype=float32) +``` + +**Tensor**不仅支持 floats、ints 类型数据,也支持 complex numbers 数据: +```python +rank_2_complex_tensor = paddle.to_tensor([[1+1j, 2+2j], + [3+3j, 4+4j]]) +``` +```text +ComplexTensor[real]: generated_tensor_0.real + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [1 2 3 4] +ComplexTensor[imag]: generated_tensor_0.imag + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [1 2 3 4] +``` +如果检测到输入数据包含complex numbers,则会自动创建一个**ComplexTensor**,**ComplexTensor**是Paddle中一种特殊的数据结构, +其包含实部(real)与虚部(imag)两个形状与数据类型相同的**Tensor**,其结构可视化表示为:
+*图2 ComplexTensor的可视化表示*
+ +**Tensor**必须形状规则,类似于“矩形”的概念,也就是,沿任何一个轴(也称作维度)上,元素的数量都是相等的,如果为以下情况: +```python +rank_2_tensor = paddle.to_tensor([[1.0, 2.0], + [4.0, 5.0, 6.0]]) +``` +该情况下将会抛出异常: +```text +ValueError: + Faild to convert input data to a regular ndarray : + - Usually this means the input data contains nested lists with different lengths. +``` + +上面介绍了通过Python数据来创建**Tensor**的方法,我们也可以通过 **Numpy array** 来创建**Tensor**: +```python +rank_1_tensor = paddle.to_tensor(numpy.array([1.0, 2.0])) + +rank_2_tensor = paddle.to_tensor(numpy.array([[1.0, 2.0], + [3.0, 4.0]])) + +rank_3_tensor = paddle.to_tensor(numpy.random.rand(3, 2)) +``` +创建的 **Tensor** 与原 **Numpy array** 具有相同的 shape 与 dtype。 + +如果要创建一个指定shape的**Tensor**,Paddle也提供了一些API: +```text +paddle.zeros([m, n]) # 创建数据全为0,shape为[m, n]的Tensor +paddle.ones([m, n]) # 创建数据全为1,shape为[m, n]的Tensor +paddle.full([m, n], 10) # 创建数据全为10,shape为[m, n]的Tensor +paddle.arange(start, end, step) # 创建从start到end,步长为step的Tensor +paddle.linspace(start, end, num) # 创建从start到end,元素个数固定为num的Tensor +```
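+
+下面给出一个简单的使用示意(这是一个基于动态图环境的最小示例,注释中的输出为按上述定义推算的结果):
+```python
+import paddle
+paddle.disable_static()
+
+zeros_tensor = paddle.zeros([2, 3])      # shape为[2, 3],元素全为0
+full_tensor = paddle.full([2, 3], 10)    # shape为[2, 3],元素全为10
+range_tensor = paddle.arange(0, 10, 2)   # 从0到10、步长为2
+
+print(zeros_tensor.shape)    # [2, 3]
+print(range_tensor.numpy())  # [0 2 4 6 8]
+```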
+ +---------- + +## Tensor的shape
+ +### 基本概念 +查看一个**Tensor**的形状可以通过 **Tensor.shape**,shape是 **Tensor** 的一个重要属性,以下为相关概念: + +1. shape:描述了tensor的每个维度上的元素数量 +2. rank:tensor的维度的数量,例如vector的rank为1,matrix的rank为2 +3. axis或者dimension:指tensor某个特定的维度 +4. size:指tensor中全部元素的个数 + +让我们来创建1个4-D **Tensor**,并通过图形来直观表达以上几个概念之间的关系: +```python +rank_4_tensor = paddle.ones([2, 3, 4, 5]) +```
+*图3 Tensor的shape、axis、dimension、rank之间的关系*
+ +```python +print("Data Type of every element:", rank_4_tensor.dtype) +print("Number of dimensions:", rank_4_tensor.ndim) +print("Shape of tensor:", rank_4_tensor.shape) +print("Elements number along axis 0 of tensor:", rank_4_tensor.shape[0]) +print("Elements number along the last axis of tensor:", rank_4_tensor.shape[-1]) +``` +```text +Data Type of every element: VarType.FP32 +Number of dimensions: 4 +Shape of tensor: [2, 3, 4, 5] +Elements number along axis 0 of tensor: 2 +Elements number along the last axis of tensor: 5 +``` + +### 索引 +通过索引能方便地对Tensor进行“切片”操作。Paddle使用标准的 Python索引规则 与 Numpy索引规则,与[ndexing a list or a string in Python](https://docs.python.org/3/tutorial/introduction.html#strings)类似。具有以下特点: + +1. 如果索引为负数,则从尾部开始计算 +2. 如果索引使用 ``:`` ,则其对应格式为start: stop: step,其中start、stop、step均可缺省 + +* 针对1-D **Tensor**,则仅有单个轴上的索引: +```python +rank_1_tensor = paddle.to_tensor([0, 1, 2, 3, 4, 5, 6, 7, 8]) +print("Origin Tensor:", rank_1_tensor.numpy()) + +print("First element:", rank_1_tensor[0].numpy()) +print("Last element:", rank_1_tensor[-1].numpy()) +print("All element:", rank_1_tensor[:].numpy()) +print("Before 3:", rank_1_tensor[:3].numpy()) +print("From 6 to the end:", rank_1_tensor[6:].numpy()) +print("From 3 to 6:", rank_1_tensor[3:6].numpy()) +print("Interval of 3:", rank_1_tensor[::3].numpy()) +print("Reverse:", rank_1_tensor[::-1].numpy()) +``` +```text +Origin Tensor: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=int64) +First element: [0] +Last element: [8] +All element: [0 1 2 3 4 5 6 7 8] +Before 3: [0 1 2] +From 6 to the end: [6 7 8] +From 3 to 6: [3 4 5] +Interval of 3: [0 3 6] +Reverse: [8 7 6 5 4 3 2 1 0] +``` + +* 针对2-D及以上的 **Tensor**,则会有多个轴上的索引: +```python +rank_2_tensor = paddle.to_tensor([[0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11]]) +print("Origin Tensor:", rank_2_tensor.numpy()) +print("First row:", rank_2_tensor[0].numpy()) +print("First row:", rank_2_tensor[0, :].numpy()) +print("First column:", rank_2_tensor[:, 0].numpy()) +print("Last column:", rank_2_tensor[:, -1].numpy()) +print("All element:", rank_2_tensor[:].numpy()) +print("First row and second column:", rank_2_tensor[0, 1].numpy()) +``` +```text +Origin Tensor: array([[ 0 1 2 3] + [ 4 5 6 7] + [ 8 9 10 11]], dtype=int64) +First row: [0 1 2 3] +First row: [0 1 2 3] +First column: [0 4 8] +Last column: [ 3 7 11] +All element: [[ 0 1 2 3] + [ 4 5 6 7] + [ 8 9 10 11]] +First row and second column: [1] +``` + +输入索引的第一个值对应axis 0,第二个值对应axis 1,以此类推,如果某个axis上未指定索引,则默认为 ``:`` 。例如: +``` +rank_3_tensor[1] +rank_3_tensor[1, :] +rank_3_tensor[1, :, :] +``` +以上三种索引的结果是完全相同的。 + +### 对shape进行操作 + +重新定义**Tensor**的shape在实际编程中具有重要意义。 +```python +rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5], + [6, 7, 8, 9, 10]], + [[11, 12, 13, 14, 15], + [16, 17, 18, 19, 20]], + [[21, 22, 23, 24, 25], + [26, 27, 28, 29, 30]]]) +print("the shape of rank_3_tensor:", rank_3_tensor.shape) +``` +```text +the shape of rank_3_tensor: [3, 2, 5] +``` + +Paddle提供了reshape接口来改变Tensor的shape: +```python +rank_3_tensor = paddle.reshape(rank_3_tensor, [2, 5, 3]) +print("After reshape:", rank_3_tensor.shape) +``` +```text +After reshape: [2, 5, 3] +``` + +在指定新的shape时存在一些技巧: + +**1.** -1 表示这个维度的值是从Tensor的元素总数和剩余维度推断出来的。因此,有且只有一个维度可以被设置为-1。 +**2.** 0 表示实际的维数是从Tensor的对应维数中复制出来的,因此shape中0的索引值不能超过x的维度。 + +有一些例子可以很好解释这些技巧: +```text +origin:[3, 2, 5] reshape:[3, 10] actual: [3, 10] +origin:[3, 2, 5] reshape:[-1] actual: [30] +origin:[3, 2, 5] reshape:[0, 5, -1] actual: [3, 5, 2] +``` + +可以发现,reshape为[-1]时,会将tensor按其在计算机上的内存分布展平为1-D Tensor。 +```python 
+print("Tensor flattened to Vector:", paddle.reshape(rank_3_tensor, [-1]).numpy()) +``` +```text +Tensor flattened to Vector: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30] +``` + +---------- +##
+ +---------- + +## Tensor其他属性
+### Tensor的dtype + +**Tensor**的数据类型,可以通过 Tensor.dtype 来查看,dtype支持:'bool','float16','float32','float64','uint8','int8','int16','int32','int64'。 + +* 通过Python元素创建的Tensor,可以通过dtype来进行指定,如果未指定: + + * 对于python整型数据,则会创建int64型Tensor + * 对于python浮点型数据,默认会创建float32型Tensor,并且可以通过set_default_dtype来调整浮点型数据的默认类型。 + +* 通过Numpy array创建的Tensor,则与其原来的dtype保持相同。 + +```python +print("Tensor dtype from Python integers:", paddle.to_tensor(1).dtype) +print("Tensor dtype from Python floating point:", paddle.to_tensor(1.0).dtype) +``` +```text +Tensor dtype from Python integers: VarType.INT64 +Tensor dtype from Python floating point: VarType.FP32 +``` + +Paddle提供了**cast**接口来改变dtype: +```python +float32_tensor = paddle.to_tensor(1.0) + +float64_tensor = paddle.cast(float32_tensor, dtype='float64') +print("Tensor after cast to float64:", float64_tensor.dtype) + +int64_tensor = paddle.cast(float32_tensor, dtype='int64') +print("Tensor after cast to int64:", int64_tensor.dtype) +``` +```text +Tensor after cast to float64: VarType.FP64 +Tensor after cast to int64: VarType.INT64 +``` + +### Tensor的place + +初始化**Tensor**时可以通过**place**来指定其分配的设备位置,可支持的设备位置有三种:CPU/GPU/固定内存,其中固定内存也称为不可分页内存或锁页内存,其与GPU之间具有更高的读写效率,并且支持异步传输,这对网络整体性能会有进一步提升,但其缺点是分配空间过多时可能会降低主机系统的性能,因为其减少了用于存储虚拟内存数据的可分页内存。 + +* **创建CPU上的Tensor**: +```python +cpu_tensor = paddle.to_tensor(1, place=paddle.CPUPlace()) +print(cpu_tensor) +``` +```text +Tensor: generated_tensor_0 + - place: CPUPlace +``` + +* **创建GPU上的Tensor**: +```python +gpu_tensor = paddle.to_tensor(1, place=paddle.CUDAPlace(0)) +print(gpu_tensor) +``` +```text +Tensor: generated_tensor_0 + - place: CUDAPlace(0) +``` + +* **创建固定内存上的Tensor**: +```python +pin_memory_tensor = paddle.to_tensor(1, place=paddle.CUDAPinnedPlace()) +print(pin_memory_tensor) +``` +```text +Tensor: generated_tensor_0 + - place: CUDAPinnedPlace +``` +### Tensor的name + +Tensor的name是其唯一的标识符,为python 字符串类型,查看一个Tensor的name可以通过Tensor.name属性。默认地,在每个Tensor创建时,Paddle会自定义一个独一无二的name。 + +```python +print("Tensor name:", paddle.to_tensor(1).name) +``` +```text +Tensor name: generated_tensor_0 +```
+ +---------- + +## Tensor的操作
+ +Paddle提供了丰富的Tensor操作的API,包括数学运算符、逻辑运算符、线性代数相关等100余种API,这些API调用有两种方法: +```python +x = paddle.to_tensor([[1.1, 2.2], [3.3, 4.4]]) +y = paddle.to_tensor([[5.5, 6.6], [7.7, 8.8]]) + +print(paddle.add(x, y), "\n") +print(x.add(y), "\n") +``` +```text +Tensor: eager_tmp_2 + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [6.6 8.8 11 13.2] + +Tensor: eager_tmp_3 + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [6.6 8.8 11 13.2] +``` + +可以看出,使用 **Tensor类成员函数** 和 **paddle API** 具有相同的效果,由于 **类成员函数** 操作更为方便,以下均从 **Tensor类成员函数** 的角度,对常用**Tensor**操作进行介绍。 + +#### 数学运算符 +```python +x.abs() #绝对值 +x.ceil() #向上取整 +x.floor() #向下取整 +x.exp() #逐元素计算自然常数为底的指数 +x.log() #逐元素计算x的自然对数 +x.reciprocal() #求倒数 +x.square() #逐元素计算平方 +x.sqrt() #逐元素计算平方根 +x.sum() #计算所有元素的和 +x.asin() #逐元素计算反正弦函数 +x.add(y) #逐元素相加 +x.add(-y) #逐元素相减 +x.multiply(y) #逐元素相乘 +x.divide(y) #逐元素相除 +x.floor_divide(y) #逐元素相除并取整 +x.remainder(y) #逐元素相除并取余 +x.pow(y) #逐元素幂运算 +x.reduce_max() #所有元素最大值,可以指定维度 +x.reduce_min() #所有元素最小值,可以指定维度 +x.reduce_prod() #所有元素累乘,可以指定维度 +x.reduce_sum() #所有元素的和,可以指定维度 +``` + +Paddle对python数学运算相关的魔法函数进行了重写,以下操作与上述结果相同。 +```text +x + y -> x.add(y) #逐元素相加 +x - y -> x.add(-y) #逐元素相减 +x * y -> x.multiply(y) #逐元素相乘 +x / y -> x.divide(y) #逐元素相除 +x // y -> x.floor_divide(y) #逐元素相除并取整 +x % y -> x.remainder(y) #逐元素相除并取余 +x ** y -> x.pow(y) #逐元素幂运算 +``` + +#### 逻辑运算符 +```python +x.is_empty() #判断tensor是否为空 +x.isfinite() #判断tensor中元素是否是有限的数字,即不包括inf与nan +x.equal_all(y) #判断两个tensor的所有元素是否相等 +x.equal(y) #判断两个tensor的每个元素是否相等 +x.not_equal(y) #判断两个tensor的每个元素是否不相等 +x.less_than(y) #判断tensor x的元素是否小于tensor y的对应元素 +x.less_equal(y) #判断tensor x的元素是否小于或等于tensor y的对应元素 +x.greater_than(y) #判断tensor x的元素是否大于tensor y的对应元素 +x.greater_equal(y) #判断tensor x的元素是否大于或等于tensor y的对应元素 +``` + +同样地,Paddle对python逻辑比较相关的魔法函数进行了重写,以下操作与上述结果相同。 +```text +x == y -> x.equal(y) #判断两个tensor的每个元素是否相等 +x != y -> x.not_equal(y) #判断两个tensor的每个元素是否不相等 +x < y -> x.less_than(y) #判断tensor x的元素是否小于tensor y的对应元素 +x <= y -> x.less_equal(y) #判断tensor x的元素是否小于或等于tensor y的对应元素 +x > y -> x.greater_than(y) #判断tensor x的元素是否大于tensor y的对应元素 +x >= y -> x.greater_equal(y) #判断tensor x的元素是否大于或等于tensor y的对应元素 +``` + +以下操作仅针对bool型Tensor: +```python +x.reduce_all() #判断一个bool型tensor是否所有元素为True +x.reduce_any() #判断一个bool型tensor是否存在至少1个元素为True +x.logical_and(y) #对两个bool型tensor逐元素进行逻辑与操作 +x.logical_or(y) #对两个bool型tensor逐元素进行逻辑或操作 +x.logical_xor(y) #对两个bool型tensor逐元素进行逻辑异或操作 +x.logical_not() #对bool型tensor逐元素进行逻辑非操作 +``` + +#### 线性代数相关 +```python +x.cholesky() #矩阵的cholesky分解 +x.t() #矩阵转置 +x.transpose([1, 0]) #交换axis 0 与axis 1的顺序 +x.norm('fro') #矩阵的Frobenius 范数 +x.dist(y, p=2) #矩阵(x-y)的2范数 +x.matmul(y) #矩阵乘法 +``` +需要注意,Paddle中Tensor的操作符均为非inplace操作,即 ``x.add(y)`` 不会在**tensor x**上直接进行操作,而会返回一个新的**Tensor**来表示运算结果。 + +更多Tensor操作相关的API,请参考[class paddle.Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/creation/Tensor_cn.html) diff --git a/doc/paddle/guides/tensor_introduction_en.md b/doc/paddle/guides/tensor_introduction_en.md new file mode 100644 index 0000000000000000000000000000000000000000..18f7f18afdabd9e38e07337d83305532904f28e0 --- /dev/null +++ b/doc/paddle/guides/tensor_introduction_en.md @@ -0,0 +1,502 @@ +# Introduction to Tensor + +Like other deep learning frameworks, PaddlePaddle (hereinafter referred to as Paddle) uses **Tensor** to represent data. + +**Tensor** can be regarded as a multi-dimensional array, which can have as many dimensions as needed.
Different **Tensor** can have different data types (dtype) and shapes. + +The dtypes of all elements in the same Tensor are the same. If you are familiar with [Numpy](https://www.paddlepaddle.org.cn/tutorials/projectdetail/590690), **Tensor** is similar to the **Numpy array**. + +### Contents + +* [Creation of Tensor](#1) +* [Shape of Tensor](#2) +* [Other attributes of Tensor](#3) +* [Method of Tensor](#4)
+ +---------- + +## Creation of Tensor
+ +First, let us create a **Tensor**: + +### 1. Create a **1-D Tensor** like a vector, whose rank is 1 +```python +import paddle + +# The Tensor data type can be specified by dtype, otherwise, a float32 Tensor will be created +rank_1_tensor = paddle.to_tensor([2.0, 3.0, 4.0], dtype='float64') +print(rank_1_tensor) +``` +```text +Tensor: generated_tensor_1 + - place: CUDAPlace(0) + - shape: [3] + - layout: NCHW + - dtype: double + - data: [2.0, 3.0, 4.0] +``` + +Specifically, if you input only scalar data (for example, float/int/bool), a **Tensor** whose shape is [1] will be created. +```python +paddle.to_tensor(2) +paddle.to_tensor([2]) +``` +The above two are completely the same: the Tensor shape is [1], and the output is: +```text +Tensor: generated_tensor_0 + - place: CUDAPlace(0) + - shape: [1] + - layout: NCHW + - dtype: int32_t + - data: [2] +``` + +### 2. Create a **2-D Tensor** like a matrix, whose rank is 2 +```python +rank_2_tensor = paddle.to_tensor([[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0]]) +print(rank_2_tensor) +``` +```text +Tensor: generated_tensor_2 + - place: CUDAPlace(0) + - shape: [2, 3] + - layout: NCHW + - dtype: double + - data: [1.0 2.0 3.0 4.0 5.0 6.0] +``` + +### 3. Similarly, you can create a multidimensional Tensor whose rank is 3, 4... N +```python +# There can be an arbitrary number of axes (sometimes called "dimensions") +rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5], + [6, 7, 8, 9, 10]], + [[11, 12, 13, 14, 15], + [16, 17, 18, 19, 20]]]) +print(rank_3_tensor) +``` +```text +Tensor: generated_tensor_3 + - place: CUDAPlace(0) + - shape: [2, 2, 5] + - layout: NCHW + - dtype: double + - data: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20] +``` +The visual representation of the **Tensor** above is:
+*Figure1. Visual representation of Tensor with different ranks*
+ + +You can easily convert a **Tensor** to a Numpy array via the Tensor.numpy() method. +```python +print(rank_2_tensor.numpy()) +``` +```text +array([[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0]], dtype=float32) +``` + +**Tensor** supports not only floats and ints but also complex numbers: +```python +rank_2_complex_tensor = paddle.to_tensor([[1+1j, 2+2j], + [3+3j, 4+4j]]) +``` +```text +ComplexTensor[real]: generated_tensor_0.real + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [1 2 3 4] +ComplexTensor[imag]: generated_tensor_0.imag + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [1 2 3 4] +``` +If the input data contains complex numbers, a **ComplexTensor** is automatically created. **ComplexTensor** is a special data structure in Paddle. A **ComplexTensor** consists of two **Tensor**: one is the real part and the other is the imaginary part. A **ComplexTensor** can be visualized as follows:
+*Figure2. Visual representation of ComplexTensor*
+ +**Tensor** must be "rectangular" -- that is, along each axis, the number of elements is the same. For example: +```python +rank_2_tensor = paddle.to_tensor([[1.0, 2.0], + [4.0, 5.0, 6.0]]) +``` +An exception will be thrown in this case: +```text +ValueError: + Faild to convert input data to a regular ndarray : + - Usually this means the input data contains nested lists with different lengths. +``` + +The way to create a **Tensor** from Python data is described above. We can also create a **Tensor** from a numpy array: +```python +rank_1_tensor = paddle.to_tensor(numpy.array([1.0, 2.0])) + +rank_2_tensor = paddle.to_tensor(numpy.array([[1.0, 2.0], + [3.0, 4.0]])) + +rank_3_tensor = paddle.to_tensor(numpy.random.rand(3, 2)) +``` +The created **Tensor** will have the same shape and dtype as the original Numpy array. + +If you want to create a **Tensor** with a specific shape, Paddle also provides these APIs: +```text +paddle.zeros([m, n]) # All elements: 0, Shape: [m, n] +paddle.ones([m, n]) # All elements: 1, Shape: [m, n] +paddle.full([m, n], 10) # All elements: 10, Shape: [m, n] +paddle.arange(start, end, 2) # Elements: from start to end, step size is 2 +paddle.linspace(start, end, 10) # Elements: from start to end, the number of elements is 10 +```
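+
+As a quick, minimal illustration of these creation APIs (a sketch assuming a dynamic-graph environment; the commented outputs follow from the definitions above):
+```python
+import paddle
+paddle.disable_static()
+
+zeros_tensor = paddle.zeros([2, 3])     # shape [2, 3], all elements 0
+full_tensor = paddle.full([2, 3], 10)   # shape [2, 3], all elements 10
+range_tensor = paddle.arange(0, 10, 2)  # elements from 0 to 10 with step 2
+
+print(zeros_tensor.shape)    # [2, 3]
+print(range_tensor.numpy())  # [0 2 4 6 8]
+```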
+ +---------- + +## Shape of Tensor
+ +### Basic Concept + +The shape of a **Tensor** can be obtained from **Tensor.shape**. shape is an important attribute of **Tensor**, and the following are related concepts: + +1. shape: Describes the number of elements on each of the tensor's dimensions. +2. rank: The number of the tensor's dimensions. For example, the rank of a vector is 1, the rank of a matrix is 2. +3. axis or dimension: A particular dimension of a tensor. +4. size: The number of all elements in the tensor. + +Let us create a 4-D **Tensor** and visualize it to represent the relationship between the above concepts. +```python +rank_4_tensor = paddle.ones([2, 3, 4, 5]) +```
+*Figure3. The relationship between Tensor shape, axis, dimension and rank*
+ +```python +print("Data Type of every element:", rank_4_tensor.dtype) +print("Number of dimensions:", rank_4_tensor.ndim) +print("Shape of tensor:", rank_4_tensor.shape) +print("Elements number along axis 0 of tensor:", rank_4_tensor.shape[0]) +print("Elements number along the last axis of tensor:", rank_4_tensor.shape[-1]) +``` +```text +Data Type of every element: VarType.FP32 +Number of dimensions: 4 +Shape of tensor: [2, 3, 4, 5] +Elements number along axis 0 of tensor: 2 +Elements number along the last axis of tensor: 5 +``` + +### indexing + +Paddle follows standard Python indexing rules, similar to[ndexing a list or a string in Python](https://docs.python.org/3/tutorial/introduction.html#strings) and the basic rules for NumPy indexing. indexing is used to work on Tensor "slice". It has following characteristics: + +1. negative indices count backwards from the end +2. colons, : , are used for slices: start:stop:step + +For **1-D Tensor**, there is only single-axis indexing: +```python +rank_1_tensor = paddle.to_tensor([0, 1, 2, 3, 4, 5, 6, 7, 8]) +print("Origin Tensor:", rank_1_tensor.numpy()) + +print("First element:", rank_1_tensor[0].numpy()) +print("Last element:", rank_1_tensor[-1].numpy()) +print("All element:", rank_1_tensor[:].numpy()) +print("Before 3:", rank_1_tensor[:3].numpy()) +print("From 6 to the end:", rank_1_tensor[6:].numpy()) +print("From 3 to 6:", rank_1_tensor[3:6].numpy()) +print("Interval of 3:", rank_1_tensor[::3].numpy()) +print("Reverse:", rank_1_tensor[::-1].numpy()) +``` +```text +Origin Tensor: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=int64) +First element: [0] +Last element: [8] +All element: [0 1 2 3 4 5 6 7 8] +Before 3: [0 1 2] +From 6 to the end: [6 7 8] +From 3 to 6: [3 4 5] +Interval of 3: [0 3 6] +Reverse: [8 7 6 5 4 3 2 1 0] +``` + +For 2-D **Tensor** or above, there is multi-axis indexing: +```python +rank_2_tensor = paddle.to_tensor([[0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11]]) +print("Origin Tensor:", rank_2_tensor.numpy()) + +print("First row:", rank_2_tensor[0].numpy()) +print("First row:", rank_2_tensor[0, :].numpy()) +print("First column:", rank_2_tensor[:, 0].numpy()) +print("Last column:", rank_2_tensor[:, -1].numpy()) +print("All element:", rank_2_tensor[:].numpy()) +print("First row and second column:", rank_2_tensor[0, 1].numpy()) +``` +```text +Origin Tensor: array([[ 0 1 2 3] + [ 4 5 6 7] + [ 8 9 10 11]], dtype=int64) +First row: [0 1 2 3] +First row: [0 1 2 3] +First column: [0 4 8] +Last column: [ 3 7 11] +All element: [[ 0 1 2 3] + [ 4 5 6 7] + [ 8 9 10 11]] +First row and second column: [1] +``` + +The first element of index is corresponds to Axis 0, the second is corresponds to Axis 1, and so on. If no index is specified on an Axis, the default is ':' . For example: +``` +rank_3_tensor[1] +rank_3_tensor[1, :] +rank_3_tensor[1, :, :] +``` +These three are exactly the same. + +### Manipulating Shape + +Manipulating shape of Tensor is important in programming. 
+```python +rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5], + [6, 7, 8, 9, 10]], + [[11, 12, 13, 14, 15], + [16, 17, 18, 19, 20]], + [[21, 22, 23, 24, 25], + [26, 27, 28, 29, 30]]]) +print("the shape of rank_3_tensor:", rank_3_tensor.shape) +``` +```text +the shape of rank_3_tensor: [3, 2, 5] +``` + +Paddle provides the reshape API to manipulate the shape of a Tensor: +```python +rank_3_tensor = paddle.reshape(rank_3_tensor, [2, 5, 3]) +print("After reshape:", rank_3_tensor.shape) +``` +```text +After reshape: [2, 5, 3] +``` + +There are some tricks for specifying a new shape: + +1. -1 indicates that the value of this dimension is inferred from the total number of elements and the remaining dimensions of the Tensor. Therefore, one and only one dimension can be set to -1. +2. 0 means that the actual dimension is copied from the corresponding dimension of the Tensor, so the index value of 0 in shape can't exceed the rank of x. + +For example: +```text +origin:[3, 2, 5] reshape:[3, 10] actual: [3, 10] +origin:[3, 2, 5] reshape:[-1] actual: [30] +origin:[3, 2, 5] reshape:[0, 5, -1] actual: [3, 5, 2] +``` + +If you flatten a tensor by reshaping it to [-1], you can see the order in which it is laid out in memory. +```python +print("Tensor flattened to Vector:", paddle.reshape(rank_3_tensor, [-1]).numpy()) +``` +```text +Tensor flattened to Vector: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30] +```
+ +---------- + +## Other attributes of Tensor
+ +### dtype of Tensor + +The data type of a **Tensor** can be obtained from Tensor.dtype. It supports 'bool', 'float16', 'float32', 'float64', 'uint8', 'int8', 'int16', 'int32', 'int64'. + +* If a Tensor is created from Python elements, the data type can be specified by dtype. Otherwise: + + * For Python integer data, an int64 Tensor will be created. + * For Python float numbers, a float32 Tensor will be created by default. You can change the default dtype by set_default_dtype. + +* If a Tensor is created from a Numpy array, the data type remains the same as the original dtype. + +```python +print("Tensor dtype from Python integers:", paddle.to_tensor(1).dtype) +print("Tensor dtype from Python floating point:", paddle.to_tensor(1.0).dtype) +``` +```text +Tensor dtype from Python integers: VarType.INT64 +Tensor dtype from Python floating point: VarType.FP32 +``` + +Paddle provides the **cast** API to change the dtype: +```python +float32_tensor = paddle.to_tensor(1.0) + +float64_tensor = paddle.cast(float32_tensor, dtype='float64') +print("Tensor after cast to float64:", float64_tensor.dtype) + +int64_tensor = paddle.cast(float32_tensor, dtype='int64') +print("Tensor after cast to int64:", int64_tensor.dtype) +``` +```text +Tensor after cast to float64: VarType.FP64 +Tensor after cast to int64: VarType.INT64 +``` + +### place of Tensor + +The device can be specified when creating a tensor. There are three kinds to choose from: CPU/GPU/pinned memory. Pinned memory has higher read and write efficiency with the GPU. In addition, pinned memory supports asynchronous data copy, which will further improve the performance of the network. The disadvantage is that allocating too much pinned memory may reduce the performance of the host, because it reduces the pageable memory which is used to store virtual memory data. + +* **Create Tensor on CPU**: +```python +cpu_tensor = paddle.to_tensor(1, place=paddle.CPUPlace()) +print(cpu_tensor) +``` + +```text +Tensor: generated_tensor_0 + - place: CPUPlace +``` + +* **Create Tensor on GPU**: +```python +gpu_tensor = paddle.to_tensor(1, place=paddle.CUDAPlace(0)) +print(gpu_tensor) +``` + +```text +Tensor: generated_tensor_0 + - place: CUDAPlace(0) +``` + +* **Create Tensor on pinned memory**: +```python +pin_memory_tensor = paddle.to_tensor(1, place=paddle.CUDAPinnedPlace()) +print(pin_memory_tensor) +``` +```text +Tensor: generated_tensor_0 + - place: CUDAPinnedPlace +``` +### name of Tensor + +The name of a Tensor is its unique identifier, which is a Python string, and it can be obtained via ``Tensor.name``. By default, Paddle will customize a unique name when creating a Tensor. + +```python +print("Tensor name:", paddle.to_tensor(1).name) +``` +```text +Tensor name: generated_tensor_0 +```
+ +---------- + +## Method of Tensor
+ + +Paddle provides a rich set of Tensor operation APIs, including mathematical operators, logical operators, linear algebra operators and so on, more than 100 kinds in total. For example: + +```python +x = paddle.to_tensor([[1.1, 2.2], [3.3, 4.4]]) +y = paddle.to_tensor([[5.5, 6.6], [7.7, 8.8]]) + +print(paddle.add(x, y), "\n") +print(x.add(y), "\n") +``` +```text +Tensor: eager_tmp_2 + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [6.6 8.8 11 13.2] + +Tensor: eager_tmp_3 + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [6.6 8.8 11 13.2] +``` + +It can be seen that the Tensor class methods give the same results as the paddle APIs, and the class methods are more convenient to invoke. + +### mathematical operators +```python +x.abs() #absolute value +x.ceil() #round up to an integer +x.floor() #round down to an integer +x.exp() #Calculate exponents of the natural constant of each element +x.log() #Calculate natural logarithm of each element +x.reciprocal() #reciprocal +x.square() #Calculate square of each element +x.sqrt() #Calculate sqrt of each element +x.sum() #Calculate the sum of all elements +x.asin() #Calculate the arcsine of each element +x.add(y) #add element by element +x.add(-y) #subtract element by element +x.multiply(y) #multiply element by element +x.divide(y) #divide element by element +x.floor_divide(y) #floor-divide element by element +x.remainder(y) #mod element by element +x.pow(y) #pow element by element +x.reduce_max() #max value on specific axis +x.reduce_min() #min value on specific axis +x.reduce_prod() #multiply of all elements on specific axis +x.reduce_sum() #sum of all elements on specific axis +``` + +Paddle overrides the magic functions related to Python mathematical operations, like this: +```text +x + y -> x.add(y) +x - y -> x.add(-y) +x * y -> x.multiply(y) +x / y -> x.divide(y) +x // y -> x.floor_divide(y) +x % y -> x.remainder(y) +x ** y -> x.pow(y) +``` + +### logical operators +```python +x.is_empty() #Judge whether tensor is empty +x.isfinite() #Judge whether the element in tensor is finite number +x.equal_all(y) #Judge whether all elements of two tensor are equal +x.equal(y) #judge whether each element of two tensor is equal +x.not_equal(y) #judge whether each element of two tensor is not equal +x.less_than(y) #judge whether each element of tensor x is less than corresponding element of tensor y +x.less_equal(y) #judge whether each element of tensor x is less than or equal to element of tensor y +x.greater_than(y) #judge whether each element of tensor x is greater than element of tensor y +x.greater_equal(y) #judge whether each element of tensor x is greater than or equal to element of tensor y +``` + +Paddle also overrides the magic functions related to Python logical operations.
Like this: +```text +x == y -> x.equal(y) +x != y -> x.not_equal(y) +x < y -> x.less_than(y) +x <= y -> x.less_equal(y) +x > y -> x.greater_than(y) +x >= y -> x.greater_equal(y) +``` + +The following operations are targeted at bool Tensor only: +```python +x.reduce_all() #Judge whether a bool tensor is True for all elements +x.reduce_any() #Judge whether a bool tensor has at least one True element +x.logical_and(y) #logic and operation for two bool tensors +x.logical_or(y) #logic or operation for two bool tensors +x.logical_xor(y) #logic xor operation for two bool tensors +x.logical_not() #logic not operation for a bool tensor +``` + +### linear algebra operators +```python +x.cholesky() #cholesky decomposition of a matrix +x.t() #matrix transpose +x.transpose([1, 0]) #swap axis 0 with axis 1 +x.norm('fro') #Frobenius norm of a matrix +x.dist(y, p=2) #the 2-norm of (x-y) +x.matmul(y) #matrix multiplication +``` +It should be noted that the class methods of Tensor are non-inplace operations. That is, ``x.add(y)`` will not operate directly on Tensor x, but will return a new Tensor to represent the result. + +For more APIs related to Tensor operations, please refer to [class paddle.Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/creation/Tensor_cn.html)
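+
+As a quick illustration of this non-inplace behavior, here is a minimal sketch (assuming a Paddle 2.x dynamic-graph environment; the commented outputs follow from the semantics described above):
+```python
+import paddle
+paddle.disable_static()
+
+x = paddle.to_tensor([1.0, 2.0])
+y = paddle.to_tensor([3.0, 4.0])
+
+z = x.add(y)      # the result is returned in a new Tensor
+print(z.numpy())  # [4. 6.]
+print(x.numpy())  # [1. 2.] -- x itself is unchanged
+```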