Unverified commit af53a0fd, authored by Zhou Wei, committed by GitHub

[2.0 Doc]Expose the Tensor concept and Enhance the Tensor function (#2580)

* add Tensor concept introduction

* add Tensor concept introduction

* add Tensor concept introduction

* add Tensor concept introduction

* add Tensor concept introduction

* fix example code
Parent 1b75777e
......@@ -29,11 +29,9 @@ abs
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([-1, -2, -3, -4]).astype(np.float32)
x = paddle.to_variable(x_data)
res = paddle.abs(x)
print(res.numpy())
# [1, 2, 3, 4]
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.abs(x)
print(out.numpy())
# [0.4 0.2 0.1 0.3]
......@@ -30,11 +30,9 @@ arccosine function.
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32)
x = paddle.to_variable(x_data)
res = paddle.acos(x)
print(res.numpy())
# [2.5293, 1.0573, 2.2711, 1.5336]
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.acos(x)
print(out.numpy())
# [1.98231317 1.77215425 1.47062891 1.26610367]
......@@ -29,11 +29,9 @@ arcsine function.
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32)
x = paddle.to_variable(x_data)
res = paddle.asin(x)
print(res.numpy())
# [-0.9585, 0.5135, -0.7003, 0.0372]
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.asin(x)
print(out.numpy())
# [-0.41151685 -0.20135792 0.10016742 0.30469265]
......@@ -29,11 +29,9 @@ arctangent function.
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32)
x = paddle.to_variable(x_data)
res = paddle.atan(x)
print(res.numpy())
# [-0.6858, 0.4566, -0.5724, 0.0371]
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.atan(x)
print(out.numpy())
# [-0.38050638 -0.19739556 0.09966865 0.29145679]
......@@ -31,12 +31,9 @@ ceil
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([[-1.5,6],[1,15.6]]).astype(np.float32)
x = paddle.to_variable(x_data)
res = paddle.ceil(x)
print(res.numpy())
# [[-1. 6.]
# [ 1. 16.]]
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.ceil(x)
print(out.numpy())
# [-0. -0. 1. 1.]
......@@ -32,12 +32,9 @@ cos
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([[-1,np.pi],[1,15.6]]).astype(np.float32)
x = paddle.to_variable(x_data)
res = paddle.cos(x)
print(res.numpy())
# [[ 0.54030231 -1. ]
# [ 0.54030231 -0.99417763]]
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.cos(x)
print(out.numpy())
# [0.92106099 0.98006658 0.99500417 0.95533649]
......@@ -52,7 +52,7 @@ MaxPool1d
import numpy as np
paddle.disable_static()
data = paddle.to_variable(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
MaxPool1d = nn.layer.MaxPool1d(kernel_size=2, stride=2, padding=0)
pool_out = MaxPool1d(data)
# pool_out shape: [1, 3, 16]
......
......@@ -30,12 +30,9 @@ PairwiseDistance
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_np = np.array([[1., 3.], [3., 5.]]).astype(np.float64)
y_np = np.array([[5., 6.], [7., 8.]]).astype(np.float64)
x = paddle.to_variable(x_np)
y = paddle.to_variable(y_np)
x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype='float64')
y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype='float64')
dist = paddle.nn.PairwiseDistance()
distance = dist(x, y)
print(distance.numpy()) # [5. 5.]
......
......@@ -38,6 +38,6 @@ Hardshrink activation layer
paddle.disable_static()
x = paddle.to_variable(np.array([-1, 0.3, 2.5]))
x = paddle.to_tensor([-1, 0.3, 2.5])
m = paddle.nn.Hardshrink()
out = m(x) # [-1., 0., 2.5]
......@@ -40,14 +40,11 @@ l1_loss
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])
l1_loss = paddle.nn.functional.l1_loss(input, label)
print(l1_loss.numpy())
......
......@@ -40,13 +40,11 @@ Tensor; if :attr:`reduction` is ``'sum'`` or ``'mean'``, the shape
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype('float32'))
other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype('float32'))
label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype('float32'))
input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
print(loss.numpy()) # [0.75]
......@@ -58,8 +58,8 @@ mse_loss
# [array([0.04000002], dtype=float32)]
# dynamic graph mode
paddle.disable_static()
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
output = mse_loss(input, label)
print(output.numpy())
# [0.04000002]
......
......@@ -38,8 +38,8 @@ nll_loss
place = paddle.CPUPlace()
paddle.disable_static(place)
input = paddle.to_variable(input_np)
input = paddle.to_tensor(input_np)
log_out = log_softmax(input)
label = paddle.to_variable(label_np)
label = paddle.to_tensor(label_np)
result = nll_loss(log_out, label)
print(result.numpy()) # [1.0720209]
......@@ -42,7 +42,7 @@ normalize
paddle.disable_static()
x = np.arange(6, dtype=np.float32).reshape(2,3)
x = paddle.to_variable(x)
x = paddle.to_tensor(x)
y = F.normalize(x)
print(y.numpy())
# [[0. 0.4472136 0.8944272 ]
......
......@@ -24,12 +24,10 @@ sigmoid activation function.
:::::::::
.. code-block:: python
import numpy as np
import paddle
import paddle.nn.functional as F
paddle.disable_static()
input_data = np.array([1.0, 2.0, 3.0, 4.0]).astype('float32')
x = paddle.to_variable(input_data)
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
output = F.sigmoid(x)
print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
......@@ -34,9 +34,7 @@ hardshrink activation layer. The formula is as follows:
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = paddle.to_variable(np.array([-1, 0.3, 2.5]))
x = paddle.to_tensor([-1, 0.3, 2.5])
out = F.hardshrink(x) # [-1., 0., 2.5]
......@@ -29,12 +29,10 @@ Sigmoid
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input_data = np.array([1.0, 2.0, 3.0, 4.0]).astype('float32')
m = paddle.nn.Sigmoid()
x = paddle.to_variable(input_data)
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
output = m(x)
print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
......@@ -39,14 +39,11 @@ L1Loss
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])
l1_loss = paddle.nn.loss.L1Loss()
output = l1_loss(input, label)
......
......@@ -63,8 +63,8 @@ MSELoss
# dynamic graph mode
paddle.disable_static()
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
output = mse_loss(input, label)
print(output.numpy())
# [0.04000002]
......@@ -46,15 +46,12 @@ MarginRankingLoss
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype("float32"))
other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype("float32"))
label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype("float32"))
input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
margin_rank_loss = paddle.nn.MarginRankingLoss()
loss = margin_rank_loss(input, other, label)
print(loss.numpy()) # [0.75]
......@@ -63,8 +63,8 @@ NLLLoss
place = paddle.CPUPlace()
paddle.disable_static(place)
input = paddle.to_variable(input_np)
input = paddle.to_tensor(input_np)
log_out = log_softmax(input)
label = paddle.to_variable(label_np)
label = paddle.to_tensor(label_np)
result = nll_loss(log_out, label)
print(result.numpy()) # [1.0720209]
......@@ -31,7 +31,7 @@ dot
paddle.disable_static()
x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.dot(x, y)
print(z.numpy())
.. _cn_api_paddle_tensor_gather:
gather
-------------------------------
......
......@@ -34,9 +34,8 @@ max
# data_x is a variable with shape [2, 4]
# the axis is a int element
data_x = np.array([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
x = paddle.to_variable(data_x)
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
result1 = paddle.max(x)
print(result1.numpy())
#[0.9]
......@@ -53,9 +52,8 @@ max
# data_y is a variable with shape [2, 2, 2]
# the axis is list
data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
y = paddle.to_variable(data_y)
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.max(y, axis=[1, 2])
print(result5.numpy())
#[4. 8.]
......
......@@ -58,39 +58,30 @@ maximum
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([[1, 2], [3, 4]])
y = paddle.to_tensor([[5, 6], [7, 8]])
res = paddle.maximum(x, y)
print(res.numpy())
#[[5. 6.]
# [7. 8.]]
x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
y_data = np.array([1, 2], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([1, 2])
res = paddle.maximum(x, y, axis=1)
print(res.numpy())
#[[[1. 2. 3.]
# [2. 2. 3.]]]
x_data = np.array([2, 3, 5], dtype=np.float32)
y_data = np.array([1, 4, np.nan], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
res = paddle.maximum(x, y)
print(res.numpy())
#[ 2. 4. nan]
x_data = np.array([5, 3, np.inf], dtype=np.float32)
y_data = np.array([1, 4, 5], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, 4, 5], dtype='float32')
res = paddle.maximum(x, y)
print(res.numpy())
#[ 5. 4. inf]
......@@ -26,16 +26,12 @@ min
::::::::::
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
# data_x is a variable with shape [2, 4]
# the axis is a int element
data_x = np.array([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
x = paddle.to_variable(data_x)
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
result1 = paddle.min(x)
print(result1.numpy())
#[0.1]
......@@ -50,11 +46,9 @@ min
#[[0.2]
# [0.1]]
# data_y is a variable with shape [2, 2, 2]
# the axis is list
data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
y = paddle.to_variable(data_y)
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.min(y, axis=[1, 2])
print(result5.numpy())
#[1. 5.]
......
......@@ -61,36 +61,28 @@ minimum
import numpy as np
paddle.disable_static()
x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32')
res = paddle.minimum(x, y)
print(res.numpy())
#[[1. 2.]
# [3. 4.]]
x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
y_data = np.array([1, 2], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32')
y = paddle.to_tensor([1, 2], dtype='float32')
res = paddle.minimum(x, y, axis=1)
print(res.numpy())
#[[[1. 1. 1.]
# [2. 2. 2.]]]
x_data = np.array([2, 3, 5], dtype=np.float32)
y_data = np.array([1, 4, np.nan], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
res = paddle.minimum(x, y)
print(res.numpy())
#[ 1. 3. nan]
x_data = np.array([5, 3, np.inf], dtype=np.float32)
y_data = np.array([1, 4, 5], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, 4, 5], dtype='float32')
res = paddle.minimum(x, y)
print(res.numpy())
#[1. 3. 5.]
......@@ -30,11 +30,9 @@ sqrt
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x_data = np.array([0.1, 0.2, 0.3, 0.4])
x = paddle.to_variable(x_data)
x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
out = paddle.sqrt(x)
print(out.numpy())
# [0.31622777 0.4472136 0.54772256 0.63245553]
......@@ -67,16 +67,10 @@ stack
.. code-block:: python
import paddle
import numpy as np
data1 = np.array([[1.0, 2.0]])
data2 = np.array([[3.0, 4.0]])
data3 = np.array([[5.0, 6.0]])
paddle.disable_static()
x1 = paddle.to_variable(data1)
x2 = paddle.to_variable(data2)
x3 = paddle.to_variable(data3)
x1 = paddle.to_tensor([[1.0, 2.0]])
x2 = paddle.to_tensor([[3.0, 4.0]])
x3 = paddle.to_tensor([[5.0, 6.0]])
out = paddle.stack([x1, x2, x3], axis=0)
print(out.shape) # [3, 1, 2]
......
......@@ -150,6 +150,7 @@ paddle.fluid.layers.bilinear_tensor_product paddle.static.nn.bilinear_tensor_pro
paddle.fluid.framework.name_scope paddle.static.name_scope
paddle.fluid.layers.is_empty paddle.is_empty,paddle.tensor.is_empty,paddle.tensor.logic.is_empty
paddle.tensor.math.multiply paddle.multiply,paddle.tensor.multiply
paddle.tensor.creation.Tensor paddle.Tensor
paddle.tensor.creation.to_tensor paddle.to_tensor,paddle.tensor.to_tensor
paddle.fluid.initializer.Normal paddle.nn.initializer.Normal
paddle.nn.layer.common.AlphaDropout paddle.nn.AlphaDropout,paddle.nn.layer.AlphaDropout
......@@ -378,7 +379,6 @@ paddle.tensor.manipulation.concat paddle.concat,paddle.tensor.concat
paddle.tensor.stat.std paddle.std,paddle.tensor.std
paddle.fluid.layers.dice_loss paddle.nn.functional.dice_loss,paddle.nn.functional.loss.dice_loss
paddle.nn.functional.loss.binary_cross_entropy paddle.nn.functional.binary_cross_entropy
paddle.fluid.dygraph.base.to_variable paddle.to_variable,paddle.framework.to_variable
paddle.fluid.dygraph.Linear paddle.nn.Linear,paddle.nn.layer.Linear,paddle.nn.layer.common.Linear
paddle.fluid.layers.box_clip paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
paddle.nn.layer.activation.ReLU6 paddle.nn.ReLU6
......
to_tensor .. _api_paddle_to_tensor:
train .. _api_paddle_dataset_wmt14_train:
roi_pool .. _api_paddle_fluid_layers_roi_pool:
expand .. _api_paddle_fluid_layers_expand:
......
......@@ -26,11 +26,8 @@ abs
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([-1, -2, -3, -4]).astype(np.float32)
x = paddle.to_variable(x_data)
x = paddle.to_tensor([-1, -2, -3, -4], dtype='float32')
res = paddle.abs(x)
print(res.numpy())
# [1, 2, 3, 4]
......@@ -27,11 +27,8 @@ arccosine function.
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32)
x = paddle.to_variable(x_data)
x = paddle.to_tensor([-0.8183, 0.4912, -0.6444, 0.0371])
res = paddle.acos(x)
print(res.numpy())
# [2.5293, 1.0573, 2.2711, 1.5336]
......@@ -26,11 +26,8 @@ arcsine function.
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32)
x = paddle.to_variable(x_data)
x = paddle.to_tensor([-0.8183, 0.4912, -0.6444, 0.0371])
res = paddle.asin(x)
print(res.numpy())
# [-0.9585, 0.5135, -0.7003, 0.0372]
......@@ -26,11 +26,8 @@ arctangent function.
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32)
x = paddle.to_variable(x_data)
x = paddle.to_tensor([-0.8183, 0.4912, -0.6444, 0.0371])
res = paddle.atan(x)
print(res.numpy())
# [-0.6858, 0.4566, -0.5724, 0.0371]
......@@ -28,11 +28,8 @@ ceil
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([[-1.5,6],[1,15.6]]).astype(np.float32)
x = paddle.to_variable(x_data)
x = paddle.to_tensor([[-1.5,6], [1,15.6]])
res = paddle.ceil(x)
print(res.numpy())
# [[-1. 6.]
......
......@@ -30,10 +30,8 @@ cos
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([[-1,np.pi],[1,15.6]]).astype(np.float32)
x = paddle.to_variable(x_data)
x = paddle.to_tensor([[-1, np.pi], [1, 15.6]], dtype='float32')
res = paddle.cos(x)
print(res.numpy())
# [[ 0.54030231 -1. ]
......
......@@ -34,9 +34,7 @@ hardshrink activation layer. The formula is as follows:
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
x = paddle.to_variable(np.array([-1, 0.3, 2.5]))
x = paddle.to_tensor([-1, 0.3, 2.5])
out = F.hardshrink(x) # [-1., 0., 2.5]
......@@ -40,14 +40,10 @@ l1_loss
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])
l1_loss = paddle.nn.functional.l1_loss(input, label)
print(l1_loss.numpy())
......
......@@ -40,13 +40,11 @@ Tensor; if :attr:`reduction` is ``'sum'`` or ``'mean'``, the shape
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype('float32'))
other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype('float32'))
label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype('float32'))
input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
print(loss.numpy()) # [0.75]
......@@ -58,8 +58,8 @@ mse_loss
# [array([0.04000002], dtype=float32)]
# dynamic graph mode
paddle.disable_static()
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
output = mse_loss(input, label)
print(output.numpy())
# [0.04000002]
......
......@@ -38,8 +38,8 @@ nll_loss
place = paddle.CPUPlace()
paddle.disable_static(place)
input = paddle.to_variable(input_np)
input = paddle.to_tensor(input_np)
log_out = log_softmax(input)
label = paddle.to_variable(label_np)
label = paddle.to_tensor(label_np)
result = nll_loss(log_out, label)
print(result.numpy()) # [1.0720209]
......@@ -42,7 +42,7 @@ normalize
paddle.disable_static()
x = np.arange(6, dtype=np.float32).reshape(2,3)
x = paddle.to_variable(x)
x = paddle.to_tensor(x)
y = F.normalize(x)
print(y.numpy())
# [[0. 0.4472136 0.8944272 ]
......
......@@ -34,10 +34,7 @@ Hardshrink activation layer
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_variable(np.array([-1, 0.3, 2.5]))
x = paddle.to_tensor([-1, 0.3, 2.5])
m = paddle.nn.Hardshrink()
out = m(x) # [-1., 0., 2.5]
......@@ -29,12 +29,9 @@ Sigmoid
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input_data = np.array([1.0, 2.0, 3.0, 4.0]).astype('float32')
m = paddle.nn.Sigmoid()
x = paddle.to_variable(input_data)
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
output = m(x)
print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
......@@ -30,12 +30,9 @@ PairwiseDistance
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_np = np.array([[1., 3.], [3., 5.]]).astype(np.float64)
y_np = np.array([[5., 6.], [7., 8.]]).astype(np.float64)
x = paddle.to_variable(x_np)
y = paddle.to_variable(y_np)
x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype='float64')
y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype='float64')
dist = paddle.nn.PairwiseDistance()
distance = dist(x, y)
print(distance.numpy()) # [5. 5.]
......
......@@ -39,14 +39,11 @@ L1Loss
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])
l1_loss = paddle.nn.loss.L1Loss()
output = l1_loss(input, label)
......
......@@ -63,8 +63,8 @@ MSELoss
# dynamic graph mode
paddle.disable_static()
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
output = mse_loss(input, label)
print(output.numpy())
# [0.04000002]
......@@ -46,15 +46,11 @@ MarginRankingLoss
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype("float32"))
other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype("float32"))
label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype("float32"))
input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
margin_rank_loss = paddle.nn.MarginRankingLoss()
loss = margin_rank_loss(input, other, label)
print(loss.numpy()) # [0.75]
......@@ -55,16 +55,16 @@ NLLLoss
log_softmax = paddle.nn.LogSoftmax(axis=1)
input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ],
[0.53331435, 0.07999352, 0.8549948 ],
[0.25879037, 0.39530203, 0.698465 ],
[0.73427284, 0.63575995, 0.18827209],
[0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32)
[0.53331435, 0.07999352, 0.8549948 ],
[0.25879037, 0.39530203, 0.698465 ],
[0.73427284, 0.63575995, 0.18827209],
[0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32)
label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64)
place = paddle.CPUPlace()
paddle.disable_static(place)
input = paddle.to_variable(input_np)
input = paddle.to_tensor(input_np)
log_out = log_softmax(input)
label = paddle.to_variable(label_np)
label = paddle.to_tensor(label_np)
result = nll_loss(log_out, label)
print(result.numpy()) # [1.0720209]
......@@ -52,7 +52,7 @@ MaxPool1d
import numpy as np
paddle.disable_static()
data = paddle.to_variable(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
MaxPool1d = nn.layer.MaxPool1d(kernel_size=2, stride=2, padding=0)
pool_out = MaxPool1d(data)
# pool_out shape: [1, 3, 16]
......
.. _cn_api_paddle_to_tensor:
to_tensor
-------------------------------
.. py:function:: paddle.to_tensor(data, dtype=None, place=None, stop_gradient=True)
This API creates a tensor from the given ``data``. The created tensor is of type ``paddle.Tensor`` or ``paddle.ComplexTensor``.
``data`` can be a scalar, tuple, list, numpy\.ndarray, paddle\.Tensor, or paddle\.ComplexTensor.
If ``data`` is already a tensor and neither ``dtype`` nor ``place`` changes, no copy is made and the original tensor is returned;
otherwise a new tensor is created, and the original computation graph is not preserved.
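A minimal sketch of this copy behavior (variable names are illustrative; the no-copy path assumes both dtype and place match):

.. code-block:: python

    import paddle
    paddle.disable_static()
    t = paddle.to_tensor([1.0, 2.0])
    # same dtype and place: the original tensor is returned without a copy
    same = paddle.to_tensor(t)
    # different dtype: a new tensor is constructed; the original graph is not kept
    casted = paddle.to_tensor(t, dtype='float64')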
``ComplexTensor`` is a Paddle-specific data type. For a ``ComplexTensor`` ``x``, ``x.real`` is the real part and ``x.imag`` is the imaginary part.
Parameters:
- **data** (scalar|tuple|list|ndarray|Tensor|ComplexTensor) - Data used to initialize the tensor; can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor, or paddle\.ComplexTensor.
- **dtype** (str, optional) - Data type of the created tensor; can be 'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', or 'uint8'. When creating a ``ComplexTensor``, dtype can also be 'complex64' or 'complex128'. Defaults to None: if ``data`` is a Python float, the dtype is taken from :ref:`cn_api_paddle_framework_get_default_dtype`; for other types the dtype is inferred automatically.
- **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace, optional) - Device on which the tensor is created; can be CPUPlace, CUDAPinnedPlace, or CUDAPlace. Defaults to None, which uses the global place.
- **stop_gradient** (bool, optional) - Whether to block gradient propagation in autograd. Defaults to True, meaning gradients are not propagated.
Returns: the tensor created from ``data``, of type ``paddle.Tensor`` or ``paddle.ComplexTensor``.
Raises:
- ``TypeError``: if ``data`` is not a scalar, list, tuple, numpy.ndarray, paddle.Tensor, or paddle.ComplexTensor
- ``ValueError``: if ``data`` is a tuple or list containing subsequences of different lengths, e.g. [[1, 2], [3, 4, 5]]
- ``TypeError``: if ``dtype`` is not bool, float16, float32, float64, int8, int16, int32, int64, uint8, complex64, or complex128
- ``ValueError``: if ``place`` is not paddle.CPUPlace, paddle.CUDAPinnedPlace, or paddle.CUDAPlace
**Code Examples**:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
type(paddle.to_tensor(1))
# <class 'paddle.Tensor'>
paddle.to_tensor(1)
# Tensor: generated_tensor_0
# - place: CUDAPlace(0) # allocate on global default place CPU:0
# - shape: [1]
# - layout: NCHW
# - dtype: int64_t
# - data: [1]
x = paddle.to_tensor(1)
paddle.to_tensor(x, dtype='int32', place=paddle.CPUPlace()) # A new tensor will be constructed due to different dtype or place
# Tensor: generated_tensor_01
# - place: CPUPlace
# - shape: [1]
# - layout: NCHW
# - dtype: int
# - data: [1]
paddle.to_tensor((1.1, 2.2), place=paddle.CUDAPinnedPlace())
# Tensor: generated_tensor_1
# - place: CUDAPinnedPlace
# - shape: [2]
# - layout: NCHW
# - dtype: double
# - data: [1.1 2.2]
paddle.to_tensor([[0.1, 0.2], [0.3, 0.4]], place=paddle.CUDAPlace(0), stop_gradient=False)
# Tensor: generated_tensor_2
# - place: CUDAPlace(0)
# - shape: [2, 2]
# - layout: NCHW
# - dtype: double
# - data: [0.1 0.2 0.3 0.4]
type(paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64'))
# <class 'paddle.ComplexTensor'>
paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64')
# ComplexTensor[real]: generated_tensor_0.real
# - place: CUDAPlace(0)
# - shape: [2, 2]
# - layout: NCHW
# - dtype: float
# - data: [1 2 3 4]
# ComplexTensor[imag]: generated_tensor_0.imag
# - place: CUDAPlace(0)
# - shape: [2, 2]
# - layout: NCHW
# - dtype: float
# - data: [1 0 2 0]
\ No newline at end of file
......@@ -31,7 +31,7 @@ dot
paddle.disable_static()
x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.dot(x, y)
print(z.numpy())
......@@ -67,16 +67,10 @@ stack
.. code-block:: python
import paddle
import numpy as np
data1 = np.array([[1.0, 2.0]])
data2 = np.array([[3.0, 4.0]])
data3 = np.array([[5.0, 6.0]])
paddle.disable_static()
x1 = paddle.to_variable(data1)
x2 = paddle.to_variable(data2)
x3 = paddle.to_variable(data3)
x1 = paddle.to_tensor([[1.0, 2.0]])
x2 = paddle.to_tensor([[3.0, 4.0]])
x3 = paddle.to_tensor([[5.0, 6.0]])
out = paddle.stack([x1, x2, x3], axis=0)
print(out.shape) # [3, 1, 2]
......
......@@ -25,16 +25,12 @@ max
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
# data_x is a variable with shape [2, 4]
# the axis is a int element
data_x = np.array([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
x = paddle.to_variable(data_x)
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
result1 = paddle.max(x)
print(result1.numpy())
#[0.9]
......@@ -49,11 +45,9 @@ max
#[[0.9]
# [0.7]]
# data_y is a variable with shape [2, 2, 2]
# the axis is list
data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
y = paddle.to_variable(data_y)
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.max(y, axis=[1, 2])
print(result5.numpy())
#[4. 8.]
......
......@@ -56,39 +56,30 @@ maximum
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([[1, 2], [3, 4]])
y = paddle.to_tensor([[5, 6], [7, 8]])
res = paddle.maximum(x, y)
print(res.numpy())
#[[5. 6.]
# [7. 8.]]
x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
y_data = np.array([1, 2], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([1, 2])
res = paddle.maximum(x, y, axis=1)
print(res.numpy())
#[[[1. 2. 3.]
# [2. 2. 3.]]]
x_data = np.array([2, 3, 5], dtype=np.float32)
y_data = np.array([1, 4, np.nan], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
res = paddle.maximum(x, y)
print(res.numpy())
#[ 2. 4. nan]
x_data = np.array([5, 3, np.inf], dtype=np.float32)
y_data = np.array([1, 4, 5], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, 4, 5], dtype='float32')
res = paddle.maximum(x, y)
print(res.numpy())
#[ 5. 4. inf]
......@@ -24,16 +24,12 @@ min
::::::::::
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
# data_x is a variable with shape [2, 4]
# the axis is a int element
data_x = np.array([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
x = paddle.to_variable(data_x)
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
result1 = paddle.min(x)
print(result1.numpy())
#[0.1]
......@@ -48,11 +44,9 @@ min
#[[0.2]
# [0.1]]
# data_y is a variable with shape [2, 2, 2]
# the axis is list
data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
y = paddle.to_variable(data_y)
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.min(y, axis=[1, 2])
print(result5.numpy())
#[1. 5.]
......
......@@ -56,39 +56,30 @@ minimum
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32')
res = paddle.minimum(x, y)
print(res.numpy())
#[[1. 2.]
# [3. 4.]]
x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
y_data = np.array([1, 2], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32')
y = paddle.to_tensor([1, 2], dtype='float32')
res = paddle.minimum(x, y, axis=1)
print(res.numpy())
#[[[1. 1. 1.]
# [2. 2. 2.]]]
x_data = np.array([2, 3, 5], dtype=np.float32)
y_data = np.array([1, 4, np.nan], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
res = paddle.minimum(x, y)
print(res.numpy())
#[ 1. 3. nan]
x_data = np.array([5, 3, np.inf], dtype=np.float32)
y_data = np.array([1, 4, 5], dtype=np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, 4, 5], dtype='float32')
res = paddle.minimum(x, y)
print(res.numpy())
#[1. 3. 5.]
......@@ -8,6 +8,8 @@ PaddlePaddle (PArallel Distributed Deep LEarning) is an easy-to-use, efficient, and flexible
Let's start by learning the basic concepts of PaddlePaddle:
- `Introduction to Tensor <tensor_introduction.html>`_ : an introduction to Tensor, the representation of data in Paddle.
- `Version migration <./migration_cn.html>`_ : the changes from Paddle 1 to Paddle 2 and how to use the paddle1to2 migration tool.
- `Dynamic graph to static graph <./dygraph_to_static/index_cn.html>`_ : how to convert a Paddle dynamic-graph model to a static graph.
- `Model saving and loading <./model_save_load_cn.html>`_ : how to save and load Paddle models and parameters.
......@@ -16,6 +18,7 @@ PaddlePaddle (PArallel Distributed Deep LEarning) is an easy-to-use, efficient, and flexible
.. toctree::
:hidden:
tensor_introduction.md
migration_cn.rst
dygraph_to_static/index_cn.rst
model_save_load_cn.rst
......@@ -9,11 +9,13 @@ Please refer to `PaddlePaddle Github <https://github.com/PaddlePaddle/Paddle>`_
Let's start by studying the basic concepts of PaddlePaddle:
- `Introduction to Tensor <tensor_introduction_en.html>`_ : an introduction to Tensor, the representation of data in Paddle.
- `Migration tools <./migration_en.html>`_ : how to use the migration tools to upgrade your code.
- `Dynamic to static <./dygraph_to_static/index_en.html>`_ : how to convert your model from a dynamic graph to a static graph.
.. toctree::
:hidden:
tensor_introduction_en.md
migration_en.rst
dynamic_to_static/index_en.rst
# Introduction to Tensor
Like other deep learning frameworks, PaddlePaddle (hereinafter referred to as Paddle) uses **Tensor** to represent data; all data passed through a neural network is a **Tensor**.
A **Tensor** can be understood as a multi-dimensional array with an arbitrary number of dimensions; different **Tensor**s can have different **data types** (dtype) and **shapes** (shape).
All elements of a given **Tensor** share the same dtype. If you are familiar with [Numpy](https://www.paddlepaddle.org.cn/tutorials/projectdetail/590690), a **Tensor** is similar to a **Numpy array**.
### Contents
* [Creation of Tensor](#1)
* [Shape of Tensor](#2)
* [Other attributes of Tensor](#3)
* [Operations on Tensor](#4)
----------
## <h2 id="1">Creation of Tensor</h2>
First, let's create a **Tensor**:
### 1. Create a vector-like **1-D Tensor**, whose rank is 1
```python
import paddle

# The data type can be specified via dtype; otherwise a float32 Tensor is created
rank_1_tensor = paddle.to_tensor([2.0, 3.0, 4.0], dtype='float64')
print(rank_1_tensor)
```
```text
Tensor: generated_tensor_1
- place: CUDAPlace(0)
- shape: [3]
- layout: NCHW
- dtype: double
- data: [2.0, 3.0, 4.0]
```
In particular, if a single scalar value (for example a single float/int/bool) is passed, a **Tensor** with shape [1] is created.
```python
paddle.to_tensor(2)
paddle.to_tensor([2])
```
The two calls above are completely equivalent; both create a Tensor with shape [1], and the output is:
```text
Tensor: generated_tensor_0
- place: CUDAPlace(0)
- shape: [1]
- layout: NCHW
- dtype: int32_t
- data: [2]
```
### 2. Create a matrix-like **2-D Tensor**, whose rank is 2
```python
rank_2_tensor = paddle.to_tensor([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]])
print(rank_2_tensor)
```
```text
Tensor: generated_tensor_2
- place: CUDAPlace(0)
- shape: [2, 3]
- layout: NCHW
- dtype: double
- data: [1.0 2.0 3.0 4.0 5.0 6.0]
```
### 3. Similarly, you can create multi-dimensional Tensors with rank 3, 4, ..., N
```
# A Tensor can have an arbitrary number of axes (also called dimensions)
rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]],
[[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20]]])
print(rank_3_tensor)
```
```text
Tensor: generated_tensor_3
- place: CUDAPlace(0)
- shape: [2, 2, 5]
- layout: NCHW
- dtype: double
- data: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20]
```
The **Tensor**s of different rank above can be visualized as follows:
<center><img src="https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/guides/images/Tensor_2.0.png?raw=true" width="600"></center>
<br><center>Figure 1. Visualization of Tensors with different ranks</center>
You can conveniently convert a **Tensor** to a **Numpy array** via the Tensor.numpy() method:
```python
print(rank_2_tensor.numpy())
```
```text
array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]], dtype=float32)
```
**Tensor** supports not only floats and ints but also complex numbers:
```python
rank_2_complex_tensor = paddle.to_tensor([[1+1j, 2+2j],
[3+3j, 4+4j]])
```
```text
ComplexTensor[real]: generated_tensor_0.real
- place: CUDAPlace(0)
- shape: [2, 2]
- layout: NCHW
- dtype: float
- data: [1 2 3 4]
ComplexTensor[imag]: generated_tensor_0.imag
- place: CUDAPlace(0)
- shape: [2, 2]
- layout: NCHW
- dtype: float
- data: [1 2 3 4]
```
If the input data is detected to contain complex numbers, a **ComplexTensor** is created automatically. A **ComplexTensor** is a special data structure in Paddle,
consisting of a real part (real) and an imaginary part (imag): two **Tensor**s with identical shape and dtype. Its structure can be visualized as follows:
<center><img src="https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/guides/images/ComplexTensor_2.0.png?raw=true" width="600" ></center>
<br><center>Figure 2. Visualization of a ComplexTensor</center>
A **Tensor** must be shape-regular, similar to a "rectangle": along any axis (also called a dimension), the number of elements must be equal. For the following input:
```
rank_2_tensor = paddle.to_tensor([[1.0, 2.0],
[4.0, 5.0, 6.0]])
```
an exception will be thrown:
```text
ValueError:
Faild to convert input data to a regular ndarray :
- Usually this means the input data contains nested lists with different lengths.
```
The above shows how to create a **Tensor** from Python data; you can also create a **Tensor** from a **Numpy array**:
```python
import numpy

rank_1_tensor = paddle.to_tensor(numpy.array([1.0, 2.0]))
rank_2_tensor = paddle.to_tensor(numpy.array([[1.0, 2.0],
[3.0, 4.0]]))
rank_3_tensor = paddle.to_tensor(numpy.random.rand(3, 2))
```
The created **Tensor** has the same shape and dtype as the original **Numpy array**.
To create a **Tensor** with a specified shape, Paddle also provides several APIs:
```text
paddle.zeros([m, n])             # create a Tensor of shape [m, n] with all elements 0
paddle.ones([m, n])              # create a Tensor of shape [m, n] with all elements 1
paddle.full([m, n], 10)          # create a Tensor of shape [m, n] with all elements 10
paddle.arange(start, end, step)  # create a Tensor with values from start to end, with step size step
paddle.linspace(start, end, num) # create a Tensor with num evenly spaced values from start to end
```
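As a quick, runnable check of two of these creation APIs (shapes chosen arbitrarily, assuming the 2.0 dynamic-graph mode):
```python
import paddle

paddle.disable_static()
print(paddle.zeros([2, 3]).shape)       # [2, 3]
print(paddle.arange(0, 10, 2).numpy())  # [0 2 4 6 8]
```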
----------
## <h2 id="2">Shape of Tensor</h2>
### Basic concepts
The shape of a **Tensor** can be inspected via **Tensor.shape**. shape is an important attribute of a **Tensor**; the related concepts are:
1. shape: the number of elements along each dimension of the tensor
2. rank: the number of dimensions of the tensor; for example, a vector has rank 1 and a matrix has rank 2
3. axis or dimension: a particular dimension of the tensor
4. size: the total number of elements in the tensor
Let's create a 4-D **Tensor** and use a figure to illustrate the relationship between these concepts:
```python
rank_4_tensor = paddle.ones([2, 3, 4, 5])
```
<center><img src="https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/guides/images/Axis_2.0.png?raw=true" width="600" ></center>
<br><center>Figure 3. Relationship between a Tensor's shape, axis, dimension, and rank</center>
```python
print("Data Type of every element:", rank_4_tensor.dtype)
print("Number of dimensions:", rank_4_tensor.ndim)
print("Shape of tensor:", rank_4_tensor.shape)
print("Elements number along axis 0 of tensor:", rank_4_tensor.shape[0])
print("Elements number along the last axis of tensor:", rank_4_tensor.shape[-1])
```
```text
Data Type of every element: VarType.FP32
Number of dimensions: 4
Shape of tensor: [2, 3, 4, 5]
Elements number along axis 0 of tensor: 2
Elements number along the last axis of tensor: 5
```
### Indexing
Indexing provides a convenient way to "slice" a Tensor. Paddle follows the standard Python and Numpy indexing rules, similar to [indexing a list or a string in Python](https://docs.python.org/3/tutorial/introduction.html#strings). In particular:
1. negative indices count backwards from the end
2. with ``:`` slicing, the format is start:stop:step, where start, stop, and step may each be omitted
* For a 1-D **Tensor**, there is only single-axis indexing:
```python
rank_1_tensor = paddle.to_tensor([0, 1, 2, 3, 4, 5, 6, 7, 8])
print("Origin Tensor:", rank_1_tensor.numpy())
print("First element:", rank_1_tensor[0].numpy())
print("Last element:", rank_1_tensor[-1].numpy())
print("All element:", rank_1_tensor[:].numpy())
print("Before 3:", rank_1_tensor[:3].numpy())
print("From 6 to the end:", rank_1_tensor[6:].numpy())
print("From 3 to 6:", rank_1_tensor[3:6].numpy())
print("Interval of 3:", rank_1_tensor[::3].numpy())
print("Reverse:", rank_1_tensor[::-1].numpy())
```
```text
Origin Tensor: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=int64)
First element: [0]
Last element: [8]
All element: [0 1 2 3 4 5 6 7 8]
Before 3: [0 1 2]
From 6 to the end: [6 7 8]
From 3 to 6: [3 4 5]
Interval of 3: [0 3 6]
Reverse: [8 7 6 5 4 3 2 1 0]
```
* For 2-D and higher **Tensor**s, there is multi-axis indexing:
```python
rank_2_tensor = paddle.to_tensor([[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]])
print("Origin Tensor:", rank_2_tensor.numpy())
print("First row:", rank_2_tensor[0].numpy())
print("First row:", rank_2_tensor[0, :].numpy())
print("First column:", rank_2_tensor[:, 0].numpy())
print("Last column:", rank_2_tensor[:, -1].numpy())
print("All element:", rank_2_tensor[:].numpy())
print("First row and second column:", rank_2_tensor[0, 1].numpy())
```
```text
Origin Tensor: array([[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]], dtype=int64)
First row: [0 1 2 3]
First row: [0 1 2 3]
First column: [0 4 8]
Last column: [ 3 7 11]
All element: [[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
First row and second column: [1]
```
The first index value corresponds to axis 0, the second to axis 1, and so on; if no index is given for an axis, it defaults to ``:``. For example:
```
rank_3_tensor[1]
rank_3_tensor[1, :]
rank_3_tensor[1, :, :]
```
The three forms of indexing above give exactly the same result.
### Manipulating the shape
Redefining the shape of a **Tensor** is important in practice.
```python
rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]],
[[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20]],
[[21, 22, 23, 24, 25],
[26, 27, 28, 29, 30]]])
print("the shape of rank_3_tensor:", rank_3_tensor.shape)
```
```text
the shape of rank_3_tensor: [3, 2, 5]
```
Paddle provides the reshape API to change a Tensor's shape:
```python
rank_3_tensor = paddle.reshape(rank_3_tensor, [2, 5, 3])
print("After reshape:", rank_3_tensor.shape)
```
```text
After reshape: [2, 5, 3]
```
There are a few tricks for specifying a new shape:
**1.** -1 means the size of this dimension is inferred from the total element count and the remaining dimensions; therefore one and only one dimension may be set to -1.
**2.** 0 means the actual size is copied from the corresponding dimension of the input Tensor, so an index holding 0 in the shape must not exceed the input's number of dimensions.
A few examples illustrate these tricks well:
```text
origin:[3, 2, 5] reshape:[3, 10] actual: [3, 10]
origin:[3, 2, 5] reshape:[-1] actual: [30]
origin:[3, 2, 5] reshape:[0, 5, -1] actual: [3, 5, 2]
```
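A runnable sketch of the 0 and -1 tricks described above (the input tensor is arbitrary):
```python
import paddle

paddle.disable_static()
t = paddle.ones([3, 2, 5])
# 0 copies dimension 0 from the input; -1 is inferred from the element count
print(paddle.reshape(t, [0, 5, -1]).shape)  # [3, 5, 2]
print(paddle.reshape(t, [-1]).shape)        # [30]
```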
Notice that reshaping to [-1] flattens the tensor into a 1-D Tensor following its layout in memory.
```python
print("Tensor flattened to Vector:", paddle.reshape(rank_3_tensor, [-1]).numpy())
```
```text
Tensor flattened to Vector: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30]
```
----------
## <h2 id="3">Other attributes of Tensor</h2>
### dtype of Tensor
A **Tensor**'s data type can be inspected via Tensor.dtype; the supported dtypes are 'bool', 'float16', 'float32', 'float64', 'uint8', 'int8', 'int16', 'int32', and 'int64'.
* For a Tensor created from Python values, the dtype can be specified explicitly. If it is not:
    * Python integers produce an int64 Tensor
    * Python floats produce a float32 Tensor by default; the default float dtype can be adjusted via set_default_dtype.
* A Tensor created from a Numpy array keeps the dtype of the original array.
```python
print("Tensor dtype from Python integers:", paddle.to_tensor(1).dtype)
print("Tensor dtype from Python floating point:", paddle.to_tensor(1.0).dtype)
```
```text
Tensor dtype from Python integers: VarType.INT64
Tensor dtype from Python floating point: VarType.FP32
```
Paddle provides the **cast** API to change the dtype:
```python
float32_tensor = paddle.to_tensor(1.0)
float64_tensor = paddle.cast(float32_tensor, dtype='float64')
print("Tensor after cast to float64:", float64_tensor.dtype)
int64_tensor = paddle.cast(float32_tensor, dtype='int64')
print("Tensor after cast to int64:", int64_tensor.dthpe)
```
```text
Tensor after cast to float64: VarType.FP64
Tensor after cast to int64: VarType.INT64
```
### place of Tensor
When a **Tensor** is created, **place** specifies the device on which it is allocated. Three kinds of places are supported: CPU, GPU, and pinned memory. Pinned memory, also called non-pageable or page-locked memory, offers higher read/write throughput to the GPU and supports asynchronous transfers, which can further improve overall network performance; the drawback is that allocating too much of it can degrade host performance, because it reduces the pageable memory available for storing virtual-memory data.
* **Create a Tensor on the CPU**
```python
cpu_tensor = paddle.to_tensor(1, place=paddle.CPUPlace())
print(cpu_tensor)
```
```text
Tensor: generated_tensor_0
- place: CPUPlace
```
* **Create a Tensor on the GPU**
```python
gpu_tensor = paddle.to_tensor(1, place=paddle.CUDAPlace(0))
print(gpu_tensor)
```
```text
Tensor: generated_tensor_0
- place: CUDAPlace(0)
```
* **Create a Tensor in pinned memory**
```python
pin_memory_tensor = paddle.to_tensor(1, place=paddle.CUDAPinnedPlace())
print(pin_memory_tensor)
```
```text
Tensor: generated_tensor_0
- place: CUDAPinnedPlace
```
### name of Tensor
A Tensor's name is its unique identifier, a Python string; it can be read via the Tensor.name attribute. By default, Paddle assigns each newly created Tensor a unique name.
```python
print("Tensor name:", paddle.to_tensor(1).name)
```
```text
Tensor name: generated_tensor_0
```
----------
## <h2 id="4">Operations on Tensor</h2>
Paddle provides a rich set of Tensor operation APIs, including mathematical operators, logical operators, linear algebra routines, and more, totaling over 100 APIs. They can be invoked in two ways:
```python
x = paddle.to_tensor([[1.1, 2.2], [3.3, 4.4]])
y = paddle.to_tensor([[5.5, 6.6], [7.7, 8.8]])
print(paddle.add(x, y), "\n")
print(x.add(y), "\n")
```
```text
Tensor: eager_tmp_2
- place: CUDAPlace(0)
- shape: [2, 2]
- layout: NCHW
- dtype: float
- data: [6.6 8.8 11 13.2]
Tensor: eager_tmp_3
- place: CUDAPlace(0)
- shape: [2, 2]
- layout: NCHW
- dtype: float
- data: [6.6 8.8 11 13.2]
```
As you can see, the **Tensor class methods** and the **paddle API** produce identical results. Since the **class methods** are more convenient to use, the common **Tensor** operations below are presented from the **class method** perspective.
#### Mathematical operators
```python
x.abs()            # elementwise absolute value
x.ceil()           # elementwise ceiling
x.floor()          # elementwise floor
x.exp()            # elementwise natural exponential
x.log()            # elementwise natural logarithm
x.reciprocal()     # elementwise reciprocal
x.square()         # elementwise square
x.sqrt()           # elementwise square root
x.sum()            # sum of all elements
x.asin()           # elementwise arcsine
x.add(y)           # elementwise addition
x.add(-y)          # elementwise subtraction
x.multiply(y)      # elementwise multiplication
x.divide(y)        # elementwise division
x.floor_divide(y)  # elementwise division, rounded down
x.remainder(y)     # elementwise remainder
x.pow(y)           # elementwise power
x.reduce_max()     # maximum of all elements; an axis can be specified
x.reduce_min()     # minimum of all elements; an axis can be specified
x.reduce_prod()    # product of all elements; an axis can be specified
x.reduce_sum()     # sum of all elements; an axis can be specified
```
Paddle overrides the Python magic methods for mathematical operations, so the following expressions are equivalent to the calls above:
```text
x + y  -> x.add(y)           # elementwise addition
x - y  -> x.add(-y)          # elementwise subtraction
x * y  -> x.multiply(y)      # elementwise multiplication
x / y  -> x.divide(y)        # elementwise division
x // y -> x.floor_divide(y)  # elementwise division, rounded down
x % y  -> x.remainder(y)     # elementwise remainder
x ** y -> x.pow(y)           # elementwise power
```
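For example, the overloaded operator and the underlying class method give identical results (a minimal check with arbitrary values):
```python
import paddle

paddle.disable_static()
x = paddle.to_tensor([1.0, 2.0])
y = paddle.to_tensor([3.0, 4.0])
# x + y dispatches to x.add(y); both return the same new Tensor value
print((x + y).numpy())   # [4. 6.]
print(x.add(y).numpy())  # [4. 6.]
```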
#### Logical operators
```python
x.is_empty()        # whether the tensor is empty
x.isfinite()        # whether each element is a finite number (neither inf nor nan)
x.equal_all(y)      # whether all elements of the two tensors are equal
x.equal(y)          # elementwise equality of the two tensors
x.not_equal(y)      # elementwise inequality of the two tensors
x.less_than(y)      # whether each element of x is less than the corresponding element of y
x.less_equal(y)     # whether each element of x is less than or equal to the corresponding element of y
x.greater_than(y)   # whether each element of x is greater than the corresponding element of y
x.greater_equal(y)  # whether each element of x is greater than or equal to the corresponding element of y
```
Likewise, Paddle overrides the Python magic methods for comparisons, so the following expressions are equivalent to the calls above:
```text
x == y -> x.equal(y)          # elementwise equality
x != y -> x.not_equal(y)      # elementwise inequality
x < y  -> x.less_than(y)      # elementwise less-than
x <= y -> x.less_equal(y)     # elementwise less-than-or-equal
x > y  -> x.greater_than(y)   # elementwise greater-than
x >= y -> x.greater_equal(y)  # elementwise greater-than-or-equal
```
The following operations apply to bool Tensors only:
```python
x.reduce_all()    # whether all elements of a bool tensor are True
x.reduce_any()    # whether at least one element of a bool tensor is True
x.logical_and(y)  # elementwise logical AND of two bool tensors
x.logical_or(y)   # elementwise logical OR of two bool tensors
x.logical_xor(y)  # elementwise logical XOR of two bool tensors
x.logical_not()   # elementwise logical NOT of a bool tensor
```
#### Linear algebra
```python
x.cholesky()         # Cholesky decomposition of a matrix
x.t()                # matrix transpose
x.transpose([1, 0])  # swap axis 0 and axis 1
x.norm('fro')        # Frobenius norm of a matrix
x.dist(y, p=2)       # 2-norm of (x - y)
x.matmul(y)          # matrix multiplication
```
Note that Tensor operations in Paddle are all non-inplace: ``x.add(y)`` does not modify **tensor x** directly but returns a new **Tensor** holding the result.
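A short demonstration of this non-inplace behavior (values chosen arbitrarily):
```python
import paddle

paddle.disable_static()
x = paddle.to_tensor([1.0, 2.0])
z = x.add(paddle.to_tensor([3.0, 4.0]))
print(x.numpy())  # [1. 2.] -- x itself is unchanged
print(z.numpy())  # [4. 6.] -- the result lives in a new Tensor
```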
For more Tensor operation APIs, see [class paddle.Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/creation/Tensor_cn.html).
# Introduction to Tensor
Like other deep learning frameworks, PaddlePaddle (hereinafter referred to as Paddle) uses **Tensor** to represent data.
A **Tensor** can be regarded as a multi-dimensional array, which can have as many dimensions as needed. Different **Tensor**s can have different data types (dtype) and shapes.
All elements within the same Tensor share one dtype. If you are familiar with [Numpy](https://www.paddlepaddle.org.cn/tutorials/projectdetail/590690), a **Tensor** is similar to a **Numpy array**.
### Contents
* [Creation of Tensor](#1)
* [Shape of Tensor](#2)
* [Other attributes of Tensor](#3)
* [Operations on Tensor](#4)
----------
## <h2 id="1">Creation of Tensor</h2>
First, let's create a **Tensor**:
### 1. Create a vector-like **1-D Tensor**, whose rank is 1
```python
import paddle

# The Tensor data type can be specified by dtype; otherwise a float32 Tensor will be created
rank_1_tensor = paddle.to_tensor([2.0, 3.0, 4.0], dtype='float64')
print(rank_1_tensor)
```
```text
Tensor: generated_tensor_1
- place: CUDAPlace(0)
- shape: [3]
- layout: NCHW
- dtype: double
- data: [2.0, 3.0, 4.0]
```
Specifically, if you input only a scalar value (for example, a single float/int/bool), a **Tensor** whose shape is [1] will be created.
```python
paddle.to_tensor(2)
paddle.to_tensor([2])
```
The two calls above are completely equivalent; the Tensor shape is [1]:
```text
Tensor: generated_tensor_0
- place: CUDAPlace(0)
- shape: [1]
- layout: NCHW
- dtype: int32_t
- data: [2]
```
### 2. Create a matrix-like **2-D Tensor**, whose rank is 2
```python
rank_2_tensor = paddle.to_tensor([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]])
print(rank_2_tensor)
```
```text
Tensor: generated_tensor_2
- place: CUDAPlace(0)
- shape: [2, 3]
- layout: NCHW
- dtype: double
- data: [1.0 2.0 3.0 4.0 5.0 6.0]
```
### 3. Similarly, you can create multi-dimensional Tensors with rank 3, 4, ..., N
```
# A Tensor can have an arbitrary number of axes (sometimes called "dimensions")
rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]],
[[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20]]])
print(rank_3_tensor)
```
```text
Tensor: generated_tensor_3
- place: CUDAPlace(0)
- shape: [2, 2, 5]
- layout: NCHW
- dtype: double
- data: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20]
```
The visual representation of the **Tensor**s above is:
<center><img src="https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/guides/images/Tensor_2.0.png?raw=true" width="600" ></center>
<br><center>Figure1. Visual representation of Tensor with different ranks</center>
You can easily convert a **Tensor** to a Numpy array with the Tensor.numpy() method.
```python
print(rank_2_tensor.numpy())
```
```text
array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]], dtype=float32)
```
**Tensor** supports not only floats and ints but also complex numbers:
```python
rank_2_complex_tensor = paddle.to_tensor([[1+1j, 2+2j],
[3+3j, 4+4j]])
```
```text
ComplexTensor[real]: generated_tensor_0.real
- place: CUDAPlace(0)
- shape: [2, 2]
- layout: NCHW
- dtype: float
- data: [1 2 3 4]
ComplexTensor[imag]: generated_tensor_0.imag
- place: CUDAPlace(0)
- shape: [2, 2]
- layout: NCHW
- dtype: float
- data: [1 2 3 4]
```
If the input data contains complex numbers, a **ComplexTensor** is automatically created. A **ComplexTensor** is a special data structure in Paddle consisting of two **Tensor**s: a real part and an imaginary part. A **ComplexTensor** can be visualized as follows:
<center><img src="https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/guides/images/ComplexTensor_2.0.png?raw=true" width="600" ></center>
<br><center>Figure2. Visual representation of ComplexTensor</center>
**Tensor** must be "rectangular" -- that is, along each axis, every element is the same size. For example:
```
rank_2_tensor = paddle.to_tensor([[1.0, 2.0],
[4.0, 5.0, 6.0]])
```
An exception will be thrown in this case:
```text
ValueError:
Faild to convert input data to a regular ndarray :
- Usually this means the input data contains nested lists with different lengths.
```
The way to create a **Tensor** from Python data is described above. You can also create a **Tensor**
from a numpy array:
```python
import numpy

rank_1_tensor = paddle.to_tensor(numpy.array([1.0, 2.0]))
rank_2_tensor = paddle.to_tensor(numpy.array([[1.0, 2.0],
[3.0, 4.0]]))
rank_3_tensor = paddle.to_tensor(numpy.random.rand(3, 2))
```
The created **Tensor** will have the same shape and dtype as the original Numpy array.
If you want to create a **Tensor** with a specific shape, Paddle also provides these APIs:
```text
paddle.zeros([m, n])            # All elements: 0, Shape: [m, n]
paddle.ones([m, n])             # All elements: 1, Shape: [m, n]
paddle.full([m, n], 10)         # All elements: 10, Shape: [m, n]
paddle.arange(start, end, 2)    # Elements: from start to end, step size is 2
paddle.linspace(start, end, 10) # Elements: 10 evenly spaced values from start to end
```
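As a quick, runnable check of two of these creation APIs (shapes chosen arbitrarily, assuming the 2.0 dynamic-graph mode):
```python
import paddle

paddle.disable_static()
print(paddle.ones([2, 3]).numpy())       # [[1. 1. 1.] [1. 1. 1.]]
print(paddle.linspace(0, 1, 5).numpy())  # [0.   0.25 0.5  0.75 1.  ]
```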
----------
## <h2 id="2">Shape of Tensor</h2>
### Basic Concepts
The shape of a **Tensor** can be obtained via **Tensor.shape**. shape is an important attribute of a **Tensor**, and the following are related concepts:
1. shape: Describes the number of elements on each of the tensor's dimensions.
2. rank: The number of tensor's dimensions. For example, the rank of vector is 1, the rank of matrix is 2.
3. axis or dimension: A particular dimension of a tensor.
4. size: The number of all elements in the tensor.
Let's create a 4-D **Tensor** and visualize it to illustrate the relationship between the above concepts.
```python
rank_4_tensor = paddle.ones([2, 3, 4, 5])
```
<center><img src="https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/guides/images/Axis_2.0.png?raw=true" width="600" ></center>
<br><center>Figure3. The relationship between Tensor shape, axis, dimension and rank</center>
```python
print("Data Type of every element:", rank_4_tensor.dtype)
print("Number of dimensions:", rank_4_tensor.ndim)
print("Shape of tensor:", rank_4_tensor.shape)
print("Elements number along axis 0 of tensor:", rank_4_tensor.shape[0])
print("Elements number along the last axis of tensor:", rank_4_tensor.shape[-1])
```
```text
Data Type of every element: VarType.FP32
Number of dimensions: 4
Shape of tensor: [2, 3, 4, 5]
Elements number along axis 0 of tensor: 2
Elements number along the last axis of tensor: 5
```
### Indexing
Paddle follows the standard Python indexing rules, similar to [indexing a list or a string in Python](https://docs.python.org/3/tutorial/introduction.html#strings), and the basic rules of NumPy indexing. Indexing is used to "slice" a Tensor. It has the following characteristics:
1. negative indices count backwards from the end
2. colons, : , are used for slices: start:stop:step
For **1-D Tensor**, there is only single-axis indexing:
```python
rank_1_tensor = paddle.to_tensor([0, 1, 2, 3, 4, 5, 6, 7, 8])
print("Origin Tensor:", rank_1_tensor.numpy())
print("First element:", rank_1_tensor[0].numpy())
print("Last element:", rank_1_tensor[-1].numpy())
print("All element:", rank_1_tensor[:].numpy())
print("Before 3:", rank_1_tensor[:3].numpy())
print("From 6 to the end:", rank_1_tensor[6:].numpy())
print("From 3 to 6:", rank_1_tensor[3:6].numpy())
print("Interval of 3:", rank_1_tensor[::3].numpy())
print("Reverse:", rank_1_tensor[::-1].numpy())
```
```text
Origin Tensor: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=int64)
First element: [0]
Last element: [8]
All element: [0 1 2 3 4 5 6 7 8]
Before 3: [0 1 2]
From 6 to the end: [6 7 8]
From 3 to 6: [3 4 5]
Interval of 3: [0 3 6]
Reverse: [8 7 6 5 4 3 2 1 0]
```
For 2-D and higher **Tensor**s, there is multi-axis indexing:
```python
rank_2_tensor = paddle.to_tensor([[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]])
print("Origin Tensor:", rank_2_tensor.numpy())
print("First row:", rank_2_tensor[0].numpy())
print("First row:", rank_2_tensor[0, :].numpy())
print("First column:", rank_2_tensor[:, 0].numpy())
print("Last column:", rank_2_tensor[:, -1].numpy())
print("All element:", rank_2_tensor[:].numpy())
print("First row and second column:", rank_2_tensor[0, 1].numpy())
```
```text
Origin Tensor: array([[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]], dtype=int64)
First row: [0 1 2 3]
First row: [0 1 2 3]
First column: [0 4 8]
Last column: [ 3 7 11]
All element: [[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
First row and second column: [1]
```
The first index value corresponds to axis 0, the second to axis 1, and so on. If no index is specified for an axis, it defaults to ':'. For example:
```
rank_3_tensor[1]
rank_3_tensor[1, :]
rank_3_tensor[1, :, :]
```
These three forms are exactly the same.
### Manipulating Shape
Manipulating the shape of a Tensor is important in programming.
```python
rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]],
[[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20]],
[[21, 22, 23, 24, 25],
[26, 27, 28, 29, 30]]])
print("the shape of rank_3_tensor:", rank_3_tensor.shape)
```
```text
the shape of rank_3_tensor: [3, 2, 5]
```
Paddle provides the reshape API to manipulate the shape of a Tensor:
```python
rank_3_tensor = paddle.reshape(rank_3_tensor, [2, 5, 3])
print("After reshape:", rank_3_tensor.shape)
```
```text
After reshape: [2, 5, 3]
```
There are some tricks for specifying a new shape:
1. -1 indicates that the size of this dimension is inferred from the total element count and the remaining dimensions of the Tensor. Therefore, one and only one dimension can be set to -1.
2. 0 means that the actual size is copied from the corresponding dimension of the input Tensor, so an index holding 0 in the shape cannot exceed the rank of the input.
For example:
```text
origin:[3, 2, 5] reshape:[3, 10] actual: [3, 10]
origin:[3, 2, 5] reshape:[-1] actual: [30]
origin:[3, 2, 5] reshape:[0, 5, -1] actual: [3, 5, 2]
```
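A runnable sketch of the 0 and -1 tricks above (the input tensor is arbitrary):
```python
import paddle

paddle.disable_static()
t = paddle.ones([3, 2, 5])
# 0 copies dimension 0 from the input; -1 is inferred from the element count
print(paddle.reshape(t, [0, 5, -1]).shape)  # [3, 5, 2]
```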
Reshaping to [-1] flattens the tensor in the order it is laid out in memory:
```python
print("Tensor flattened to Vector:", paddle.reshape(rank_3_tensor, [-1]).numpy())
```
```text
Tensor flattened to Vector: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30]
```
----------
## <h2 id="3">Other attributes of Tensor</h2>
### dtype of Tensor
The data type of a **Tensor** can be obtained from Tensor.dtype. The supported dtypes are 'bool', 'float16', 'float32', 'float64', 'uint8', 'int8', 'int16', 'int32', and 'int64'.
* When a Tensor is created from Python values, the data type can be specified by dtype. Otherwise:
    * Python integers produce an int64 Tensor
    * Python floats produce a float32 Tensor by default; the default float dtype can be changed via set_default_dtype.
* When a Tensor is created from a Numpy array, its data type stays the same as the original dtype.
```python
print("Tensor dtype from Python integers:", paddle.to_tensor(1).dtype)
print("Tensor dtype from Python floating point:", paddle.to_tensor(1.0).dtype)
```
```text
Tensor dtype from Python integers: VarType.INT64
Tensor dtype from Python floating point: VarType.FP32
```
Paddle provides the **cast** API to change the dtype:
```python
float32_tensor = paddle.to_tensor(1.0)
float64_tensor = paddle.cast(float32_tensor, dtype='float64')
print("Tensor after cast to float64:", float64_tensor.dtype)
int64_tensor = paddle.cast(float32_tensor, dtype='int64')
print("Tensor after cast to int64:", int64_tensor.dthpe)
```
```text
Tensor after cast to float64: VarType.FP64
Tensor after cast to int64: VarType.INT64
```
### place of Tensor
The device can be specified when creating a Tensor. There are three kinds of places to choose from: CPU, GPU, and pinned memory.
Pinned memory offers higher read/write throughput between the host and the GPU. In addition, pinned memory supports asynchronous data copies, which can further improve network performance. The disadvantage is that allocating too much pinned memory may reduce host performance, because it shrinks the pageable memory available for storing virtual-memory data.
* **Create a Tensor on the CPU**:
```python
cpu_tensor = paddle.to_tensor(1, place=paddle.CPUPlace())
print(cpu_tensor)
```
```text
Tensor: generated_tensor_0
- place: CPUPlace
```
* **Create a Tensor on the GPU**:
```python
gpu_tensor = paddle.to_tensor(1, place=paddle.CUDAPlace(0))
print(gpu_tensor)
```
```text
Tensor: generated_tensor_0
- place: CUDAPlace(0)
```
* **Create a Tensor in pinned memory**:
```python
pin_memory_tensor = paddle.to_tensor(1, place=paddle.CUDAPinnedPlace())
print(pin_memory_tensor)
```
```text
Tensor: generated_tensor_0
- place: CUDAPinnedPlace
```
### name of Tensor
The name of a Tensor is its unique identifier, a Python string, and it can be obtained via ``Tensor.name``. By default, Paddle assigns a unique name to each Tensor when it is created.
```python
print("Tensor name:", paddle.to_tensor(1).name)
```
```text
Tensor name: generated_tensor_0
```
----------
## <h2 id="4">Method of Tensor</h2>
Paddles provide rich Tensor operating API , including mathematical operators, logical operators, linear algebra operators and so on. The total number is more than 100+ kinds. For example:
```python
x = paddle.to_tensor([[1.1, 2.2], [3.3, 4.4]])
y = paddle.to_tensor([[5.5, 6.6], [7.7, 8.8]])
print(paddle.add(x, y), "\n")
print(x.add(y), "\n")
```
```text
Tensor: eager_tmp_2
- place: CUDAPlace(0)
- shape: [2, 2]
- layout: NCHW
- dtype: float
- data: [6.6 8.8 11 13.2]
Tensor: eager_tmp_3
- place: CUDAPlace(0)
- shape: [2, 2]
- layout: NCHW
- dtype: float
- data: [6.6 8.8 11 13.2]
```
It can be seen that the Tensor class method gives the same result as the paddle API, and the class method is more convenient to invoke.
### mathematical operators
```python
x.abs() #absolute value
x.ceil() #round up to an integer
x.floor() #round down to an integer
x.exp() #Calculate exponents of the natural constant of each element
x.log() #Calculate natural logarithm of each element
x.reciprocal() #reciprocal
x.square() #Calculate square of each element
x.sqrt() #Calculate sqrt of each element
x.sum() #Calculate the sum of all elements
x.asin() #Calculate the arcsine of each element
x.add(y) #add element by element
x.add(-y) #subtract element by element
x.multiply(y) #multiply element by element
x.divide(y) #divide element by element
x.floor_divide(y) #floor-divide element by element
x.remainder(y) #mod element by element
x.pow(y) #pow element by element
x.reduce_max() #max value on specific axis
x.reduce_min() #min value on specific axis
x.reduce_prod() #multiply of all elements on specific axis
x.reduce_sum() #sum of all elements on specific axis
```
Paddle overrides the magic methods related to Python mathematical operations, so the following are equivalent:
```text
x + y -> x.add(y)
x - y -> x.add(-y)
x * y -> x.multiply(y)
x / y -> x.divide(y)
x // y -> x.floor_divide(y)
x % y -> x.remainder(y)
x ** y -> x.pow(y)
```
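For example, the overloaded operator and the class method give identical results (a minimal check with arbitrary values):
```python
import paddle

paddle.disable_static()
x = paddle.to_tensor([1.0, 2.0])
y = paddle.to_tensor([3.0, 4.0])
# x + y dispatches to x.add(y)
print((x + y).numpy())   # [4. 6.]
print(x.add(y).numpy())  # [4. 6.]
```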
### logical operators
```python
x.is_empty() #Judge whether tensor is empty
x.isfinite() #Judge whether the element in tensor is finite number
x.equal_all(y) #Judge whether all elements of two tensors are equal
x.equal(y) #judge whether each element of two tensors is equal
x.not_equal(y) #judge whether each element of two tensor is not equal
x.less_than(y) #judge whether each element of tensor x is less than corresponding element of tensor y
x.less_equal(y) #judge whether each element of tensor x is less than or equal to element of tensor y
x.greater_than(y) #judge whether each element of tensor x is greater than element of tensor y
x.greater_equal(y) #judge whether each element of tensor x is greater than or equal to element of tensor y
```
Paddle overrides the magic methods related to Python comparison operations, so the following are equivalent:
```text
x == y -> x.equal(y)
x != y -> x.not_equal(y)
x < y -> x.less_than(y)
x <= y -> x.less_equal(y)
x > y -> x.greater_than(y)
x >= y -> x.greater_equal(y)
```
The following operations apply to bool Tensors only:
```python
x.reduce_all()    #Judge whether all elements of a bool tensor are True
x.reduce_any()    #Judge whether at least one element of a bool tensor is True
x.logical_and(y)  #elementwise logical AND of two bool tensors
x.logical_or(y)   #elementwise logical OR of two bool tensors
x.logical_xor(y)  #elementwise logical XOR of two bool tensors
x.logical_not()   #elementwise logical NOT of a bool tensor
```
### linear algebra operators
```python
x.cholesky() #cholesky decomposition of a matrix
x.t() #matrix transpose
x.transpose([1, 0]) #swap axis 0 with axis 1
x.norm('fro')        #Frobenius norm of a matrix
x.dist(y, p=2) #The 2 norm of (x-y)
x.matmul(y) #Matrix multiplication
```
It should be noted that the Tensor class methods are all non-inplace operations: ``x.add(y)`` will not operate directly on Tensor x, but will return a new Tensor holding the result.
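A short demonstration of this non-inplace behavior (values chosen arbitrarily):
```python
import paddle

paddle.disable_static()
x = paddle.to_tensor([1.0, 2.0])
z = x.add(paddle.to_tensor([3.0, 4.0]))
print(x.numpy())  # [1. 2.] -- x itself is unchanged
print(z.numpy())  # [4. 6.] -- the result is a new Tensor
```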
For more APIs related to Tensor operations, please refer to [class paddle.Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/creation/Tensor_cn.html).