Unverified commit d5809836 authored by Ligoml, committed by GitHub

[cherry-pick2.4]fix numpy issue in codeblock examples (#47664)

* #46765

* #47042

* Remove redundant numpy import (#47483)

* #47555

* resolve conflict

* resolve conflict

* resolve conflict

* resolve conflict

* resolve conflict

* for_codestyle

* fix sample code paddle.linalg.multi_dot
Co-authored-by: Kevin吴嘉文 <417333277@qq.com>
Parent 764cea0c
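The hunks below apply one mechanical substitution: docstring examples that built inputs through numpy now create tensors with Paddle's native factory APIs, and printed results are shown in full Tensor repr form. A minimal sketch of the recurring replacements (values are illustrative, not taken from any single hunk):

.. code-block:: python

    import paddle

    # Before: x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
    # paddle.to_tensor accepts plain Python lists directly.
    x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])

    # Before: inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
    # Random tensors come straight from paddle, with no numpy round-trip.
    inp = paddle.rand([1, 10], dtype="float32")
    u = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)

    # Before: data = np.arange(24).astype('float32').reshape([2, 3, 4])
    data = paddle.arange(24, dtype="float32").reshape([2, 3, 4])

    # Before: paddle.linalg.norm(x, p=np.inf)
    # Python's built-in infinity replaces np.inf.
    out = paddle.linalg.norm(data, p=float("inf"))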
......@@ -1120,7 +1120,6 @@ class Fleet(object):
Examples:
.. code-block:: python
-import numpy as np
import paddle
import paddle.nn.functional as F
paddle.enable_static()
......
......@@ -945,7 +945,6 @@ def fused_multi_transformer(
# required: gpu
import paddle
import paddle.incubate.nn.functional as F
-import numpy as np
# input: [batch_size, seq_len, embed_dim]
x = paddle.rand(shape=(2, 4, 128), dtype="float32")
......
......@@ -166,8 +166,7 @@ class LookAhead(Optimizer):
.. code-block:: python
import paddle
-import numpy as np
-inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+inp = paddle.rand([1,10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......@@ -280,8 +279,8 @@ class LookAhead(Optimizer):
.. code-block:: python
import paddle
-import numpy as np
-inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+inp = paddle.rand([1, 10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......
......@@ -342,8 +342,7 @@ class ModelAverage(Optimizer):
.. code-block:: python
import paddle
-import numpy as np
-inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+inp = paddle.rand([1, 10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......@@ -378,8 +377,7 @@ class ModelAverage(Optimizer):
.. code-block:: python
import paddle
-import numpy as np
-inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+inp = paddle.rand([1, 10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......@@ -425,8 +423,7 @@ class ModelAverage(Optimizer):
.. code-block:: python
import paddle
-import numpy as np
-inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+inp = paddle.rand([1, 10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......@@ -500,8 +497,7 @@ class ModelAverage(Optimizer):
.. code-block:: python
import paddle
-import numpy as np
-inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+inp = paddle.rand([1, 10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......
......@@ -1313,10 +1313,12 @@ def softshrink(x, threshold=0.5, name=None):
import paddle
import paddle.nn.functional as F
-import numpy as np
-x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
-out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
+x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
+out = F.softshrink(x)
+print(out)
+# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [-0.39999998, 0.        , 0.        , 0.30000001])
"""
if threshold < 0:
raise ValueError(
......@@ -1365,10 +1367,12 @@ def softsign(x, name=None):
import paddle
import paddle.nn.functional as F
-import numpy as np
-x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+out = F.softsign(x)
+print(out)
+# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [-0.28571430, -0.16666666, 0.09090909, 0.23076925])
"""
if in_dygraph_mode():
return _C_ops.softsign(x)
......@@ -1405,10 +1409,12 @@ def swish(x, name=None):
import paddle
import paddle.nn.functional as F
-import numpy as np
-x = paddle.to_tensor(np.array([-2., 0., 1.]))
-out = F.swish(x) # [-0.238406, 0., 0.731059]
+x = paddle.to_tensor([-2., 0., 1.])
+out = F.swish(x)
+print(out)
+# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [-0.23840584, 0.        , 0.73105854])
"""
if in_dygraph_mode():
return _C_ops.swish(x, 1.0)
......@@ -1487,10 +1493,12 @@ def tanhshrink(x, name=None):
import paddle
import paddle.nn.functional as F
-import numpy as np
-x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+out = F.tanhshrink(x)
+print(out)
+# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [-0.02005106, -0.00262468, 0.00033200, 0.00868741])
"""
if in_dygraph_mode():
return _C_ops.tanh_shrink(x)
......@@ -1536,10 +1544,12 @@ def thresholded_relu(x, threshold=1.0, name=None):
import paddle
import paddle.nn.functional as F
-import numpy as np
-x = paddle.to_tensor(np.array([2., 0., 1.]))
-out = F.thresholded_relu(x) # [2., 0., 0.]
+x = paddle.to_tensor([2., 0., 1.])
+out = F.thresholded_relu(x)
+print(out)
+# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [2., 0., 0.])
"""
if in_dygraph_mode():
......
......@@ -1965,18 +1965,16 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
.. code-block:: python
import paddle
-import numpy as np
-x_data = np.array([[[0, 1, 0],
-                    [ 1, 0, 1]]]).astype("float32")
-print(x_data.shape)
-paddle.disable_static()
-x = paddle.to_tensor(x_data, stop_gradient=False)
+x = paddle.to_tensor([[[0, 1, 0],
+                       [ 1, 0, 1]]], dtype="float32", stop_gradient=False)
output = paddle.nn.functional.label_smooth(x)
print(output)
-#[[[0.03333334 0.93333334 0.03333334]
-#  [0.93333334 0.03333334 0.93333334]]]
+# Tensor(shape=[1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+#        [[[0.03333334, 0.93333334, 0.03333334],
+#          [0.93333334, 0.03333334, 0.93333334]]])
"""
if epsilon > 1.0 or epsilon < 0.0:
raise ValueError("The value of epsilon must be between 0 and 1.")
......
......@@ -396,26 +396,22 @@ def conv1d(
import paddle
import paddle.nn.functional as F
-import numpy as np
-x = np.array([[[4, 8, 1, 9],
-               [7, 2, 0, 9],
-               [6, 9, 2, 6]]]).astype(np.float32)
-w = np.array(
-    [[[9, 3, 4],
-      [0, 0, 7],
-      [2, 5, 6]],
-     [[0, 3, 4],
-      [2, 9, 7],
-      [5, 6, 8]]]).astype(np.float32)
-x_var = paddle.to_tensor(x)
-w_var = paddle.to_tensor(w)
-y_var = F.conv1d(x_var, w_var)
-y_np = y_var.numpy()
-print(y_np)
-# [[[133. 238.]
-#   [160. 211.]]]
+x = paddle.to_tensor([[[4, 8, 1, 9],
+                       [7, 2, 0, 9],
+                       [6, 9, 2, 6]]], dtype="float32")
+w = paddle.to_tensor([[[9, 3, 4],
+                       [0, 0, 7],
+                       [2, 5, 6]],
+                      [[0, 3, 4],
+                       [2, 9, 7],
+                       [5, 6, 8]]], dtype="float32")
+y = F.conv1d(x, w)
+print(y)
+# Tensor(shape=[1, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[133., 238.],
+#          [160., 211.]]])
"""
cudnn_version = get_cudnn_version()
if cudnn_version is not None:
......@@ -949,24 +945,20 @@ def conv1d_transpose(
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
-import numpy as np
# shape: (1, 2, 4)
-x = np.array([[[4, 0, 9, 7],
-               [8, 0, 9, 2,]]]).astype(np.float32)
+x = paddle.to_tensor([[[4, 0, 9, 7],
+                       [8, 0, 9, 2,]]], dtype="float32")
# shape: (2, 1, 2)
-w = np.array([[[7, 0]],
-              [[4, 2]]]).astype(np.float32)
-x_var = paddle.to_tensor(x)
-w_var = paddle.to_tensor(w)
-y_var = F.conv1d_transpose(x_var, w_var)
-print(y_var)
-# [[[60. 16. 99. 75. 4.]]]
+w = paddle.to_tensor([[[7, 0]],
+                      [[4, 2]]], dtype="float32")
+y = F.conv1d_transpose(x, w)
+print(y)
+# Tensor(shape=[1, 1, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[60., 16., 99., 75., 4. ]]])
"""
cudnn_version = get_cudnn_version()
if cudnn_version is not None:
......
......@@ -252,16 +252,14 @@ def fluid_softmax_with_cross_entropy(
.. code-block:: python
import paddle
-import numpy as np
-data = np.random.rand(128).astype("float32")
-label = np.random.rand(1).astype("int64")
-data = paddle.to_tensor(data)
-label = paddle.to_tensor(label)
-linear = paddle.nn.Linear(128, 100)
-x = linear(data)
-out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
+logits = paddle.to_tensor([0.4, 0.6, 0.9])
+label = paddle.randint(high=2, shape=[1], dtype="int64")
+out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
+print(out)
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [1.15328646])
"""
if _non_static_mode():
if core.is_compiled_with_npu():
......@@ -1778,7 +1776,6 @@ def ctc_loss(
# declarative mode
import paddle.nn.functional as F
-import numpy as np
import paddle
# length of the longest logit sequence
......@@ -1790,8 +1787,7 @@ def ctc_loss(
# class num
class_num = 3
-np.random.seed(1)
-log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
+log_probs = paddle.to_tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
[3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],
[[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
......@@ -1804,30 +1800,30 @@ def ctc_loss(
[9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],
[[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
[3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
labels = np.array([[1, 2, 2],
[1, 2, 2]]).astype("int32")
input_lengths = np.array([5, 5]).astype("int64")
label_lengths = np.array([3, 3]).astype("int64")
log_probs = paddle.to_tensor(log_probs)
labels = paddle.to_tensor(labels)
input_lengths = paddle.to_tensor(input_lengths)
label_lengths = paddle.to_tensor(label_lengths)
[3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]],
dtype="float32")
labels = paddle.to_tensor([[1, 2, 2],
[1, 2, 2]], dtype="int32")
input_lengths = paddle.to_tensor([5, 5], dtype="int64")
label_lengths = paddle.to_tensor([3, 3], dtype="int64")
loss = F.ctc_loss(log_probs, labels,
input_lengths,
label_lengths,
blank=0,
reduction='none')
-print(loss) #[3.9179852 2.9076521]
+print(loss)
+# Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [3.91798496, 2.90765190])
loss = F.ctc_loss(log_probs, labels,
input_lengths,
label_lengths,
blank=0,
reduction='mean')
-print(loss) #[1.1376063]
+print(loss)
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [1.13760614])
"""
......@@ -2265,16 +2261,14 @@ def softmax_with_cross_entropy(
.. code-block:: python
import paddle
-import numpy as np
-data = np.random.rand(128).astype("float32")
-label = np.random.rand(1).astype("int64")
-data = paddle.to_tensor(data)
-label = paddle.to_tensor(label)
-linear = paddle.nn.Linear(128, 100)
-x = linear(data)
-out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
+logits = paddle.to_tensor([0.4, 0.6, 0.9], dtype="float32")
+label = paddle.to_tensor([1], dtype="int64")
+out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
+print(out)
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [1.15328646])
"""
return fluid_softmax_with_cross_entropy(
logits,
......@@ -3869,18 +3863,26 @@ def soft_margin_loss(input, label, reduction='mean', name=None):
.. code-block:: python
import paddle
-import numpy as np
input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
output = paddle.nn.functional.soft_margin_loss(input, label)
print(output)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
#        [0.64022040])
+input = paddle.uniform(shape=(5, 5), dtype="float32", min=0.1, max=0.8)
+label = paddle.randint(0, 2, shape=(5, 5), dtype="int64")
+label[label==0]=-1
-input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
-label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
-label_np[label_np==0]=-1
-input = paddle.to_tensor(input_np)
-label = paddle.to_tensor(label_np)
output = paddle.nn.functional.soft_margin_loss(input, label, reduction='none')
print(output)
+# Tensor(shape=[5, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[1.09917796, 0.52613139, 0.56263304, 0.82736146, 0.38776723],
+#         [1.07179427, 1.11924267, 0.49877715, 1.10026348, 0.46184641],
+#         [0.84367639, 0.74795729, 0.44629076, 0.55123353, 0.77659678],
+#         [0.39465919, 0.76651484, 0.54485321, 0.76609844, 0.77166790],
+#         [0.51283568, 0.84757161, 0.78913331, 1.05268764, 0.45318675]])
"""
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
......
......@@ -62,27 +62,28 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
.. code-block:: python
-import numpy as np
import paddle
import paddle.nn.functional as F
-paddle.disable_static()
-x = np.arange(6, dtype=np.float32).reshape(2,3)
-x = paddle.to_tensor(x)
+x = paddle.arange(6, dtype="float32").reshape([2,3])
y = F.normalize(x)
-print(y.numpy())
-# [[0.         0.4472136  0.8944272 ]
-#  [0.42426404 0.5656854  0.7071067 ]]
+print(y)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[0.        , 0.44721359, 0.89442718],
+#         [0.42426404, 0.56568539, 0.70710671]])
y = F.normalize(x, p=1.5)
-print(y.numpy())
-# [[0.         0.40862012 0.81724024]
-#  [0.35684016 0.4757869  0.5947336 ]]
+print(y)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[0.        , 0.40862012, 0.81724024],
+#         [0.35684016, 0.47578689, 0.59473360]])
y = F.normalize(x, axis=0)
-print(y.numpy())
-# [[0.         0.24253564 0.37139067]
-#  [1.         0.97014254 0.9284767 ]]
+print(y)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[0.        , 0.24253564, 0.37139067],
+#         [1.        , 0.97014254, 0.92847669]])
"""
if in_dygraph_mode():
eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
......@@ -169,22 +170,31 @@ def batch_norm(
Examples:
.. code-block:: python
-import paddle
-import numpy as np
+import paddle
-x = np.random.seed(123)
-x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
-running_mean = np.random.random(size=1).astype('float32')
-running_variance = np.random.random(size=1).astype('float32')
-weight_data = np.random.random(size=1).astype('float32')
-bias_data = np.random.random(size=1).astype('float32')
-x = paddle.to_tensor(x)
-rm = paddle.to_tensor(running_mean)
-rv = paddle.to_tensor(running_variance)
-w = paddle.to_tensor(weight_data)
-b = paddle.to_tensor(bias_data)
-batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
-print(batch_norm_out)
+x = paddle.arange(12, dtype="float32").reshape([2, 1, 2, 3])
+print(x)
+# Tensor(shape=[2, 1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[[0. , 1. , 2. ],
+#           [3. , 4. , 5. ]]],
+#         [[[6. , 7. , 8. ],
+#           [9. , 10., 11.]]]])
+running_mean = paddle.to_tensor([0], dtype="float32")
+running_variance = paddle.to_tensor([1], dtype="float32")
+weight = paddle.to_tensor([2], dtype="float32")
+bias = paddle.to_tensor([1], dtype="float32")
+batch_norm_out = paddle.nn.functional.batch_norm(x, running_mean,
+                                                 running_variance, weight, bias)
+print(batch_norm_out)
+# Tensor(shape=[2, 1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[[1.         , 2.99998999 , 4.99997997 ],
+#           [6.99996948 , 8.99995995 , 10.99994946]]],
+#         [[[12.99993896, 14.99992943, 16.99991989],
+#           [18.99990845, 20.99989891, 22.99988937]]]])
"""
assert len(x.shape) >= 2, "input dim must be larger than 1"
......
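For reference, with running mean 0, unit variance, weight 2 and bias 1, the inference-mode batch_norm in the hunk above reduces to an affine map; a quick sketch of the arithmetic (assuming batch_norm's default epsilon of 1e-5) reproduces the printed values:

.. code-block:: python

    import paddle

    x = paddle.arange(12, dtype="float32").reshape([2, 1, 2, 3])
    # batch_norm computes weight * (x - mean) / sqrt(var + epsilon) + bias;
    # with mean=0, var=1, weight=2, bias=1 this is roughly 2*x + 1, which is
    # why the printed values sit just below the exact 2*x + 1.
    expected = 2.0 * x / (1.0 + 1e-5) ** 0.5 + 1.0
    print(expected)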
......@@ -1797,10 +1797,8 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
# output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
#
import paddle
-import numpy as np
-input_data = np.random.rand(2, 3, 32, 32)
-x = paddle.to_tensor(input_data)
+x = paddle.rand([2, 3, 32, 32])
# x.shape is [2, 3, 32, 32]
out = paddle.nn.functional.adaptive_avg_pool2d(
x = x,
......
......@@ -94,56 +94,48 @@ def sparse_attention(
# required: skiptest
import paddle
-import numpy as np
-query_data = np.array([[[[0, 1,], [2, 3],
-                         [ 0, 1], [2, 3]]]]).astype("float32")
-key_data = np.array([[[[0, 1,], [2, 3],
-                       [ 0, 1], [2, 3]]]]).astype("float32")
-value_data = np.array([[[[0, 1,], [2, 3],
-                         [ 0, 1], [2, 3]]]]).astype("float32")
-sparse_csr_offset_data = np.array([[[0, 2,
-                                     4, 6, 8]]]).astype("int32")
-sparse_csr_columns_data = np.array([[[0, 1,
-                                      0, 1, 2, 3, 2, 3]]]).astype("int32")
-key_padding_mask_data = np.array([[1,1,1,0]]).astype("float32")
-attention_mask_data = np.array([[1,0,1,1],[1,1,1,1],[1,1,1,1],[1,1,1,1]]).astype("float32")
-print(query_data.shape)
-# (1, 1, 4, 2)
-print(sparse_csr_offset_data.shape)
-# (1, 1, 5)
-print(sparse_csr_columns_data.shape)
-# (1, 1, 8)
-paddle.disable_static()
-query = paddle.to_tensor(query_data, stop_gradient=False,
-                         place=paddle.CUDAPlace(0))
-key = paddle.to_tensor(key_data, stop_gradient=False,
-                       place=paddle.CUDAPlace(0))
-value = paddle.to_tensor(value_data, stop_gradient=False,
-                         place=paddle.CUDAPlace(0))
-offset = paddle.to_tensor(sparse_csr_offset_data, stop_gradient=False,
-                          place=paddle.CUDAPlace(0))
-columns = paddle.to_tensor(sparse_csr_columns_data, stop_gradient=False,
-                           place=paddle.CUDAPlace(0))
-key_padding_mask = paddle.to_tensor(key_padding_mask_data, stop_gradient=False,
-                                    place=paddle.CUDAPlace(0))
-attention_mask = paddle.to_tensor(attention_mask_data, stop_gradient=False,
-                                  place=paddle.CUDAPlace(0))
+# `query`, `key` and `value` all have shape [1, 1, 4, 2]
+query = paddle.to_tensor([[[[0, 1, ], [2, 3],
+                            [0, 1], [2, 3]]]], dtype="float32")
+key = paddle.to_tensor([[[[0, 1], [2, 3],
+                          [0, 1], [2, 3]]]], dtype="float32")
+value = paddle.to_tensor([[[[0, 1], [2, 3],
+                            [0, 1], [2, 3]]]], dtype="float32")
+offset = paddle.to_tensor([[[0, 2, 4, 6, 8]]], dtype="int32")
+columns = paddle.to_tensor([[[0, 1, 0, 1, 2, 3, 2, 3]]], dtype="int32")
+print(offset.shape)  # (1, 1, 5)
+print(columns.shape)  # (1, 1, 8)
+key_padding_mask = paddle.to_tensor([[1, 1, 1, 0]], dtype="float32")
+attention_mask = paddle.to_tensor([[1, 0, 1, 1],
+                                   [1, 1, 1, 1],
+                                   [1, 1, 1, 1],
+                                   [1, 1, 1, 1]], dtype="float32")
output_mask = paddle.nn.functional.sparse_attention(query, key,
-                value, offset, columns,
-                key_padding_mask=key_padding_mask, attn_mask=attention_mask)
+                value, offset, columns,
+                key_padding_mask=key_padding_mask,
+                attn_mask=attention_mask)
print(output_mask)
-# [[[[0.        , 1.        ],
-#    [1.99830270, 2.99830270],
-#    [0.        , 1.        ],
-#    [0.        , 1.        ]]]]
+# Tensor(shape=[1, 1, 4, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+#        [[[[0.        , 1.        ],
+#           [1.99830270, 2.99830270],
+#           [0.        , 1.        ],
+#           [0.        , 1.        ]]]])
output = paddle.nn.functional.sparse_attention(query, key,
-                value, offset, columns)
+                value, offset, columns)
print(output)
-# [[[[1.60885942, 2.60885954],
-#    [1.99830270, 2.99830270],
-#    [1.60885942, 2.60885954],
-#    [1.99830270, 2.99830270]]]]
+# Tensor(shape=[1, 1, 4, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+#        [[[[1.60885942, 2.60885954],
+#           [1.99830270, 2.99830270],
+#           [1.60885942, 2.60885954],
+#           [1.99830270, 2.99830270]]]])
"""
if in_dynamic_mode():
(
......
......@@ -284,13 +284,13 @@ class Tanh(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
m = paddle.nn.Tanh()
out = m(x)
print(out)
-# [-0.37994896 -0.19737532  0.09966799  0.29131261]
+# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [-0.37994894, -0.19737533, 0.09966800, 0.29131261])
"""
def __init__(self, name=None):
......@@ -883,11 +883,13 @@ class Softshrink(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
+x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
m = paddle.nn.Softshrink()
-out = m(x) # [-0.4, 0, 0, 0.3]
+out = m(x)
+print(out)
+# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [-0.39999998, 0.        , 0.        , 0.30000001])
"""
def __init__(self, threshold=0.5, name=None):
......@@ -923,11 +925,13 @@ class Softsign(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
m = paddle.nn.Softsign()
-out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+out = m(x)
+print(out)
+# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [-0.28571430, -0.16666666, 0.09090909, 0.23076925])
"""
def __init__(self, name=None):
......@@ -962,11 +966,13 @@ class Swish(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x = paddle.to_tensor(np.array([-2., 0., 1.]))
+x = paddle.to_tensor([-2., 0., 1.])
m = paddle.nn.Swish()
-out = m(x) # [-0.238406, 0., 0.731059]
+out = m(x)
+print(out)
+# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [-0.23840584, 0.        , 0.73105854])
"""
def __init__(self, name=None):
......@@ -1046,11 +1052,13 @@ class Tanhshrink(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
m = paddle.nn.Tanhshrink()
-out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+out = m(x)
+print(out)
+# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [-0.02005106, -0.00262468, 0.00033200, 0.00868741])
"""
def __init__(self, name=None):
......@@ -1093,11 +1101,13 @@ class ThresholdedReLU(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x = paddle.to_tensor(np.array([2., 0., 1.]))
+x = paddle.to_tensor([2., 0., 1.])
m = paddle.nn.ThresholdedReLU()
-out = m(x) # [2., 0., 0.]
+out = m(x)
+print(out)
+# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [2., 0., 0.])
"""
def __init__(self, threshold=1.0, name=None):
......
......@@ -381,16 +381,13 @@ class Upsample(Layer):
.. code-block:: python
import paddle
import paddle.nn as nn
-import numpy as np
-input_data = np.random.rand(2,3,6,10).astype("float32")
-upsample_out = paddle.nn.Upsample(size=[12,12])
+input = paddle.rand([2,3,6,10], dtype="float32")
+upsample_out = paddle.nn.Upsample(size=[12,12])
-input = paddle.to_tensor(input_data)
output = upsample_out(x=input)
print(output.shape)
-# [2L, 3L, 12L, 12L]
+# [2, 3, 12, 12]
"""
......@@ -657,14 +654,12 @@ class Bilinear(Layer):
.. code-block:: python
import paddle
-import numpy
-layer1 = numpy.random.random((5, 5)).astype('float32')
-layer2 = numpy.random.random((5, 4)).astype('float32')
+layer1 = paddle.rand((5, 5)).astype('float32')
+layer2 = paddle.rand((5, 4)).astype('float32')
bilinear = paddle.nn.Bilinear(
    in1_features=5, in2_features=4, out_features=1000)
-result = bilinear(paddle.to_tensor(layer1),
-                  paddle.to_tensor(layer2))    # result shape [5, 1000]
+result = bilinear(layer1,layer2)    # result shape [5, 1000]
"""
......@@ -756,17 +751,22 @@ class Dropout(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x = np.array([[1,2,3], [4,5,6]]).astype('float32')
-x = paddle.to_tensor(x)
+x = paddle.to_tensor([[1,2,3], [4,5,6]], dtype="float32")
m = paddle.nn.Dropout(p=0.5)
y_train = m(x)
+print(y_train)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[2., 0., 6.],
+#         [0., 0., 0.]])
m.eval() # switch the model to test phase
y_test = m(x)
-print(x)
-print(y_train)
print(y_test)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[1., 2., 3.],
+#         [4., 5., 6.]])
"""
def __init__(self, p=0.5, axis=None, mode="upscale_in_train", name=None):
......@@ -821,17 +821,35 @@ class Dropout2D(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
-x = paddle.to_tensor(x)
+x = paddle.rand([2, 2, 1, 3], dtype="float32")
+print(x)
+# Tensor(shape=[2, 2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[[0.10052059, 0.93890846, 0.45351565]],
+#          [[0.47507706, 0.45021373, 0.11331241]]],
+#         [[[0.53358698, 0.97375143, 0.34997326]],
+#          [[0.24758087, 0.52628899, 0.17970420]]]])
m = paddle.nn.Dropout2D(p=0.5)
y_train = m(x)
+print(y_train)
+# Tensor(shape=[2, 2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[[0.        , 0.        , 0.        ]],
+#          [[0.95015413, 0.90042746, 0.22662482]]],
+#         [[[1.06717396, 1.94750285, 0.69994652]],
+#          [[0.        , 0.        , 0.        ]]]])
m.eval() # switch the model to test phase
y_test = m(x)
-print(x)
-print(y_train)
print(y_test)
+# Tensor(shape=[2, 2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[[0.10052059, 0.93890846, 0.45351565]],
+#          [[0.47507706, 0.45021373, 0.11331241]]],
+#         [[[0.53358698, 0.97375143, 0.34997326]],
+#          [[0.24758087, 0.52628899, 0.17970420]]]])
"""
def __init__(self, p=0.5, data_format='NCHW', name=None):
......@@ -884,17 +902,47 @@ class Dropout3D(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
-x = paddle.to_tensor(x)
+x = paddle.arange(24, dtype="float32").reshape((1, 2, 2, 2, 3))
+print(x)
+# Tensor(shape=[1, 2, 2, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[[[0. , 1. , 2. ],
+#            [3. , 4. , 5. ]],
+#           [[6. , 7. , 8. ],
+#            [9. , 10., 11.]]],
+#          [[[12., 13., 14.],
+#            [15., 16., 17.]],
+#           [[18., 19., 20.],
+#            [21., 22., 23.]]]]])
m = paddle.nn.Dropout3D(p=0.5)
y_train = m(x)
+print(y_train)
+# Tensor(shape=[1, 2, 2, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[[[0. , 2. , 4. ],
+#            [6. , 8. , 10.]],
+#           [[12., 14., 16.],
+#            [18., 20., 22.]]],
+#          [[[0. , 0. , 0. ],
+#            [0. , 0. , 0. ]],
+#           [[0. , 0. , 0. ],
+#            [0. , 0. , 0. ]]]]])
m.eval() # switch the model to test phase
y_test = m(x)
-print(x)
-print(y_train)
print(y_test)
+# Tensor(shape=[1, 2, 2, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[[[0. , 1. , 2. ],
+#            [3. , 4. , 5. ]],
+#           [[6. , 7. , 8. ],
+#            [9. , 10., 11.]]],
+#          [[[12., 13., 14.],
+#            [15., 16., 17.]],
+#           [[18., 19., 20.],
+#            [21., 22., 23.]]]]])
"""
def __init__(self, p=0.5, data_format='NCDHW', name=None):
......@@ -945,18 +993,21 @@ class AlphaDropout(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x = np.array([[-1, 1], [-1, 1]]).astype('float32')
-x = paddle.to_tensor(x)
+x = paddle.to_tensor([[-1, 1], [-1, 1]], dtype="float32")
m = paddle.nn.AlphaDropout(p=0.5)
y_train = m(x)
+print(y_train)
+# Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[-0.77919382,  1.66559887],
+#         [-0.77919382, -0.77919382]])
m.eval() # switch the model to test phase
y_test = m(x)
-print(x)
-print(y_train)
-# [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly)
print(y_test)
+# Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[-1.,  1.],
+#         [-1.,  1.]])
"""
def __init__(self, p=0.5, name=None):
......@@ -1295,18 +1346,17 @@ class CosineSimilarity(Layer):
import paddle
import paddle.nn as nn
-import numpy as np
-np.random.seed(0)
-x1 = np.random.rand(2,3)
-x2 = np.random.rand(2,3)
-x1 = paddle.to_tensor(x1)
-x2 = paddle.to_tensor(x2)
+x1 = paddle.to_tensor([[1., 2., 3.],
+                       [2., 3., 4.]], dtype="float32")
+x2 = paddle.to_tensor([[8., 3., 3.],
+                       [2., 3., 4.]], dtype="float32")
cos_sim_func = nn.CosineSimilarity(axis=0)
result = cos_sim_func(x1, x2)
print(result)
-# [0.99806249 0.9817672  0.94987036]
+# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [0.65079135, 0.98058069, 1.        ])
"""
def __init__(self, axis=1, eps=1e-8):
......@@ -1393,30 +1443,33 @@ class Embedding(Layer):
.. code-block:: python
import paddle
-import numpy as np
-x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
-y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32)
-x = paddle.to_tensor(x_data, stop_gradient=False)
-y = paddle.to_tensor(y_data, stop_gradient=False)
-embedding = paddle.nn.Embedding(10, 3, sparse=True)
+x = paddle.to_tensor([[0], [1], [3]], dtype="int64", stop_gradient=False)
+embedding = paddle.nn.Embedding(4, 3, sparse=True)
-w0=np.full(shape=(10, 3), fill_value=2).astype(np.float32)
+w0 = paddle.to_tensor([[0., 0., 0.],
+                       [1., 1., 1.],
+                       [2., 2., 2.],
+                       [3., 3., 3.]], dtype="float32")
embedding.weight.set_value(w0)
+print(embedding.weight)
+# Tensor(shape=[4, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+#        [[0., 0., 0.],
+#         [1., 1., 1.],
+#         [2., 2., 2.],
+#         [3., 3., 3.]])
adam = paddle.optimizer.Adam(parameters=[embedding.weight], learning_rate=0.01)
adam.clear_grad()
-# weight.shape = [10, 3]
-# x.data = [[3],[4],[5]]
-# x.shape = [3, 1]
+out = embedding(x)
+print(out)
+# Tensor(shape=[3, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+#        [[[0., 0., 0.]],
+#         [[1., 1., 1.]],
+#         [[3., 3., 3.]]])
-# out.data = [[2,2,2], [2,2,2], [2,2,2]]
-# out.shape = [3, 1, 3]
-out=embedding(x)
out.backward()
adam.step()
......
......@@ -312,26 +312,26 @@ class Conv1D(_ConvNd):
Examples:
.. code-block:: python
-import paddle
-from paddle.nn import Conv1D
-import numpy as np
-x = np.array([[[4, 8, 1, 9],
-               [7, 2, 0, 9],
-               [6, 9, 2, 6]]]).astype(np.float32)
-w = np.array(
-    [[[9, 3, 4],
-      [0, 0, 7],
-      [2, 5, 6]],
-     [[0, 3, 4],
-      [2, 9, 7],
-      [5, 6, 8]]]).astype(np.float32)
-x_t = paddle.to_tensor(x)
-conv = Conv1D(3, 2, 3)
-conv.weight.set_value(w)
-y_t = conv(x_t)
-print(y_t)
-# [[[133. 238.]
-#   [160. 211.]]]
+import paddle
+from paddle.nn import Conv1D
+x = paddle.to_tensor([[[4, 8, 1, 9],
+                       [7, 2, 0, 9],
+                       [6, 9, 2, 6]]], dtype="float32")
+w = paddle.to_tensor([[[9, 3, 4],
+                       [0, 0, 7],
+                       [2, 5, 6]],
+                      [[0, 3, 4],
+                       [2, 9, 7],
+                       [5, 6, 8]]], dtype="float32")
+conv = Conv1D(3, 2, 3)
+conv.weight.set_value(w)
+y = conv(x)
+print(y)
+# Tensor(shape=[1, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+#        [[[133., 238.],
+#          [160., 211.]]])
"""
def __init__(
......@@ -498,23 +498,22 @@ class Conv1DTranspose(_ConvNd):
Examples:
.. code-block:: python
-import paddle
-from paddle.nn import Conv1DTranspose
-import numpy as np
-# shape: (1, 2, 4)
-x = np.array([[[4, 0, 9, 7],
-               [8, 0, 9, 2]]]).astype(np.float32)
-# shape: (2, 1, 2)
-y = np.array([[[7, 0]],
-              [[4, 2]]]).astype(np.float32)
-x_t = paddle.to_tensor(x)
-conv = Conv1DTranspose(2, 1, 2)
-conv.weight.set_value(y)
-y_t = conv(x_t)
-print(y_t)
-# [[[60. 16. 99. 75. 4.]]]
+import paddle
+from paddle.nn import Conv1DTranspose
+# shape: (1, 2, 4)
+x = paddle.to_tensor([[[4, 0, 9, 7],
+                       [8, 0, 9, 2]]], dtype="float32")
+# shape: (2, 1, 2)
+w = paddle.to_tensor([[[7, 0]],
+                      [[4, 2]]], dtype="float32")
+conv = Conv1DTranspose(2, 1, 2)
+conv.weight.set_value(w)
+y = conv(x)
+print(y)
+# Tensor(shape=[1, 1, 5], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+#        [[[60., 16., 99., 75., 4. ]]])
"""
def __init__(
......
......@@ -777,16 +777,15 @@ class BCELoss(Layer):
Examples:
.. code-block:: python
-import numpy as np
import paddle
-input_data = np.array([0.5, 0.6, 0.7]).astype("float32")
-label_data = np.array([1.0, 0.0, 1.0]).astype("float32")
-input = paddle.to_tensor(input_data)
-label = paddle.to_tensor(label_data)
+input = paddle.to_tensor([0.5, 0.6, 0.7])
+label = paddle.to_tensor([1.0, 0.0, 1.0])
bce_loss = paddle.nn.BCELoss()
output = bce_loss(input, label)
-print(output) # [0.65537095]
+print(output)
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [0.65537101])
"""
......@@ -1094,7 +1093,6 @@ class CTCLoss(Layer):
.. code-block:: python
# declarative mode
-import numpy as np
import paddle
# length of the longest logit sequence
......@@ -1106,8 +1104,7 @@ class CTCLoss(Layer):
# class num
class_num = 3
-np.random.seed(1)
-log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
+log_probs = paddle.to_tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
[3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],
[[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
......@@ -1120,26 +1117,25 @@ class CTCLoss(Layer):
[9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],
[[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
-                             [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
-labels = np.array([[1, 2, 2],
-                   [1, 2, 2]]).astype("int32")
-input_lengths = np.array([5, 5]).astype("int64")
-label_lengths = np.array([3, 3]).astype("int64")
-log_probs = paddle.to_tensor(log_probs)
-labels = paddle.to_tensor(labels)
-input_lengths = paddle.to_tensor(input_lengths)
-label_lengths = paddle.to_tensor(label_lengths)
+                             [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]], dtype="float32")
+labels = paddle.to_tensor([[1, 2, 2],
+                           [1, 2, 2]], dtype="int32")
+input_lengths = paddle.to_tensor([5, 5], dtype="int64")
+label_lengths = paddle.to_tensor([3, 3], dtype="int64")
loss = paddle.nn.CTCLoss(blank=0, reduction='none')(log_probs, labels,
                                                     input_lengths,
                                                     label_lengths)
-print(loss) #[3.9179852 2.9076521]
+print(loss)
+# Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [3.91798496, 2.90765190])
loss = paddle.nn.CTCLoss(blank=0, reduction='mean')(log_probs, labels,
                                                     input_lengths,
                                                     label_lengths)
-print(loss) #[1.1376063]
+print(loss)
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [1.13760614])
"""
def __init__(self, blank=0, reduction='mean'):
......@@ -1775,20 +1771,29 @@ class SoftMarginLoss(Layer):
.. code-block:: python
import paddle
-import numpy as np
input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
soft_margin_loss = paddle.nn.SoftMarginLoss()
output = soft_margin_loss(input, label)
print(output)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [0.64022040])
-input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
-label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
+input_np = paddle.uniform(shape=(5, 5), min=0.1, max=0.8, dtype="float64")
+label_np = paddle.randint(high=2, shape=(5, 5), dtype="int64")
label_np[label_np==0]=-1
input = paddle.to_tensor(input_np)
label = paddle.to_tensor(label_np)
soft_margin_loss = paddle.nn.SoftMarginLoss(reduction='none')
output = soft_margin_loss(input, label)
print(output)
# Tensor(shape=[5, 5], dtype=float64, place=Place(gpu:0), stop_gradient=True,
# [[0.61739663, 0.51405668, 1.09346100, 0.42385561, 0.91602303],
# [0.76997038, 1.01977148, 0.98971722, 1.13976032, 0.88152088],
# [0.55476735, 1.10505384, 0.89923519, 0.45018155, 1.06587511],
# [0.37998142, 0.48067240, 0.47791212, 0.55664053, 0.98581399],
# [0.78571653, 0.59319711, 0.39701841, 0.76172109, 0.83781742]])
"""
def __init__(self, reduction='mean', name=None):
......
......@@ -362,17 +362,13 @@ class GroupNorm(Layer):
Examples:
.. code-block:: python
-import paddle
-import numpy as np
+import paddle
-paddle.disable_static()
-np.random.seed(123)
-x_data = np.random.random(size=(2, 6, 2, 2)).astype('float32')
-x = paddle.to_tensor(x_data)
-group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6)
-group_norm_out = group_norm(x)
+x = paddle.arange(48, dtype="float32").reshape((2, 6, 2, 2))
+group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6)
+group_norm_out = group_norm(x)
-print(group_norm_out.numpy())
+print(group_norm_out)
"""
def __init__(
......@@ -1150,18 +1146,23 @@ class SyncBatchNorm(_BatchNormBase):
Examples:
.. code-block:: python
# required: gpu
import paddle
import paddle.nn as nn
-import numpy as np
-x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
-x = paddle.to_tensor(x)
+x = paddle.to_tensor([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
if paddle.is_compiled_with_cuda():
    sync_batch_norm = nn.SyncBatchNorm(2)
    hidden1 = sync_batch_norm(x)
    print(hidden1)
-   # [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]]
+   # Tensor(shape=[1, 2, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+   #        [[[[ 0.26824948,  1.09363246],
+   #           [ 0.26824948, -1.63013160]],
+   #          [[ 0.80956620, -0.66528702],
+   #           [-1.27446556,  1.13018656]]]])
"""
def __init__(
......
......@@ -68,9 +68,8 @@ class AvgPool1D(Layer):
import paddle
import paddle.nn as nn
-import numpy as np
-data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
AvgPool1D = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
pool_out = AvgPool1D(data)
# pool_out shape: [1, 3, 16]
......@@ -173,10 +172,9 @@ class AvgPool2D(Layer):
import paddle
import paddle.nn as nn
-import numpy as np
# avg pool2d
-input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
+input = paddle.uniform([1, 3, 32, 32], dtype="float32", min=-1, max=1)
AvgPool2D = nn.AvgPool2D(kernel_size=2,
stride=2, padding=0)
output = AvgPool2D(input)
......@@ -271,10 +269,9 @@ class AvgPool3D(Layer):
import paddle
import paddle.nn as nn
-import numpy as np
# avg pool3d
-input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 2, 3, 32, 32]).astype(np.float32))
+input = paddle.uniform([1, 2, 3, 32, 32], dtype="float32", min=-1, max=1)
AvgPool3D = nn.AvgPool3D(kernel_size=2,
stride=2, padding=0)
output = AvgPool3D(input)
......@@ -378,9 +375,8 @@ class MaxPool1D(Layer):
import paddle
import paddle.nn as nn
-import numpy as np
-data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
MaxPool1D = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
pool_out = MaxPool1D(data)
# pool_out shape: [1, 3, 16]
......@@ -488,10 +484,9 @@ class MaxPool2D(Layer):
import paddle
import paddle.nn as nn
-import numpy as np
# max pool2d
-input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
+input = paddle.uniform([1, 3, 32, 32], dtype="float32", min=-1, max=1)
MaxPool2D = nn.MaxPool2D(kernel_size=2,
stride=2, padding=0)
output = MaxPool2D(input)
......@@ -590,10 +585,9 @@ class MaxPool3D(Layer):
import paddle
import paddle.nn as nn
-import numpy as np
# max pool3d
-input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 2, 3, 32, 32]).astype(np.float32))
+input = paddle.uniform([1, 2, 3, 32, 32], dtype="float32", min=-1, max=1)
MaxPool3D = nn.MaxPool3D(kernel_size=2,
stride=2, padding=0)
output = MaxPool3D(input)
......@@ -685,9 +679,8 @@ class AdaptiveAvgPool1D(Layer):
#
import paddle
import paddle.nn as nn
-import numpy as np
-data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
AdaptiveAvgPool1D = nn.AdaptiveAvgPool1D(output_size=16)
pool_out = AdaptiveAvgPool1D(data)
# pool_out shape: [1, 3, 16]
......@@ -764,11 +757,9 @@ class AdaptiveAvgPool2D(Layer):
# output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
#
import paddle
-import numpy as np
-input_data = np.random.rand(2, 3, 32, 32)
-x = paddle.to_tensor(input_data)
-# x.shape is [2, 3, 32, 32]
+x = paddle.rand([2, 3, 32, 32])
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=3)
pool_out = adaptive_avg_pool(x = x)
# pool_out.shape is [2, 3, 3, 3]
......@@ -858,11 +849,9 @@ class AdaptiveAvgPool3D(Layer):
# output[:, :, i, j, k] =
# avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
import paddle
-import numpy as np
-input_data = np.random.rand(2, 3, 8, 32, 32)
-x = paddle.to_tensor(input_data)
-# x.shape is [2, 3, 8, 32, 32]
+x = paddle.rand([2, 3, 8, 32, 32])
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=3)
pool_out = adaptive_avg_pool(x = x)
# pool_out = [2, 3, 3, 3, 3]
......@@ -941,9 +930,8 @@ class AdaptiveMaxPool1D(Layer):
#
import paddle
import paddle.nn as nn
-import numpy as np
-data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
AdaptiveMaxPool1D = nn.AdaptiveMaxPool1D(output_size=16)
pool_out = AdaptiveMaxPool1D(data)
# pool_out shape: [1, 3, 16]
......@@ -1027,10 +1015,9 @@ class AdaptiveMaxPool2D(Layer):
# output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
#
import paddle
-import numpy as np
-input_data = np.random.rand(2, 3, 32, 32)
-x = paddle.to_tensor(input_data)
+x = paddle.rand([2, 3, 32, 32])
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=3, return_mask=True)
pool_out, indices = adaptive_max_pool(x = x)
"""
......@@ -1117,10 +1104,8 @@ class AdaptiveMaxPool3D(Layer):
# output[:, :, i, j, k] =
# max(input[:, :, dstart:dend, hstart: hend, wstart: wend])
import paddle
-import numpy as np
-input_data = np.random.rand(2, 3, 8, 32, 32)
-x = paddle.to_tensor(input_data)
+x = paddle.rand([2, 3, 8, 32, 32])
pool = paddle.nn.AdaptiveMaxPool3D(output_size=4)
out = pool(x)
# out shape: [2, 3, 4, 4, 4]
......@@ -1191,7 +1176,6 @@ class MaxUnPool1D(Layer):
import paddle
import paddle.nn.functional as F
-import numpy as np
data = paddle.rand(shape=[1, 3, 16])
pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
......@@ -1371,7 +1355,6 @@ class MaxUnPool3D(Layer):
import paddle
import paddle.nn.functional as F
-import numpy as np
data = paddle.rand(shape=[1, 1, 4, 4, 6])
pool_out, indices = F.max_pool3d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
......
......@@ -46,7 +46,6 @@ def export(layer, path, input_spec=None, opset_version=9, **configs):
.. code-block:: python
import paddle
-import numpy as np
class LinearNet(paddle.nn.Layer):
def __init__(self):
......@@ -77,8 +76,8 @@ def export(layer, path, input_spec=None, opset_version=9, **configs):
# Export model with 'Tensor' to support pruned model by set 'output_spec'.
def export_logic():
model = Logic()
-x = paddle.to_tensor(np.array([1]))
-y = paddle.to_tensor(np.array([2]))
+x = paddle.to_tensor([1])
+y = paddle.to_tensor([2])
# Static and run model.
paddle.jit.to_static(model)
out = model(x, y, z=True)
......
......@@ -70,10 +70,9 @@ class Adadelta(Optimizer):
.. code-block:: python
import paddle
-import numpy as np
-inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
+inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
-inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
......
......@@ -72,7 +72,6 @@ class Adagrad(Optimizer):
.. code-block:: python
import paddle
-import numpy as np
inp = paddle.rand(shape=[10, 10])
linear = paddle.nn.Linear(10, 10)
......
......@@ -85,9 +85,8 @@ class Adamax(Optimizer):
.. code-block:: python
import paddle
-import numpy as np
-inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
+inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
-inp = paddle.to_tensor(inp)
out = linear(inp)
......
......@@ -85,8 +85,8 @@ class Momentum(Optimizer):
.. code-block:: python
import paddle
-import numpy as np
-inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
+inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
-inp = paddle.to_tensor(inp)
out = linear(inp)
......
......@@ -1023,14 +1023,13 @@ class Optimizer(object):
.. code-block:: python
import paddle
-import numpy as np
-value = np.arange(26).reshape(2, 13).astype("float32")
-a = paddle.to_tensor(value)
+x = paddle.arange(26, dtype="float32").reshape([2, 13])
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
                             parameters = linear.parameters())
-out = linear(a)
+out = linear(x)
out.backward()
adam.step()
adam.clear_grad()
......@@ -1100,11 +1099,9 @@ class Optimizer(object):
.. code-block:: python
import paddle
-import numpy as np
-inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
+inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
-inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
optimizer = paddle.optimizer.Adam(learning_rate=0.1,
......@@ -1307,11 +1304,9 @@ class Optimizer(object):
Examples:
.. code-block:: python
-import numpy as np
import paddle
-value = np.arange(26).reshape(2, 13).astype("float32")
-a = paddle.to_tensor(value)
+a = paddle.arange(26, dtype="float32").reshape([2, 13])
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
......@@ -1417,14 +1412,12 @@ class Optimizer(object):
.. code-block:: python
import paddle
-import numpy as np
-value = np.arange(26).reshape(2, 13).astype("float32")
-a = paddle.to_tensor(value)
+a = paddle.arange(26, dtype="float32").reshape([2, 13])
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
                             parameters = linear.parameters())
out = linear(a)
out.backward()
adam.step()
......
......@@ -43,7 +43,7 @@ class L1Decay(fluid.regularizer.L1Decay):
# Example1: set Regularizer in optimizer
import paddle
from paddle.regularizer import L1Decay
-import numpy as np
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand(shape=[10, 10], dtype="float32")
out = linear(inp)
......@@ -105,7 +105,6 @@ class L2Decay(fluid.regularizer.L2Decay):
# Example1: set Regularizer in optimizer
import paddle
from paddle.regularizer import L2Decay
-import numpy as np
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand(shape=[10, 10], dtype="float32")
out = linear(inp)
......
......@@ -185,13 +185,12 @@ class InputSpec(object):
Examples:
.. code-block:: python
-import numpy as np
import paddle
from paddle.static import InputSpec
paddle.disable_static()
-x = paddle.to_tensor(np.ones([2, 2], np.float32))
+x = paddle.ones([2, 2], dtype="float32")
x_spec = InputSpec.from_tensor(x, name='x')
print(x_spec) # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)
......
......@@ -1138,33 +1138,34 @@ def triu(x, diagonal=0, name=None):
Examples:
.. code-block:: python
-import numpy as np
import paddle
-data = np.arange(1, 13, dtype="int64").reshape(3,-1)
-# array([[ 1,  2,  3,  4],
-#        [ 5,  6,  7,  8],
-#        [ 9, 10, 11, 12]])
+x = paddle.arange(1, 13, dtype="int64").reshape([3,-1])
+# Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[1 , 2 , 3 , 4 ],
+#         [5 , 6 , 7 , 8 ],
+#         [9 , 10, 11, 12]])
# example 1, default diagonal
-x = paddle.to_tensor(data)
triu1 = paddle.tensor.triu(x)
-# array([[ 1,  2,  3,  4],
-#        [ 0,  6,  7,  8],
-#        [ 0,  0, 11, 12]])
+# Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[1 , 2 , 3 , 4 ],
+#         [0 , 6 , 7 , 8 ],
+#         [0 , 0 , 11, 12]])
# example 2, positive diagonal value
triu2 = paddle.tensor.triu(x, diagonal=2)
-# array([[0, 0, 3, 4],
-#        [0, 0, 0, 8],
-#        [0, 0, 0, 0]])
+# Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[0, 0, 3, 4],
+#         [0, 0, 0, 8],
+#         [0, 0, 0, 0]])
# example 3, negative diagonal value
triu3 = paddle.tensor.triu(x, diagonal=-1)
-# array([[ 1,  2,  3,  4],
-#        [ 5,  6,  7,  8],
-#        [ 0, 10, 11, 12]])
+# Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[1 , 2 , 3 , 4 ],
+#         [5 , 6 , 7 , 8 ],
+#         [0 , 10, 11, 12]])
"""
if in_dygraph_mode():
......@@ -1275,24 +1276,27 @@ def diagflat(x, offset=0, name=None):
x = paddle.to_tensor([1, 2, 3])
y = paddle.diagflat(x)
-print(y.numpy())
-# [[1 0 0]
-#  [0 2 0]
-#  [0 0 3]]
+print(y)
+# Tensor(shape=[3, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[1, 0, 0],
+#         [0, 2, 0],
+#         [0, 0, 3]])
y = paddle.diagflat(x, offset=1)
-print(y.numpy())
-# [[0 1 0 0]
-#  [0 0 2 0]
-#  [0 0 0 3]
-#  [0 0 0 0]]
+print(y)
+# Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[0, 1, 0, 0],
+#         [0, 0, 2, 0],
+#         [0, 0, 0, 3],
+#         [0, 0, 0, 0]])
y = paddle.diagflat(x, offset=-1)
-print(y.numpy())
-# [[0 0 0 0]
-#  [1 0 0 0]
-#  [0 2 0 0]
-#  [0 0 3 0]]
+print(y)
+# Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[0, 0, 0, 0],
+#         [1, 0, 0, 0],
+#         [0, 2, 0, 0],
+#         [0, 0, 3, 0]])
.. code-block:: python
:name: code-example-2
......@@ -1301,27 +1305,30 @@ def diagflat(x, offset=0, name=None):
x = paddle.to_tensor([[1, 2], [3, 4]])
y = paddle.diagflat(x)
-print(y.numpy())
-# [[1 0 0 0]
-#  [0 2 0 0]
-#  [0 0 3 0]
-#  [0 0 0 4]]
+print(y)
+# Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[1, 0, 0, 0],
+#         [0, 2, 0, 0],
+#         [0, 0, 3, 0],
+#         [0, 0, 0, 4]])
y = paddle.diagflat(x, offset=1)
-print(y.numpy())
-# [[0 1 0 0 0]
-#  [0 0 2 0 0]
-#  [0 0 0 3 0]
-#  [0 0 0 0 4]
-#  [0 0 0 0 0]]
+print(y)
+# Tensor(shape=[5, 5], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[0, 1, 0, 0, 0],
+#         [0, 0, 2, 0, 0],
+#         [0, 0, 0, 3, 0],
+#         [0, 0, 0, 0, 4],
+#         [0, 0, 0, 0, 0]])
y = paddle.diagflat(x, offset=-1)
-print(y.numpy())
-# [[0 0 0 0 0]
-#  [1 0 0 0 0]
-#  [0 2 0 0 0]
-#  [0 0 3 0 0]
-#  [0 0 0 4 0]]
+print(y)
+# Tensor(shape=[5, 5], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[0, 0, 0, 0, 0],
+#         [1, 0, 0, 0, 0],
+#         [0, 2, 0, 0, 0],
+#         [0, 0, 3, 0, 0],
+#         [0, 0, 0, 4, 0]])
"""
padding_value = 0
if in_dygraph_mode():
......@@ -1413,23 +1420,26 @@ def diag(x, offset=0, padding_value=0, name=None):
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.diag(x)
-print(y.numpy())
-# [[1 0 0]
-#  [0 2 0]
-#  [0 0 3]]
+print(y)
+# Tensor(shape=[3, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[1, 0, 0],
+#         [0, 2, 0],
+#         [0, 0, 3]])
y = paddle.diag(x, offset=1)
-print(y.numpy())
-# [[0 1 0 0]
-#  [0 0 2 0]
-#  [0 0 0 3]
-#  [0 0 0 0]]
+print(y)
+# Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[0, 1, 0, 0],
+#         [0, 0, 2, 0],
+#         [0, 0, 0, 3],
+#         [0, 0, 0, 0]])
y = paddle.diag(x, padding_value=6)
-print(y.numpy())
-# [[1 6 6]
-#  [6 2 6]
-#  [6 6 3]]
+print(y)
+# Tensor(shape=[3, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [[1, 6, 6],
+#         [6, 2, 6],
+#         [6, 6, 3]])
.. code-block:: python
:name: code-example-2
......@@ -1439,16 +1449,19 @@ def diag(x, offset=0, padding_value=0, name=None):
paddle.disable_static()
x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
y = paddle.diag(x)
-print(y.numpy())
-# [1 5]
+print(y)
+# Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [1, 5])
y = paddle.diag(x, offset=1)
-print(y.numpy())
-# [2 6]
+print(y)
+# Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [2, 6])
y = paddle.diag(x, offset=-1)
-print(y.numpy())
-# [4]
+print(y)
+# Tensor(shape=[1], dtype=int64, place=Place(cpu), stop_gradient=True,
+#        [4])
"""
if in_dygraph_mode():
return _C_ops.diag(x, offset, padding_value)
......@@ -1911,7 +1924,7 @@ def _memcpy(input, place=None, output=None):
.. code-block:: python
import paddle
-import numpy as np
data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
result = paddle._memcpy(data, place=paddle.CPUPlace()) # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
"""
......@@ -1986,10 +1999,10 @@ def complex(real, imag, name=None):
x = paddle.arange(2, dtype=paddle.float32).unsqueeze(-1)
y = paddle.arange(3, dtype=paddle.float32)
z = paddle.complex(x, y)
-print(z.numpy())
-# [[0.+0.j 0.+1.j 0.+2.j]
-#  [1.+0.j 1.+1.j 1.+2.j]]
+print(z)
+# Tensor(shape=[2, 3], dtype=complex64, place=Place(cpu), stop_gradient=True,
+#        [[0j    , 1j    , 2j    ],
+#         [(1+0j), (1+1j), (1+2j)]])
"""
if in_dygraph_mode():
return _C_ops.complex(real, imag)
......
......@@ -314,38 +314,53 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
.. code-block:: python
import paddle
-import numpy as np
-shape=[2, 3, 4]
-np_input = np.arange(24).astype('float32') - 12
-np_input = np_input.reshape(shape)
-x = paddle.to_tensor(np_input)
-#[[[-12. -11. -10.  -9.] [ -8.  -7.  -6.  -5.] [ -4.  -3.  -2.  -1.]]
-# [[  0.   1.   2.   3.] [  4.   5.   6.   7.] [  8.   9.  10.  11.]]]
+x = paddle.arange(24, dtype="float32").reshape([2, 3, 4]) - 12
+# x: Tensor(shape=[2, 3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+#           [[[-12., -11., -10., -9. ],
+#             [-8. , -7. , -6. , -5. ],
+#             [-4. , -3. , -2. , -1. ]],
+#            [[ 0. ,  1. ,  2. ,  3. ],
+#             [ 4. ,  5. ,  6. ,  7. ],
+#             [ 8. ,  9. ,  10., 11.]]])
# compute frobenius norm along last two dimensions.
out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1])
-# out_fro.numpy() [17.435596 16.911535 16.7332   16.911535]
+# out_fro: Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
+#                 [17.43559647, 16.91153526, 16.73320007, 16.91153526])
# compute 2-order vector norm along last dimension.
out_pnorm = paddle.linalg.norm(x, p=2, axis=-1)
-# out_pnorm.numpy(): [[21.118711  13.190906   5.477226]
-#                     [ 3.7416575 11.224972  19.131126]]
+# out_pnorm: Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+#                   [[21.11871147, 13.19090557, 5.47722578 ],
+#                    [3.74165750 , 11.22497177, 19.13112640]])
# compute 2-order norm along [0,1] dimension.
out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1])
-# out_pnorm.numpy(): [17.435596 16.911535 16.7332   16.911535]
+# out_pnorm: Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
+#                   [17.43559647, 16.91153526, 16.73320007, 16.91153526])
# compute inf-order norm
-out_pnorm = paddle.linalg.norm(x, p=np.inf)
-# out_pnorm.numpy() = [12.]
-out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0)
-# out_pnorm.numpy(): [[12. 11. 10.  9.] [ 8.  7.  6.  7.] [ 8.  9. 10. 11.]]
+out_pnorm = paddle.linalg.norm(x, p=float("inf"))
+# out_pnorm: Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
+#                   [12.])
+out_pnorm = paddle.linalg.norm(x, p=float("inf"), axis=0)
+# out_pnorm: Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+#                   [[12., 11., 10., 9. ],
+#                    [8. , 7. , 6. , 7. ],
+#                    [8. , 9. , 10., 11.]])
# compute -inf-order norm
-out_pnorm = paddle.linalg.norm(x, p=-np.inf)
-# out_pnorm.numpy(): [0.]
-out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0)
-# out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]]
+out_pnorm = paddle.linalg.norm(x, p=-float("inf"))
+# out_pnorm: Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
+#                   [0.])
+out_pnorm = paddle.linalg.norm(x, p=-float("inf"), axis=0)
+# out_pnorm: Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+#                   [[0., 1., 2., 3.],
+#                    [4., 5., 6., 5.],
+#                    [4., 3., 2., 1.]])
"""
def frobenius_norm(input, dim=None, keepdim=False, name=None):
......@@ -699,10 +714,9 @@ def dist(x, y, p=2, name=None):
.. code-block:: python
import paddle
-import numpy as np
-x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32")
-y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32")
+x = paddle.to_tensor([[3, 3],[3, 3]], dtype="float32")
+y = paddle.to_tensor([[3, 3],[3, 1]], dtype="float32")
out = paddle.dist(x, y, 0)
print(out) # out = [1.]
......@@ -754,69 +768,82 @@ def cond(x, p=None, name=None):
.. code-block:: python
import paddle
-import numpy as np
x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]])
# compute condition number when p is None
out = paddle.linalg.cond(x)
-# out.numpy() [1.4142135]
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [1.41421342])
# compute condition number when order of the norm is 'fro'
out_fro = paddle.linalg.cond(x, p='fro')
-# out_fro.numpy() [3.1622777]
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [3.16227770])
# compute condition number when order of the norm is 'nuc'
out_nuc = paddle.linalg.cond(x, p='nuc')
-# out_nuc.numpy() [9.2426405]
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [9.24263859])
# compute condition number when order of the norm is 1
out_1 = paddle.linalg.cond(x, p=1)
-# out_1.numpy() [2.]
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [2.])
# compute condition number when order of the norm is -1
out_minus_1 = paddle.linalg.cond(x, p=-1)
-# out_minus_1.numpy() [1.]
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [1.])
# compute condition number when order of the norm is 2
out_2 = paddle.linalg.cond(x, p=2)
-# out_2.numpy() [1.4142135]
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [1.41421342])
# compute condition number when order of the norm is -2
out_minus_2 = paddle.linalg.cond(x, p=-2)
-# out_minus_2.numpy() [0.70710677]
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [0.70710683])
# compute condition number when order of the norm is inf
-out_inf = paddle.linalg.cond(x, p=np.inf)
-# out_inf.numpy() [2.]
+out_inf = paddle.linalg.cond(x, p=float("inf"))
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [2.])
# compute condition number when order of the norm is -inf
-out_minus_inf = paddle.linalg.cond(x, p=-np.inf)
-# out_minus_inf.numpy() [1.]
+out_minus_inf = paddle.linalg.cond(x, p=-float("inf"))
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [1.])
-a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32'))
-# a.numpy()
-# [[[ 0.14063153 -0.996288    0.7996131  -0.02571543]
-#   [-0.16303636  1.5534962  -0.49919784 -0.04402903]
-#   [-1.1341571  -0.6022629   0.5445269   0.29154757]
-#   [-0.16816919 -0.30972657  1.7521842  -0.5402487 ]]
-#  [[-0.58081484  0.12402827  0.7229862  -0.55046535]
-#   [-0.15178485 -1.1604939   0.75810957  0.30971205]
-#   [-0.9669573   1.0940945  -0.27363303 -0.35416734]
-#   [-1.216529    2.0018666  -0.7773689  -0.17556527]]]
+a = paddle.randn([2, 4, 4])
+# Tensor(shape=[2, 4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[-0.06784091, -0.07095790,  1.31792855, -0.58959651],
+#          [ 0.20818676, -0.85640615, -0.89998871, -1.47439921],
+#          [-0.49132481,  0.42250812, -0.77383220, -2.19794774],
+#          [-0.33551720, -1.70003879, -1.09795380, -0.63737559]],
+#         [[ 1.12026262, -0.16119350, -1.21157813,  2.74383283],
+#          [-0.15999718,  0.18798758, -0.69392562,  1.35720372],
+#          [-0.53013402, -2.26304483,  1.40843511, -1.02288902],
+#          [ 0.69533503,  2.05261683, -0.02251151, -1.43127477]]])
a_cond_fro = paddle.linalg.cond(a, p='fro')
-# a_cond_fro.numpy() [31.572273 28.120834]
+# Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [8.86691189 , 75.23817444])
-b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64'))
-# b.numpy()
-# [[[ 1.61707487  0.46829144  0.38130416  0.82546736]
-#   [-1.72710298  0.08866375 -0.62518804  0.16128892]
-#   [-0.02822879 -1.67764516  0.11141444  0.3220113 ]]
-#  [[ 0.22524372  0.62474921 -0.85503233 -1.03960523]
-#   [-0.76620689  0.56673047  0.85064753 -0.45158196]
-#   [ 1.47595418  2.23646462  1.5701758   0.10497519]]]
+b = paddle.randn([2, 3, 4])
+# Tensor(shape=[2, 3, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [[[-0.43754861,  1.80796063, -0.78729683, -1.82264030],
+#          [-0.27670753,  0.06620564,  0.29072434, -0.31155765],
+#          [ 0.34123746, -0.05444612,  0.05001324, -1.46877074]],
+#         [[-0.64331555, -1.51103854, -1.26277697, -0.68024760],
+#          [ 2.59375715, -1.06665540,  0.96575671, -0.73330832],
+#          [-0.47064447, -0.23945692, -0.95150250, -1.07125998]]])
b_cond_2 = paddle.linalg.cond(b, p=2)
-# b_cond_2.numpy() [3.30064451 2.51976252]
+# Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+#        [6.64228773, 3.89068866])
"""
......@@ -1160,14 +1187,18 @@ def dot(x, y, name=None):
.. code-block:: python
import paddle
import numpy as np
x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
# 1-D Tensor * 1-D Tensor
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([4, 5, 6])
z = paddle.dot(x, y)
print(z) # [32]
# 2-D Tensor * 2-D Tensor
x = paddle.to_tensor([[1, 2, 3], [2, 4, 6]])
y = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
z = paddle.dot(x, y)
print(z)
print(z) # [[32], [64]]
"""
if in_dygraph_mode():
......@@ -1494,18 +1525,13 @@ def cholesky(x, upper=False, name=None):
.. code-block:: python
import paddle
import numpy as np
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03
x = paddle.to_tensor(x_data)
a = paddle.rand([3, 3], dtype="float32")
a_t = paddle.transpose(a, [1, 0])
x = paddle.matmul(a, a_t) + 1e-03
out = paddle.linalg.cholesky(x, upper=False)
print(out)
# [[1.190523 0. 0. ]
# [0.9906703 0.27676893 0. ]
# [1.25450498 0.05600871 0.06400121]]
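# Editor's sketch (not in the original docstring): the lower-triangular
# factor should reconstruct the input, since x = out @ out.T.
recon = paddle.matmul(out, paddle.transpose(out, [1, 0]))
print(paddle.allclose(recon, x)) # expect True (up to float tolerance)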
"""
if in_dygraph_mode():
return _C_ops.cholesky(x, upper)
......@@ -2583,28 +2609,19 @@ def multi_dot(x, name=None):
.. code-block:: python
import paddle
import numpy as np
# A * B
A_data = np.random.random([3, 4]).astype(np.float32)
B_data = np.random.random([4, 5]).astype(np.float32)
A = paddle.to_tensor(A_data)
B = paddle.to_tensor(B_data)
A = paddle.rand([3, 4])
B = paddle.rand([4, 5])
out = paddle.linalg.multi_dot([A, B])
print(out.numpy().shape)
print(out.shape)
# [3, 5]
# A * B * C
A_data = np.random.random([10, 5]).astype(np.float32)
B_data = np.random.random([5, 8]).astype(np.float32)
C_data = np.random.random([8, 7]).astype(np.float32)
A = paddle.to_tensor(A_data)
B = paddle.to_tensor(B_data)
C = paddle.to_tensor(C_data)
A = paddle.rand([10, 5])
B = paddle.rand([5, 8])
C = paddle.rand([8, 7])
out = paddle.linalg.multi_dot([A, B, C])
print(out.numpy().shape)
print(out.shape)
# [10, 7]
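# Editor's sketch (not in the original docstring): multi_dot picks an
# efficient multiplication order, but the value matches a plain
# left-to-right matmul chain.
ref = paddle.matmul(paddle.matmul(A, B), C)
print(paddle.allclose(out, ref)) # expect True (up to float tolerance)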
"""
if _in_legacy_dygraph():
return _legacy_C_ops.multi_dot(x)
......@@ -3099,47 +3116,51 @@ def triangular_solve(
x, y, upper=True, transpose=False, unitriangular=False, name=None
):
r"""
Computes the solution of a system of equations with a triangular coefficient matrix `x` and
multiple right-hand sides `y`.
Computes the solution of a system of equations with a triangular coefficient matrix `x` and
multiple right-hand sides `y`.
Inputs `x` and `y` are 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs
are also batches.
Inputs `x` and `y` are 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs
are also batches.
Args:
x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is
zero or more batch dimensions. Its data type should be float32 or float64.
upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular
system of equations. Default: True.
transpose (bool, optional): whether `x` should be transposed before calculation. Default: False.
unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed
to be 1 and not referenced from `x` . Default: False.
name(str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The solution of the system of equations. Its data type should be the same as that of `x`.
Examples:
.. code-block:: python
Args:
x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is
zero or more batch dimensions. Its data type should be float32 or float64.
upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular
system of equations. Default: True.
transpose (bool, optional): whether `x` should be transposed before calculation. Default: False.
unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed
to be 1 and not referenced from `x` . Default: False.
name(str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The solution of the system of equations. Its data type should be the same as that of `x`.
Examples:
.. code-block:: python
# a square system of linear equations:
# x1 + x2 + x3 = 0
# 2*x2 + x3 = -9
# -x3 = 5
# a square system of linear equations:
# x1 + x2 + x3 = 0
# 2*x2 + x3 = -9
# -x3 = 5
import paddle
import numpy as np
import paddle
x = paddle.to_tensor([[1, 1, 1],
[0, 2, 1],
[0, 0,-1]], dtype="float64")
y = paddle.to_tensor([[0], [-9], [5]], dtype="float64")
out = paddle.linalg.triangular_solve(x, y, upper=True)
x = paddle.to_tensor([[1, 1, 1],
[0, 2, 1],
[0, 0,-1]], dtype="float64")
y = paddle.to_tensor([[0], [-9], [5]], dtype="float64")
out = paddle.linalg.triangular_solve(x, y, upper=True)
print(out)
# [7, -2, -5]
print(out)
# [7, -2, -5]
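# Editor's sketch (not in the original docstring): a lower-triangular
# system, solved by passing upper=False.
# 2*x1 = 2
# x1 + x2 = 3
# x1 + x2 + x3 = 6
low = paddle.to_tensor([[2, 0, 0],
[1, 1, 0],
[1, 1, 1]], dtype="float64")
rhs = paddle.to_tensor([[2], [3], [6]], dtype="float64")
out_low = paddle.linalg.triangular_solve(low, rhs, upper=False)
print(out_low)
# [[1], [2], [3]]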
"""
if in_dygraph_mode():
return _C_ops.triangular_solve(x, y, upper, transpose, unitriangular)
......@@ -3246,14 +3267,13 @@ def eigvalsh(x, UPLO='L', name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
x_data = np.array([[1, -2j], [2j, 5]])
x = paddle.to_tensor(x_data)
x = paddle.to_tensor([[1, -2j], [2j, 5]])
out_value = paddle.eigvalsh(x, UPLO='L')
print(out_value)
#[0.17157288, 5.82842712]
# Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
# [0.17157286, 5.82842731])
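# Editor's sketch (assumes paddle.linalg.eigh is available): eigvalsh
# returns the same eigenvalues that eigh computes, without the vectors.
values, vectors = paddle.linalg.eigh(x, UPLO='L')
print(values)
# [0.17157286, 5.82842731]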
"""
if in_dygraph_mode():
values, _ = _C_ops.eigvalsh(x, UPLO, x.stop_gradient)
......
......@@ -152,14 +152,14 @@ def logical_or(x, y, out=None, name=None):
.. code-block:: python
import paddle
import numpy as np
x_data = np.array([True, False], dtype=np.bool_).reshape(2, 1)
y_data = np.array([True, False, True, False], dtype=np.bool_).reshape(2, 2)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
x = paddle.to_tensor([True, False], dtype="bool").reshape([2, 1])
y = paddle.to_tensor([True, False, True, False], dtype="bool").reshape([2, 2])
res = paddle.logical_or(x, y)
print(res) # [[ True True] [ True False]]
print(res)
# Tensor(shape=[2, 2], dtype=bool, place=Place(cpu), stop_gradient=True,
# [[True , True ],
# [True , False]])
"""
if in_dygraph_mode():
return _C_ops.logical_or(x, y)
......@@ -194,14 +194,14 @@ def logical_xor(x, y, out=None, name=None):
.. code-block:: python
import paddle
import numpy as np
x_data = np.array([True, False], dtype=np.bool_).reshape([2, 1])
y_data = np.array([True, False, True, False], dtype=np.bool_).reshape([2, 2])
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
x = paddle.to_tensor([True, False], dtype="bool").reshape([2, 1])
y = paddle.to_tensor([True, False, True, False], dtype="bool").reshape([2, 2])
res = paddle.logical_xor(x, y)
print(res) # [[False, True], [ True, False]]
print(res)
# Tensor(shape=[2, 2], dtype=bool, place=Place(cpu), stop_gradient=True,
# [[False, True ],
# [True , False]])
"""
if in_dygraph_mode():
return _C_ops.logical_xor(x, y)
......@@ -364,22 +364,20 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
y = paddle.to_tensor([10000.1, 1e-08])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
# [False]
result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")
np_result2 = result2.numpy()
# [False]
x = paddle.to_tensor([1.0, float('nan')])
y = paddle.to_tensor([1.0, float('nan')])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
# [False]
result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")
np_result2 = result2.numpy()
# [True]
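# Editor's sketch (not in the original docstring): allclose is the
# all-reduced form of the elementwise isclose, so this should agree
# with result2 above.
agg = paddle.all(paddle.isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=True))
# expect: True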
"""
......@@ -992,22 +990,18 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
y = paddle.to_tensor([10000.1, 1e-08])
result1 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
# [True, False]
result2 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")
np_result2 = result2.numpy()
# [True, False]
x = paddle.to_tensor([1.0, float('nan')])
y = paddle.to_tensor([1.0, float('nan')])
result1 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
# [True, False]
result2 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")
np_result2 = result2.numpy()
# [True, True]
"""
......
......@@ -1346,12 +1346,9 @@ def flip(x, axis, name=None):
.. code-block:: python
import paddle
import numpy as np
image_shape=(3, 2, 2)
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
x = x.astype('float32')
img = paddle.to_tensor(x)
img = paddle.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
tmp = paddle.flip(img, [0,1])
print(tmp) # [[[10,11],[8, 9]], [[6, 7],[4, 5]], [[2, 3],[0, 1]]]
......@@ -2279,21 +2276,33 @@ def unique_consecutive(
x = paddle.to_tensor([1, 1, 2, 2, 3, 1, 1, 2])
output = paddle.unique_consecutive(x)
np_output = output.numpy() # [1 2 3 1 2]
print(output)
# Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [1, 2, 3, 1, 2])
_, inverse, counts = paddle.unique_consecutive(x, return_inverse=True, return_counts=True)
np_inverse = inverse.numpy() # [0 0 1 1 2 3 3 4]
np_counts = inverse.numpy() # [2 2 1 2 1]
print(inverse)
# Tensor(shape=[8], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [0, 0, 1, 1, 2, 3, 3, 4])
print(counts)
# Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [2, 2, 1, 2, 1])
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3], [2, 1, 3]])
output = paddle.unique_consecutive(x, axis=0)
np_output = output.numpy() # [2 1 3 0 1 2 1 3 2 1 3]
print(output)
# Tensor(shape=[3, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [[2, 1, 3],
# [3, 0, 1],
# [2, 1, 3]])
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3], [2, 1, 3]])
output = paddle.unique_consecutive(x, axis=0)
np_output = output.numpy()
# [[2 1 3]
# [3 0 1]
# [2 1 3]]
print(output)
# Tensor(shape=[3, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [[2, 1, 3],
# [3, 0, 1],
# [2, 1, 3]])
"""
if axis is None:
......@@ -2414,18 +2423,27 @@ def unique(
unique = paddle.unique(x)
np_unique = unique.numpy() # [1 2 3 5]
_, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
np_indices = indices.numpy() # [3 0 1 4]
np_inverse = inverse.numpy() # [1 2 2 0 3 2]
np_counts = counts.numpy() # [1 1 3 1]
print(indices)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [3, 0, 1, 4])
print(inverse)
# Tensor(shape=[6], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [1, 2, 2, 0, 3, 2])
print(counts)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [1, 1, 3, 1])
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
unique = paddle.unique(x)
np_unique = unique.numpy() # [0 1 2 3]
print(unique)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [0, 1, 2, 3])
unique = paddle.unique(x, axis=0)
np_unique = unique.numpy()
# [[2 1 3]
# [3 0 1]]
print(unique)
# Tensor(shape=[2, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [[2, 1, 3],
# [3, 0, 1]])
"""
if axis is None:
axis = []
......@@ -3035,12 +3053,10 @@ def scatter_nd(index, updates, shape, name=None):
.. code-block:: python
import paddle
import numpy as np
index_data = np.array([[1, 1],
[0, 1],
[1, 3]]).astype(np.int64)
index = paddle.to_tensor(index_data)
index = paddle.to_tensor([[1, 1],
[0, 1],
[1, 3]], dtype="int64")
updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
shape = [3, 5, 9, 10]
......@@ -3064,15 +3080,12 @@ def chunk(x, chunks, axis=0, name=None):
Returns:
list(Tensor): The list of segmented Tensors.
Example:
Examples:
.. code-block:: python
import numpy as np
import paddle
# x is a Tensor whose shape is [3, 9, 5]
x_np = np.random.random([3, 9, 5]).astype("int32")
x = paddle.to_tensor(x_np)
x = paddle.rand([3, 9, 5])
out0, out1, out2 = paddle.chunk(x, chunks=3, axis=1)
# out0.shape [3, 3, 5]
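# Editor's addition: all three chunks share one shape here, since 9
# divides evenly by chunks=3 along axis 1.
# out1.shape [3, 3, 5]
# out2.shape [3, 3, 5]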
......@@ -3115,19 +3128,22 @@ def tile(x, repeat_times, name=None):
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.tile(data, repeat_times=[2, 1])
np_out = out.numpy()
# [[1, 2, 3]
# [1, 2, 3]]
print(out)
# Tensor(shape=[2, 3], dtype=int32, place=Place(gpu:0), stop_gradient=True,
# [[1, 2, 3],
# [1, 2, 3]])
out = paddle.tile(data, repeat_times=(2, 2))
np_out = out.numpy()
# [[1, 2, 3, 1, 2, 3]
# [1, 2, 3, 1, 2, 3]]
print(out)
# Tensor(shape=[2, 6], dtype=int32, place=Place(gpu:0), stop_gradient=True,
# [[1, 2, 3, 1, 2, 3],
# [1, 2, 3, 1, 2, 3]])
repeat_times = paddle.to_tensor([1, 2], dtype='int32')
out = paddle.tile(data, repeat_times=repeat_times)
np_out = out.numpy()
# [[1, 2, 3, 1, 2, 3]]
print(out)
# Tensor(shape=[1, 6], dtype=int32, place=Place(gpu:0), stop_gradient=True,
# [[1, 2, 3, 1, 2, 3]])
"""
if in_dygraph_mode():
if isinstance(repeat_times, core.eager.Tensor):
......@@ -3227,8 +3243,10 @@ def expand_as(x, y, name=None):
data_x = paddle.to_tensor([1, 2, 3], 'int32')
data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
out = paddle.expand_as(data_x, data_y)
np_out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
print(out)
# Tensor(shape=[2, 3], dtype=int32, place=Place(gpu:0), stop_gradient=True,
# [[1, 2, 3],
# [1, 2, 3]])
"""
if in_dygraph_mode():
return _C_ops.expand_as(x, None, y.shape)
......@@ -4238,10 +4256,11 @@ def as_complex(x, name=None):
import paddle
x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
y = paddle.as_complex(x)
print(y.numpy())
print(y)
# [[ 0. +1.j 2. +3.j 4. +5.j]
# [ 6. +7.j 8. +9.j 10.+11.j]]
# Tensor(shape=[2, 3], dtype=complex64, place=Place(gpu:0), stop_gradient=True,
# [[1j , (2+3j) , (4+5j) ],
# [(6+7j) , (8+9j) , (10+11j)]])
"""
if in_dygraph_mode():
return _C_ops.as_complex(x)
......@@ -4285,15 +4304,16 @@ def as_real(x, name=None):
x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
y = paddle.as_complex(x)
z = paddle.as_real(y)
print(z.numpy())
print(z)
# [[[ 0. 1.]
# [ 2. 3.]
# [ 4. 5.]]
# Tensor(shape=[2, 3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[0. , 1. ],
# [2. , 3. ],
# [4. , 5. ]],
# [[ 6. 7.]
# [ 8. 9.]
# [10. 11.]]]
# [[6. , 7. ],
# [8. , 9. ],
# [10., 11.]]])
"""
if in_dygraph_mode():
return _C_ops.as_real(x)
......@@ -4752,10 +4772,11 @@ def index_add(x, index, axis, value, name=None):
index = paddle.to_tensor([0, 2], dtype="int32")
value = paddle.to_tensor([[1, 1, 1], [1, 1, 1]], dtype="float32")
outplace_res = paddle.index_add(input_tensor, index, 0, value)
print(outplace_res.numpy())
# [[2 2 2]
# [1 1 1]
# [2 2 2]]
print(outplace_res)
# Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[2., 2., 2.],
# [1., 1., 1.],
# [2., 2., 2.]])
"""
_index_add_params_check(x, index, axis, value)
......@@ -4813,10 +4834,11 @@ def index_add_(x, index, axis, value, name=None):
index = paddle.to_tensor([0, 2], dtype="int32")
value = paddle.to_tensor([[1, 1], [1, 1], [1, 1]], dtype="float32")
inplace_res = paddle.index_add_(input_tensor, index, 1, value)
print(inplace_res.numpy())
# [[2, 1, 2]
# [2, 1, 2]
# [2, 1, 2]]
print(inplace_res)
# Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[2., 1., 2.],
# [2., 1., 2.],
# [2., 1., 2.]])
"""
_index_add_params_check(x, index, axis, value)
......
......@@ -905,34 +905,37 @@ def maximum(x, y, name=None):
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor([[1, 2], [7, 8]])
y = paddle.to_tensor([[3, 4], [5, 6]])
res = paddle.maximum(x, y)
print(res)
# [[3, 4],
# [7, 8]]
# Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
# [[3, 4],
# [7, 8]])
x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
y = paddle.to_tensor([3, 0, 4])
res = paddle.maximum(x, y)
print(res)
# [[3, 2, 4],
# [3, 2, 4]]
# Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
# [[3, 2, 4],
# [3, 2, 4]])
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
res = paddle.maximum(x, y)
print(res)
# [ 2., nan, nan]
# Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
# [2. , nan, nan])
x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, -np.inf, 5], dtype='float32')
x = paddle.to_tensor([5, 3, float("inf")], dtype='float32')
y = paddle.to_tensor([1, -float("inf"), 5], dtype='float32')
res = paddle.maximum(x, y)
print(res)
# [ 5., 3., inf.]
# Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
# [5. , 3. , inf.])
"""
op_type = 'elementwise_max'
axis = -1
......@@ -966,34 +969,37 @@ def minimum(x, y, name=None):
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor([[1, 2], [7, 8]])
y = paddle.to_tensor([[3, 4], [5, 6]])
res = paddle.minimum(x, y)
print(res)
# [[1, 2],
# [5, 6]]
# Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
# [[1, 2],
# [5, 6]])
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([3, 0, 4])
res = paddle.minimum(x, y)
print(res)
# [[[1, 0, 3],
# [1, 0, 3]]]
# Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
# [[[1, 0, 3],
# [1, 0, 3]]])
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
res = paddle.minimum(x, y)
print(res)
# [ 1., nan, nan]
# Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
# [1. , nan, nan])
x = paddle.to_tensor([5, 3, np.inf], dtype='float64')
y = paddle.to_tensor([1, -np.inf, 5], dtype='float64')
x = paddle.to_tensor([5, 3, float("inf")], dtype='float64')
y = paddle.to_tensor([1, -float("inf"), 5], dtype='float64')
res = paddle.minimum(x, y)
print(res)
# [ 1., -inf., 5.]
# Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
# [ 1. , -inf., 5. ])
"""
op_type = 'elementwise_min'
axis = -1
......@@ -1029,34 +1035,37 @@ def fmax(x, y, name=None):
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor([[1, 2], [7, 8]])
y = paddle.to_tensor([[3, 4], [5, 6]])
res = paddle.fmax(x, y)
print(res)
# [[3, 4],
# [7, 8]]
# Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
# [[3, 4],
# [7, 8]])
x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
y = paddle.to_tensor([3, 0, 4])
res = paddle.fmax(x, y)
print(res)
# [[3, 2, 4],
# [3, 2, 4]]
# Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
# [[3, 2, 4],
# [3, 2, 4]])
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
res = paddle.fmax(x, y)
print(res)
# [ 2., 3., 5.]
# Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
# [2., 3., 5.])
x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, -np.inf, 5], dtype='float32')
x = paddle.to_tensor([5, 3, float("inf")], dtype='float32')
y = paddle.to_tensor([1, -float("inf"), 5], dtype='float32')
res = paddle.fmax(x, y)
print(res)
# [ 5., 3., inf.]
# Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
# [5. , 3. , inf.])
"""
op_type = 'elementwise_fmax'
axis = -1
......@@ -1092,34 +1101,37 @@ def fmin(x, y, name=None):
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor([[1, 2], [7, 8]])
y = paddle.to_tensor([[3, 4], [5, 6]])
res = paddle.fmin(x, y)
print(res)
# [[1, 2],
# [5, 6]]
# Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
# [[1, 2],
# [5, 6]])
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([3, 0, 4])
res = paddle.fmin(x, y)
print(res)
# [[[1, 0, 3],
# [1, 0, 3]]]
# Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
# [[[1, 0, 3],
# [1, 0, 3]]])
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
res = paddle.fmin(x, y)
print(res)
# [ 1., 3., 5.]
# Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
# [1., 3., 5.])
x = paddle.to_tensor([5, 3, np.inf], dtype='float64')
y = paddle.to_tensor([1, -np.inf, 5], dtype='float64')
x = paddle.to_tensor([5, 3, float("inf")], dtype='float64')
y = paddle.to_tensor([1, -float("inf"), 5], dtype='float64')
res = paddle.fmin(x, y)
print(res)
# [ 1., -inf., 5.]
# Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
# [ 1. , -inf., 5. ])
"""
op_type = 'elementwise_fmin'
axis = -1
......@@ -1290,15 +1302,13 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
.. code-block:: python
import paddle
import numpy as np
# x is a Tensor with following elements:
# [[nan, 0.3, 0.5, 0.9]
# [0.1, 0.2, -nan, 0.7]]
# Each example is followed by the corresponding output tensor.
x = np.array([[float('nan'), 0.3, 0.5, 0.9],
[0.1, 0.2, float('-nan'), 0.7]]).astype(np.float32)
x = paddle.to_tensor(x)
x = paddle.to_tensor([[float('nan'), 0.3, 0.5, 0.9],
[0.1, 0.2, float('-nan'), 0.7]], dtype="float32")
out1 = paddle.nansum(x) # [2.7]
out2 = paddle.nansum(x, axis=0) # [0.1, 0.5, 0.5, 1.6]
out3 = paddle.nansum(x, axis=-1) # [1.7, 1.0]
......@@ -1308,9 +1318,8 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
# [[[1, nan], [3, 4]],
# [[5, 6], [-nan, 8]]]
# Each example is followed by the corresponding output tensor.
y = np.array([[[1, float('nan')], [3, 4]],
y = paddle.to_tensor([[[1, float('nan')], [3, 4]],
[[5, 6], [float('-nan'), 8]]])
y = paddle.to_tensor(y)
out5 = paddle.nansum(y, axis=[1, 2]) # [8, 19]
out6 = paddle.nansum(y, axis=[0, 1]) # [9, 18]
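# Editor's sketch (not in the original docstring): keepdim=True keeps
# the reduced axis with size 1, reusing the 2-D tensor x from above.
out7 = paddle.nansum(x, axis=0, keepdim=True) # [[0.1, 0.5, 0.5, 1.6]], shape [1, 4]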
"""
......@@ -4205,8 +4214,8 @@ def rad2deg(x, name=None):
.. code-block:: python
import paddle
import numpy as np
import math
x1 = paddle.to_tensor([3.142, -3.142, 6.283, -6.283, 1.570, -1.570])
result1 = paddle.rad2deg(x1)
print(result1)
......@@ -4214,7 +4223,7 @@ def rad2deg(x, name=None):
# [180.02334595, -180.02334595, 359.98937988, -359.98937988,
# 89.95437622 , -89.95437622])
x2 = paddle.to_tensor(np.pi/2)
x2 = paddle.to_tensor(math.pi/2)
result2 = paddle.rad2deg(x2)
print(result2)
# Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
......@@ -4663,18 +4672,20 @@ def angle(x, name=None):
x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32')
y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32')
z = x + 1j * y
print(z.numpy())
# [[-2.-2.j -2.-1.j -2.+0.j -2.+1.j]
# [-1.-2.j -1.-1.j -1.+0.j -1.+1.j]
# [ 0.-2.j 0.-1.j 0.+0.j 0.+1.j]
# [ 1.-2.j 1.-1.j 1.+0.j 1.+1.j]]
print(z)
# Tensor(shape=[4, 4], dtype=complex64, place=Place(cpu), stop_gradient=True,
# [[(-2-2j), (-2-1j), (-2+0j), (-2+1j)],
# [(-1-2j), (-1-1j), (-1+0j), (-1+1j)],
# [-2j , -1j , 0j , 1j ],
# [ (1-2j), (1-1j), (1+0j), (1+1j)]])
theta = paddle.angle(z)
print(theta.numpy())
# [[-2.3561945 -2.6779451 3.1415927 2.6779451]
# [-2.0344439 -2.3561945 3.1415927 2.3561945]
# [-1.5707964 -1.5707964 0. 1.5707964]
# [-1.1071488 -0.7853982 0. 0.7853982]]
print(theta)
# Tensor(shape=[4, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
# [[-2.35619450, -2.67794514, 3.14159274, 2.67794514],
# [-2.03444386, -2.35619450, 3.14159274, 2.35619450],
# [-1.57079637, -1.57079637, 0. , 1.57079637],
# [-1.10714877, -0.78539819, 0. , 0.78539819]])
"""
if in_dygraph_mode():
......@@ -4755,19 +4766,14 @@ def frac(x, name=None):
.. code-block:: python
import paddle
import numpy as np
input = paddle.rand([3, 3], 'float32')
print(input.numpy())
# [[ 1.2203873 -1.0035421 -0.35193074]
# [-0.00928353 0.58917075 -0.8407828 ]
# [-1.5131804 0.5850153 -0.17597814]]
input = paddle.to_tensor([[12.22000003, -1.02999997],
[-0.54999995, 0.66000003]])
output = paddle.frac(input)
print(output.numpy())
# [[ 0.22038734 -0.00354207 -0.35193074]
# [-0.00928353 0.58917075 -0.8407828 ]
# [-0.5131804 0.5850153 -0.17597814]]
print(output)
# Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
# [[ 0.22000003, -0.02999997],
# [-0.54999995, 0.66000003]])
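# Editor's sketch (assumes paddle.trunc is available): frac equals the
# input minus its truncated integer part.
ref = input - paddle.trunc(input)
print(paddle.allclose(output, ref)) # expect True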
"""
op_type = 'elementwise_sub'
axis = -1
......
......@@ -628,32 +628,35 @@ def quantile(x, q, axis=None, keepdim=False):
Examples:
.. code-block:: python
import numpy as np
import paddle
x = np.arange(0, 8, dtype=np.float32).reshape(4, 2)
# [[0 1]
# [2 3]
# [4 5]
# [6 7]]
y = paddle.to_tensor(x)
y = paddle.arange(0, 8, dtype="float32").reshape([4, 2])
# Tensor(shape=[4, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
# [[0., 1.],
# [2., 3.],
# [4., 5.],
# [6., 7.]])
y1 = paddle.quantile(y, q=0.5, axis=[0, 1])
# 3.5
# Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=True,
# 3.50000000)
y2 = paddle.quantile(y, q=0.5, axis=1)
# [0.5 2.5 4.5 6.5]
# Tensor(shape=[4], dtype=float64, place=Place(cpu), stop_gradient=True,
# [0.50000000, 2.50000000, 4.50000000, 6.50000000])
y3 = paddle.quantile(y, q=[0.3, 0.5], axis=0)
# [[1.8 2.8]
# [3. 4. ]]
# Tensor(shape=[2, 2], dtype=float64, place=Place(cpu), stop_gradient=True,
# [[1.80000000, 2.80000000],
# [3. , 4. ]])
x[0][0] = np.nan
y = paddle.to_tensor(x)
y[0,0] = float("nan")
y4 = paddle.quantile(y, q=0.8, axis=1, keepdim=True)
# [[nan]
# [2.8]
# [4.8]
# [6.8]]
# Tensor(shape=[4, 1], dtype=float64, place=Place(cpu), stop_gradient=True,
# [[nan ],
# [2.80000000],
# [4.80000000],
# [6.80000000]])
"""
return _compute_quantile(x, q, axis=axis, keepdim=keepdim, ignore_nan=False)
......@@ -688,35 +691,37 @@ def nanquantile(x, q, axis=None, keepdim=False):
Examples:
.. code-block:: python
import numpy as np
import paddle
x = np.array(
x = paddle.to_tensor(
[[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]],
dtype=np.float32
)
x[0][0] = np.nan
[5, 6, 7, 8, 9]],
dtype="float32")
x[0, 0] = float("nan")
y1 = paddle.nanquantile(x, q=0.5, axis=[0, 1])
# 5.0
# Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=True,
# 5.)
y2 = paddle.nanquantile(x, q=0.5, axis=1)
# [2.5 7. ]
# Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True,
# [2.50000000, 7. ])
y3 = paddle.nanquantile(x, q=[0.3, 0.5], axis=0)
# [[5. 2.5 3.5 4.5 5.5]
# [5. 3.5 4.5 5.5 6.5]
# Tensor(shape=[2, 5], dtype=float64, place=Place(cpu), stop_gradient=True,
# [[5. , 2.50000000, 3.50000000, 4.50000000, 5.50000000],
# [5. , 3.50000000, 4.50000000, 5.50000000, 6.50000000]])
y4 = paddle.nanquantile(x, q=0.8, axis=1, keepdim=True)
# [[3.4]
# [8.2]]
# Tensor(shape=[2, 1], dtype=float64, place=Place(cpu), stop_gradient=True,
# [[3.40000000],
# [8.20000000]])
nan = paddle.full(shape=[2, 3], fill_value=np.nan)
nan = paddle.full(shape=[2, 3], fill_value=float("nan"))
y5 = paddle.nanquantile(nan, q=0.8, axis=1, keepdim=True)
# [[nan]
# [nan]]
# Tensor(shape=[2, 1], dtype=float64, place=Place(cpu), stop_gradient=True,
# [[nan],
# [nan]])
"""
return _compute_quantile(x, q, axis=axis, keepdim=keepdim, ignore_nan=True)
......@@ -189,15 +189,11 @@ def yolo_loss(
.. code-block:: python
import paddle
import numpy as np
x = np.random.random([2, 14, 8, 8]).astype('float32')
gt_box = np.random.random([2, 10, 4]).astype('float32')
gt_label = np.random.random([2, 10]).astype('int32')
x = paddle.rand([2, 14, 8, 8], dtype='float32')
gt_box = paddle.rand([2, 10, 4], dtype='float32')
gt_label = paddle.randint(0, 2, [2, 10], dtype='int32') # random class ids in [0, 2)
x = paddle.to_tensor(x)
gt_box = paddle.to_tensor(gt_box)
gt_label = paddle.to_tensor(gt_label)
loss = paddle.vision.ops.yolo_loss(x,
gt_box=gt_box,
......@@ -412,13 +408,9 @@ def yolo_box(
.. code-block:: python
import paddle
import numpy as np
x = np.random.random([2, 14, 8, 8]).astype('float32')
img_size = np.ones((2, 2)).astype('int32')
x = paddle.to_tensor(x)
img_size = paddle.to_tensor(img_size)
x = paddle.rand([2, 14, 8, 8], dtype='float32')
img_size = paddle.ones((2, 2), dtype='int32')
boxes, scores = paddle.vision.ops.yolo_box(x,
img_size=img_size,
......@@ -2139,33 +2131,36 @@ def nms(
.. code-block:: python
import paddle
import numpy as np
boxes = np.random.rand(4, 4).astype('float32')
boxes = paddle.rand([4, 4], dtype='float32')
boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
# [[0.06287421 0.5809351 0.3443958 0.8713329 ]
# [0.0749094 0.9713205 0.99241287 1.2799143 ]
# [0.46246734 0.6753201 1.346266 1.3821303 ]
# [0.8984796 0.5619834 1.1254641 1.0201943 ]]
print(boxes)
# Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[0.64811575, 0.89756244, 0.86473107, 1.48552322],
# [0.48085716, 0.84799081, 0.54517937, 0.86396021],
# [0.62646860, 0.72901905, 1.17392159, 1.69691563],
# [0.89729202, 0.46281594, 1.88733089, 0.98588502]])
out = paddle.vision.ops.nms(paddle.to_tensor(boxes), 0.1)
# [0, 1, 3, 0]
out = paddle.vision.ops.nms(boxes, 0.1)
print(out)
# Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [0, 1, 3])
scores = np.random.rand(4).astype('float32')
# [0.98015213 0.3156527 0.8199343 0.874901 ]
scores = paddle.to_tensor([0.6, 0.7, 0.4, 0.233])
categories = [0, 1, 2, 3]
category_idxs = np.random.choice(categories, 4)
# [2 0 0 3]
out = paddle.vision.ops.nms(paddle.to_tensor(boxes),
0.1,
paddle.to_tensor(scores),
paddle.to_tensor(category_idxs),
categories,
4)
# [0, 3, 2]
category_idxs = paddle.to_tensor([2, 0, 0, 3], dtype="int64")
out = paddle.vision.ops.nms(boxes,
0.1,
scores,
category_idxs,
categories,
4)
print(out)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [1, 0, 2, 3])
"""
def _nms(boxes, iou_threshold):
......