Unverified Commit a2761308, authored by Kevin吴嘉文, committed by GitHub

Remove redundant numpy input in Example code (test=document_fix) (#47555)

* Remove redundant numpy input in Example code

* Remove redundant numpy input in Example code, test=document_fix
Parent 954be40d
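The commit applies one pattern throughout the touched docstrings: example inputs that were built with NumPy and wrapped via `paddle.to_tensor` are now created directly with native Paddle APIs, so the `import numpy as np` line can be dropped. A minimal standalone sketch of that before/after pattern (illustrative only, not part of the diff; the `[1, 10]` shape and `Linear(10, 1)` layer are borrowed from the LookAhead example below):

```python
import numpy as np   # only needed by the old-style example
import paddle

# Before: the docstring example round-trips through NumPy.
inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))

# After: the same random input is created with a native Paddle API,
# so the example no longer needs NumPy at all.
inp = paddle.rand([1, 10], dtype="float32")

linear = paddle.nn.Linear(10, 1)
loss = paddle.mean(linear(inp))
print(loss)
```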
......@@ -158,8 +158,7 @@ class LookAhead(Optimizer):
.. code-block:: python
import paddle
import numpy as np
inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
inp = paddle.rand([1,10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......@@ -272,8 +271,8 @@ class LookAhead(Optimizer):
.. code-block:: python
import paddle
import numpy as np
inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
inp = paddle.rand([1, 10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......
......@@ -341,8 +341,7 @@ class ModelAverage(Optimizer):
.. code-block:: python
import paddle
import numpy as np
inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
inp = paddle.rand([1, 10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......@@ -377,8 +376,7 @@ class ModelAverage(Optimizer):
.. code-block:: python
import paddle
import numpy as np
inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
inp = paddle.rand([1, 10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......@@ -424,8 +422,7 @@ class ModelAverage(Optimizer):
.. code-block:: python
import paddle
import numpy as np
inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
inp = paddle.rand([1, 10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......@@ -499,8 +496,7 @@ class ModelAverage(Optimizer):
.. code-block:: python
import paddle
import numpy as np
inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
inp = paddle.rand([1, 10], dtype="float32")
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
......
......@@ -1286,10 +1286,12 @@ def softshrink(x, threshold=0.5, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
out = F.softshrink(x)
print(out)
# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.39999998, 0. , 0. , 0.30000001])
"""
if threshold < 0:
raise ValueError(
......@@ -1337,10 +1339,12 @@ def softsign(x, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.softsign(x)
print(out)
# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.28571430, -0.16666666, 0.09090909, 0.23076925])
"""
if in_dygraph_mode():
return _C_ops.softsign(x)
......@@ -1376,10 +1380,12 @@ def swish(x, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-2., 0., 1.]))
out = F.swish(x) # [-0.238406, 0., 0.731059]
x = paddle.to_tensor([-2., 0., 1.])
out = F.swish(x)
print(out)
# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.23840584, 0. , 0.73105854])
"""
if in_dygraph_mode():
return _C_ops.swish(x, 1.0)
......@@ -1456,10 +1462,12 @@ def tanhshrink(x, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.tanhshrink(x)
print(out)
# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.02005106, -0.00262468, 0.00033200, 0.00868741])
"""
if in_dygraph_mode():
return _C_ops.tanh_shrink(x)
......@@ -1504,10 +1512,12 @@ def thresholded_relu(x, threshold=1.0, name=None):
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([2., 0., 1.]))
out = F.thresholded_relu(x) # [2., 0., 0.]
x = paddle.to_tensor([2., 0., 1.])
out = F.thresholded_relu(x)
print(out)
# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [2., 0., 0.])
"""
if in_dygraph_mode():
......
......@@ -1963,18 +1963,16 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
.. code-block:: python
import paddle
import numpy as np
x_data = np.array([[[0, 1, 0],
[ 1, 0, 1]]]).astype("float32")
print(x_data.shape)
paddle.disable_static()
x = paddle.to_tensor(x_data, stop_gradient=False)
x = paddle.to_tensor([[[0, 1, 0],
[ 1, 0, 1]]], dtype="float32", stop_gradient=False)
output = paddle.nn.functional.label_smooth(x)
print(output)
#[[[0.03333334 0.93333334 0.03333334]
# [0.93333334 0.03333334 0.93333334]]]
# Tensor(shape=[1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[[0.03333334, 0.93333334, 0.03333334],
# [0.93333334, 0.03333334, 0.93333334]]])
"""
if epsilon > 1.0 or epsilon < 0.0:
raise ValueError("The value of epsilon must be between 0 and 1.")
......
......@@ -368,26 +368,22 @@ def conv1d(
import paddle
import paddle.nn.functional as F
import numpy as np
x = np.array([[[4, 8, 1, 9],
[7, 2, 0, 9],
[6, 9, 2, 6]]]).astype(np.float32)
w=np.array(
[[[9, 3, 4],
[0, 0, 7],
[2, 5, 6]],
[[0, 3, 4],
[2, 9, 7],
[5, 6, 8]]]).astype(np.float32)
x_var = paddle.to_tensor(x)
w_var = paddle.to_tensor(w)
y_var = F.conv1d(x_var, w_var)
y_np = y_var.numpy()
print(y_np)
# [[[133. 238.]
# [160. 211.]]]
x = paddle.to_tensor([[[4, 8, 1, 9],
[7, 2, 0, 9],
[6, 9, 2, 6]]], dtype="float32")
w = paddle.to_tensor([[[9, 3, 4],
[0, 0, 7],
[2, 5, 6]],
[[0, 3, 4],
[2, 9, 7],
[5, 6, 8]]], dtype="float32")
y = F.conv1d(x, w)
print(y)
# Tensor(shape=[1, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[133., 238.],
# [160., 211.]]])
"""
cudnn_version = get_cudnn_version()
if cudnn_version is not None:
......@@ -905,24 +901,20 @@ def conv1d_transpose(
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
# shape: (1, 2, 4)
x=np.array([[[4, 0, 9, 7],
[8, 0, 9, 2,]]]).astype(np.float32)
x = paddle.to_tensor([[[4, 0, 9, 7],
[8, 0, 9, 2,]]], dtype="float32")
# shape: (2, 1, 2)
w=np.array([[[7, 0]],
[[4, 2]]]).astype(np.float32)
x_var = paddle.to_tensor(x)
w_var = paddle.to_tensor(w)
y_var = F.conv1d_transpose(x_var, w_var)
print(y_var)
# [[[60. 16. 99. 75. 4.]]]
w = paddle.to_tensor([[[7, 0]],
[[4, 2]]], dtype="float32")
y = F.conv1d_transpose(x, w)
print(y)
# Tensor(shape=[1, 1, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[60., 16., 99., 75., 4. ]]])
"""
cudnn_version = get_cudnn_version()
if cudnn_version is not None:
......
......@@ -251,16 +251,14 @@ def fluid_softmax_with_cross_entropy(
.. code-block:: python
import paddle
import numpy as np
data = np.random.rand(128).astype("float32")
label = np.random.rand(1).astype("int64")
data = paddle.to_tensor(data)
label = paddle.to_tensor(label)
linear = paddle.nn.Linear(128, 100)
x = linear(data)
out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
logits = paddle.to_tensor([0.4, 0.6, 0.9])
label = paddle.randint(high=2, shape=[1], dtype="int64")
out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
print(out)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.15328646])
"""
if _non_static_mode():
if core.is_compiled_with_npu():
......@@ -1772,7 +1770,6 @@ def ctc_loss(
# declarative mode
import paddle.nn.functional as F
import numpy as np
import paddle
# length of the longest logit sequence
......@@ -1784,8 +1781,7 @@ def ctc_loss(
# class num
class_num = 3
np.random.seed(1)
log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
log_probs = paddle.to_tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
[3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],
[[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
......@@ -1798,30 +1794,30 @@ def ctc_loss(
[9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],
[[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
[3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
labels = np.array([[1, 2, 2],
[1, 2, 2]]).astype("int32")
input_lengths = np.array([5, 5]).astype("int64")
label_lengths = np.array([3, 3]).astype("int64")
log_probs = paddle.to_tensor(log_probs)
labels = paddle.to_tensor(labels)
input_lengths = paddle.to_tensor(input_lengths)
label_lengths = paddle.to_tensor(label_lengths)
[3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]],
dtype="float32")
labels = paddle.to_tensor([[1, 2, 2],
[1, 2, 2]], dtype="int32")
input_lengths = paddle.to_tensor([5, 5], dtype="int64")
label_lengths = paddle.to_tensor([3, 3], dtype="int64")
loss = F.ctc_loss(log_probs, labels,
input_lengths,
label_lengths,
blank=0,
reduction='none')
print(loss) #[3.9179852 2.9076521]
print(loss)
# Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [3.91798496, 2.90765190])
loss = F.ctc_loss(log_probs, labels,
input_lengths,
label_lengths,
blank=0,
reduction='mean')
print(loss) #[1.1376063]
print(loss)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.13760614])
"""
......@@ -2257,16 +2253,14 @@ def softmax_with_cross_entropy(
.. code-block:: python
import paddle
import numpy as np
data = np.random.rand(128).astype("float32")
label = np.random.rand(1).astype("int64")
data = paddle.to_tensor(data)
label = paddle.to_tensor(label)
linear = paddle.nn.Linear(128, 100)
x = linear(data)
out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
logits = paddle.to_tensor([0.4, 0.6, 0.9], dtype="float32")
label = paddle.to_tensor([1], dtype="int64")
out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
print(out)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.15328646])
"""
return fluid_softmax_with_cross_entropy(
logits,
......@@ -4003,18 +3997,26 @@ def soft_margin_loss(input, label, reduction='mean', name=None):
.. code-block:: python
import paddle
import numpy as np
input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
output = paddle.nn.functional.soft_margin_loss(input, label)
print(output)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [0.64022040])
input = paddle.uniform(shape=(5, 5), dtype="float32", min=0.1, max=0.8)
label = paddle.randint(0, 2, shape=(5, 5), dtype="int64")
label[label==0]=-1
input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
label_np[label_np==0]=-1
input = paddle.to_tensor(input_np)
label = paddle.to_tensor(label_np)
output = paddle.nn.functional.soft_margin_loss(input, label, reduction='none')
print(output)
# Tensor(shape=[5, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[1.09917796, 0.52613139, 0.56263304, 0.82736146, 0.38776723],
# [1.07179427, 1.11924267, 0.49877715, 1.10026348, 0.46184641],
# [0.84367639, 0.74795729, 0.44629076, 0.55123353, 0.77659678],
# [0.39465919, 0.76651484, 0.54485321, 0.76609844, 0.77166790],
# [0.51283568, 0.84757161, 0.78913331, 1.05268764, 0.45318675]])
"""
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
......
......@@ -54,27 +54,28 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
.. code-block:: python
import numpy as np
import paddle
import paddle.nn.functional as F
paddle.disable_static()
x = np.arange(6, dtype=np.float32).reshape(2,3)
x = paddle.to_tensor(x)
x = paddle.arange(6, dtype="float32").reshape([2,3])
y = F.normalize(x)
print(y.numpy())
# [[0. 0.4472136 0.8944272 ]
# [0.42426404 0.5656854 0.7071067 ]]
print(y)
# Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[0. , 0.44721359, 0.89442718],
# [0.42426404, 0.56568539, 0.70710671]])
y = F.normalize(x, p=1.5)
print(y.numpy())
# [[0. 0.40862012 0.81724024]
# [0.35684016 0.4757869 0.5947336 ]]
print(y)
# Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[0. , 0.40862012, 0.81724024],
# [0.35684016, 0.47578689, 0.59473360]])
y = F.normalize(x, axis=0)
print(y.numpy())
# [[0. 0.24253564 0.37139067]
# [1. 0.97014254 0.9284767 ]]
print(y)
# Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[0. , 0.24253564, 0.37139067],
# [1. , 0.97014254, 0.92847669]])
"""
if in_dygraph_mode():
eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
......@@ -161,22 +162,31 @@ def batch_norm(
Examples:
.. code-block:: python
import paddle
import numpy as np
x = np.random.seed(123)
x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
running_mean = np.random.random(size=1).astype('float32')
running_variance = np.random.random(size=1).astype('float32')
weight_data = np.random.random(size=1).astype('float32')
bias_data = np.random.random(size=1).astype('float32')
x = paddle.to_tensor(x)
rm = paddle.to_tensor(running_mean)
rv = paddle.to_tensor(running_variance)
w = paddle.to_tensor(weight_data)
b = paddle.to_tensor(bias_data)
batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
print(batch_norm_out)
import paddle
x = paddle.arange(12, dtype="float32").reshape([2, 1, 2, 3])
print(x)
# Tensor(shape=[2, 1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[[0. , 1. , 2. ],
# [3. , 4. , 5. ]]],
# [[[6. , 7. , 8. ],
# [9. , 10., 11.]]]])
running_mean = paddle.to_tensor([0], dtype="float32")
running_variance = paddle.to_tensor([1], dtype="float32")
weight = paddle.to_tensor([2], dtype="float32")
bias = paddle.to_tensor([1], dtype="float32")
batch_norm_out = paddle.nn.functional.batch_norm(x, running_mean,
running_variance, weight, bias)
print(batch_norm_out)
# Tensor(shape=[2, 1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[[1. , 2.99998999 , 4.99997997 ],
# [6.99996948 , 8.99995995 , 10.99994946]]],
# [[[12.99993896, 14.99992943, 16.99991989],
# [18.99990845, 20.99989891, 22.99988937]]]])
"""
assert len(x.shape) >= 2, "input dim must be larger than 1"
......
......@@ -1780,10 +1780,8 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
# output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
#
import paddle
import numpy as np
input_data = np.random.rand(2, 3, 32, 32)
x = paddle.to_tensor(input_data)
x = paddle.rand([2, 3, 32, 32])
# x.shape is [2, 3, 32, 32]
out = paddle.nn.functional.adaptive_avg_pool2d(
x = x,
......
......@@ -91,56 +91,48 @@ def sparse_attention(
# required: skiptest
import paddle
import numpy as np
query_data = np.array([[[[0, 1,], [2, 3],
[ 0, 1], [2, 3]]]]).astype("float32")
key_data = np.array([[[[0, 1,], [2, 3],
[ 0, 1], [2, 3]]]]).astype("float32")
value_data = np.array([[[[0, 1,], [2, 3],
[ 0, 1], [2, 3]]]]).astype("float32")
sparse_csr_offset_data = np.array([[[0, 2,
4, 6, 8]]]).astype("int32")
sparse_csr_columns_data = np.array([[[0, 1,
0, 1, 2, 3, 2, 3]]]).astype("int32")
key_padding_mask_data = np.array([[1,1,1,0]]).astype("float32")
attention_mask_data = np.array([[1,0,1,1],[1,1,1,1],[1,1,1,1],[1,1,1,1]]).astype("float32")
print(query_data.shape)
# (1, 1, 4, 2)
print(sparse_csr_offset_data.shape)
# (1, 1, 5)
print(sparse_csr_columns_data.shape)
# (1, 1, 8)
paddle.disable_static()
query = paddle.to_tensor(query_data, stop_gradient=False,
place=paddle.CUDAPlace(0))
key = paddle.to_tensor(key_data, stop_gradient=False,
place=paddle.CUDAPlace(0))
value = paddle.to_tensor(value_data, stop_gradient=False,
place=paddle.CUDAPlace(0))
offset = paddle.to_tensor(sparse_csr_offset_data, stop_gradient=False,
place=paddle.CUDAPlace(0))
columns = paddle.to_tensor(sparse_csr_columns_data, stop_gradient=False,
place=paddle.CUDAPlace(0))
key_padding_mask = paddle.to_tensor(key_padding_mask_data, stop_gradient=False,
place=paddle.CUDAPlace(0))
attention_mask = paddle.to_tensor(attention_mask_data, stop_gradient=False,
place=paddle.CUDAPlace(0))
# `query`, `key` and `value` all have shape [1, 1, 4, 2]
query = paddle.to_tensor([[[[0, 1, ], [2, 3],
[0, 1], [2, 3]]]], dtype="float32")
key = paddle.to_tensor([[[[0, 1], [2, 3],
[0, 1], [2, 3]]]], dtype="float32")
value = paddle.to_tensor([[[[0, 1], [2, 3],
[0, 1], [2, 3]]]], dtype="float32")
offset = paddle.to_tensor([[[0, 2, 4, 6, 8]]], dtype="int32")
columns = paddle.to_tensor([[[0, 1, 0, 1, 2, 3, 2, 3]]], dtype="int32")
print(offset.shape) # (1, 1, 5)
print(columns.shape) # (1, 1, 8)
key_padding_mask = paddle.to_tensor([[1, 1, 1, 0]], dtype="float32")
attention_mask = paddle.to_tensor([[1, 0, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]], dtype="float32")
output_mask = paddle.nn.functional.sparse_attention(query, key,
value, offset, columns,
key_padding_mask=key_padding_mask, attn_mask=attention_mask)
value, offset, columns,
key_padding_mask=key_padding_mask,
attn_mask=attention_mask)
print(output_mask)
# [[[[0. , 1. ],
# [1.99830270, 2.99830270],
# [0. , 1. ],
# [0. , 1. ]]]]
# Tensor(shape=[1, 1, 4, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[[[0. , 1. ],
# [1.99830270, 2.99830270],
# [0. , 1. ],
# [0. , 1. ]]]])
output = paddle.nn.functional.sparse_attention(query, key,
value, offset, columns)
value, offset, columns)
print(output)
# [[[[1.60885942, 2.60885954],
# [1.99830270, 2.99830270],
# [1.60885942, 2.60885954],
# [1.99830270, 2.99830270]]]]
# Tensor(shape=[1, 1, 4, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[[[1.60885942, 2.60885954],
# [1.99830270, 2.99830270],
# [1.60885942, 2.60885954],
# [1.99830270, 2.99830270]]]])
"""
if in_dynamic_mode():
(
......
......@@ -282,13 +282,13 @@ class Tanh(Layer):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
m = paddle.nn.Tanh()
out = m(x)
print(out)
# [-0.37994896 -0.19737532 0.09966799 0.29131261]
# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.37994894, -0.19737533, 0.09966800, 0.29131261])
"""
def __init__(self, name=None):
......@@ -879,11 +879,13 @@ class Softshrink(Layer):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
m = paddle.nn.Softshrink()
out = m(x) # [-0.4, 0, 0, 0.3]
out = m(x)
print(out)
# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.39999998, 0. , 0. , 0.30000001])
"""
def __init__(self, threshold=0.5, name=None):
......@@ -919,11 +921,13 @@ class Softsign(Layer):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
m = paddle.nn.Softsign()
out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
out = m(x)
print(out)
# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.28571430, -0.16666666, 0.09090909, 0.23076925])
"""
def __init__(self, name=None):
......@@ -958,11 +962,13 @@ class Swish(Layer):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-2., 0., 1.]))
x = paddle.to_tensor([-2., 0., 1.])
m = paddle.nn.Swish()
out = m(x) # [-0.238406, 0., 0.731059]
out = m(x)
print(out)
# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.23840584, 0. , 0.73105854])
"""
def __init__(self, name=None):
......@@ -1042,11 +1048,13 @@ class Tanhshrink(Layer):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
m = paddle.nn.Tanhshrink()
out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
out = m(x)
print(out)
# Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.02005106, -0.00262468, 0.00033200, 0.00868741])
"""
def __init__(self, name=None):
......@@ -1089,11 +1097,13 @@ class ThresholdedReLU(Layer):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([2., 0., 1.]))
x = paddle.to_tensor([2., 0., 1.])
m = paddle.nn.ThresholdedReLU()
out = m(x) # [2., 0., 0.]
out = m(x)
print(out)
# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [2., 0., 0.])
"""
def __init__(self, threshold=1.0, name=None):
......
......@@ -364,16 +364,13 @@ class Upsample(Layer):
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
input_data = np.random.rand(2,3,6,10).astype("float32")
upsample_out = paddle.nn.Upsample(size=[12,12])
input = paddle.rand([2,3,6,10], dtype="float32")
upsample_out = paddle.nn.Upsample(size=[12,12])
input = paddle.to_tensor(input_data)
output = upsample_out(x=input)
print(output.shape)
# [2L, 3L, 12L, 12L]
# [2, 3, 12, 12]
"""
......@@ -640,14 +637,12 @@ class Bilinear(Layer):
.. code-block:: python
import paddle
import numpy
layer1 = numpy.random.random((5, 5)).astype('float32')
layer2 = numpy.random.random((5, 4)).astype('float32')
layer1 = paddle.rand((5, 5)).astype('float32')
layer2 = paddle.rand((5, 4)).astype('float32')
bilinear = paddle.nn.Bilinear(
in1_features=5, in2_features=4, out_features=1000)
result = bilinear(paddle.to_tensor(layer1),
paddle.to_tensor(layer2)) # result shape [5, 1000]
result = bilinear(layer1,layer2) # result shape [5, 1000]
"""
......@@ -739,17 +734,22 @@ class Dropout(Layer):
.. code-block:: python
import paddle
import numpy as np
x = np.array([[1,2,3], [4,5,6]]).astype('float32')
x = paddle.to_tensor(x)
x = paddle.to_tensor([[1,2,3], [4,5,6]], dtype="float32")
m = paddle.nn.Dropout(p=0.5)
y_train = m(x)
print(y_train)
# Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[2., 0., 6.],
# [0., 0., 0.]])
m.eval() # switch the model to test phase
y_test = m(x)
print(x)
print(y_train)
print(y_test)
# Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[1., 2., 3.],
# [4., 5., 6.]])
"""
def __init__(self, p=0.5, axis=None, mode="upscale_in_train", name=None):
......@@ -804,17 +804,35 @@ class Dropout2D(Layer):
.. code-block:: python
import paddle
import numpy as np
x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
x = paddle.to_tensor(x)
x = paddle.rand([2, 2, 1, 3], dtype="float32")
print(x)
# Tensor(shape=[2, 2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[[0.10052059, 0.93890846, 0.45351565]],
# [[0.47507706, 0.45021373, 0.11331241]]],
# [[[0.53358698, 0.97375143, 0.34997326]],
# [[0.24758087, 0.52628899, 0.17970420]]]])
m = paddle.nn.Dropout2D(p=0.5)
y_train = m(x)
print(y_train)
# Tensor(shape=[2, 2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[[0. , 0. , 0. ]],
# [[0.95015413, 0.90042746, 0.22662482]]],
# [[[1.06717396, 1.94750285, 0.69994652]],
# [[0. , 0. , 0. ]]]])
m.eval() # switch the model to test phase
y_test = m(x)
print(x)
print(y_train)
print(y_test)
# Tensor(shape=[2, 2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[[0.10052059, 0.93890846, 0.45351565]],
# [[0.47507706, 0.45021373, 0.11331241]]],
# [[[0.53358698, 0.97375143, 0.34997326]],
# [[0.24758087, 0.52628899, 0.17970420]]]])
"""
def __init__(self, p=0.5, data_format='NCHW', name=None):
......@@ -867,17 +885,47 @@ class Dropout3D(Layer):
.. code-block:: python
import paddle
import numpy as np
x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
x = paddle.to_tensor(x)
x = paddle.arange(24, dtype="float32").reshape((1, 2, 2, 2, 3))
print(x)
# Tensor(shape=[1, 2, 2, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[[[0. , 1. , 2. ],
# [3. , 4. , 5. ]],
# [[6. , 7. , 8. ],
# [9. , 10., 11.]]],
# [[[12., 13., 14.],
# [15., 16., 17.]],
# [[18., 19., 20.],
# [21., 22., 23.]]]]])
m = paddle.nn.Dropout3D(p=0.5)
y_train = m(x)
print(y_train)
# Tensor(shape=[1, 2, 2, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[[[0. , 2. , 4. ],
# [6. , 8. , 10.]],
# [[12., 14., 16.],
# [18., 20., 22.]]],
# [[[0. , 0. , 0. ],
# [0. , 0. , 0. ]],
# [[0. , 0. , 0. ],
# [0. , 0. , 0. ]]]]])
m.eval() # switch the model to test phase
y_test = m(x)
print(x)
print(y_train)
print(y_test)
# Tensor(shape=[1, 2, 2, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[[[0. , 1. , 2. ],
# [3. , 4. , 5. ]],
# [[6. , 7. , 8. ],
# [9. , 10., 11.]]],
# [[[12., 13., 14.],
# [15., 16., 17.]],
# [[18., 19., 20.],
# [21., 22., 23.]]]]])
"""
def __init__(self, p=0.5, data_format='NCDHW', name=None):
......@@ -928,18 +976,21 @@ class AlphaDropout(Layer):
.. code-block:: python
import paddle
import numpy as np
x = np.array([[-1, 1], [-1, 1]]).astype('float32')
x = paddle.to_tensor(x)
x = paddle.to_tensor([[-1, 1], [-1, 1]], dtype="float32")
m = paddle.nn.AlphaDropout(p=0.5)
y_train = m(x)
print(y_train)
# Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[-0.77919382, 1.66559887],
# [-0.77919382, -0.77919382]])
m.eval() # switch the model to test phase
y_test = m(x)
print(x)
print(y_train)
# [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly)
print(y_test)
# Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[-1., 1.],
# [-1., 1.]])
"""
def __init__(self, p=0.5, name=None):
......@@ -1278,18 +1329,17 @@ class CosineSimilarity(Layer):
import paddle
import paddle.nn as nn
import numpy as np
np.random.seed(0)
x1 = np.random.rand(2,3)
x2 = np.random.rand(2,3)
x1 = paddle.to_tensor(x1)
x2 = paddle.to_tensor(x2)
x1 = paddle.to_tensor([[1., 2., 3.],
[2., 3., 4.]], dtype="float32")
x2 = paddle.to_tensor([[8., 3., 3.],
[2., 3., 4.]], dtype="float32")
cos_sim_func = nn.CosineSimilarity(axis=0)
result = cos_sim_func(x1, x2)
print(result)
# [0.99806249 0.9817672 0.94987036]
# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [0.65079135, 0.98058069, 1. ])
"""
def __init__(self, axis=1, eps=1e-8):
......@@ -1376,30 +1426,33 @@ class Embedding(Layer):
.. code-block:: python
import paddle
import numpy as np
x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32)
x = paddle.to_tensor(x_data, stop_gradient=False)
y = paddle.to_tensor(y_data, stop_gradient=False)
embedding = paddle.nn.Embedding(10, 3, sparse=True)
x = paddle.to_tensor([[0], [1], [3]], dtype="int64", stop_gradient=False)
embedding = paddle.nn.Embedding(4, 3, sparse=True)
w0=np.full(shape=(10, 3), fill_value=2).astype(np.float32)
w0 = paddle.to_tensor([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]], dtype="float32")
embedding.weight.set_value(w0)
print(embedding.weight)
# Tensor(shape=[4, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[0., 0., 0.],
# [1., 1., 1.],
# [2., 2., 2.],
# [3., 3., 3.]])
adam = paddle.optimizer.Adam(parameters=[embedding.weight], learning_rate=0.01)
adam.clear_grad()
# weight.shape = [10, 3]
# x.data = [[3],[4],[5]]
# x.shape = [3, 1]
out = embedding(x)
print(out)
# Tensor(shape=[3, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[[0., 0., 0.]],
# [[1., 1., 1.]],
# [[3., 3., 3.]]])
# out.data = [[2,2,2], [2,2,2], [2,2,2]]
# out.shape = [3, 1, 3]
out=embedding(x)
out.backward()
adam.step()
......
......@@ -309,26 +309,26 @@ class Conv1D(_ConvNd):
Examples:
.. code-block:: python
import paddle
from paddle.nn import Conv1D
import numpy as np
x = np.array([[[4, 8, 1, 9],
[7, 2, 0, 9],
[6, 9, 2, 6]]]).astype(np.float32)
w=np.array(
[[[9, 3, 4],
[0, 0, 7],
[2, 5, 6]],
[[0, 3, 4],
[2, 9, 7],
[5, 6, 8]]]).astype(np.float32)
x_t = paddle.to_tensor(x)
conv = Conv1D(3, 2, 3)
conv.weight.set_value(w)
y_t = conv(x_t)
print(y_t)
# [[[133. 238.]
# [160. 211.]]]
import paddle
from paddle.nn import Conv1D
x = paddle.to_tensor([[[4, 8, 1, 9],
[7, 2, 0, 9],
[6, 9, 2, 6]]], dtype="float32")
w = paddle.to_tensor([[[9, 3, 4],
[0, 0, 7],
[2, 5, 6]],
[[0, 3, 4],
[2, 9, 7],
[5, 6, 8]]], dtype="float32")
conv = Conv1D(3, 2, 3)
conv.weight.set_value(w)
y = conv(x)
print(y)
# Tensor(shape=[1, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[[133., 238.],
# [160., 211.]]])
"""
def __init__(
......@@ -495,23 +495,22 @@ class Conv1DTranspose(_ConvNd):
Examples:
.. code-block:: python
import paddle
from paddle.nn import Conv1DTranspose
import numpy as np
# shape: (1, 2, 4)
x=np.array([[[4, 0, 9, 7],
[8, 0, 9, 2]]]).astype(np.float32)
# shape: (2, 1, 2)
y=np.array([[[7, 0]],
[[4, 2]]]).astype(np.float32)
x_t = paddle.to_tensor(x)
conv = Conv1DTranspose(2, 1, 2)
conv.weight.set_value(y)
y_t = conv(x_t)
print(y_t)
# [[[60. 16. 99. 75. 4.]]]
import paddle
from paddle.nn import Conv1DTranspose
# shape: (1, 2, 4)
x = paddle.to_tensor([[[4, 0, 9, 7],
[8, 0, 9, 2]]], dtype="float32")
# shape: (2, 1, 2)
w = paddle.to_tensor([[[7, 0]],
[[4, 2]]], dtype="float32")
conv = Conv1DTranspose(2, 1, 2)
conv.weight.set_value(w)
y = conv(x)
print(y)
# Tensor(shape=[1, 1, 5], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[[60., 16., 99., 75., 4. ]]])
"""
def __init__(
......
......@@ -765,16 +765,15 @@ class BCELoss(Layer):
Examples:
.. code-block:: python
import numpy as np
import paddle
input_data = np.array([0.5, 0.6, 0.7]).astype("float32")
label_data = np.array([1.0, 0.0, 1.0]).astype("float32")
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
input = paddle.to_tensor([0.5, 0.6, 0.7])
label = paddle.to_tensor([1.0, 0.0, 1.0])
bce_loss = paddle.nn.BCELoss()
output = bce_loss(input, label)
print(output) # [0.65537095]
print(output)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [0.65537101])
"""
......@@ -1078,7 +1077,6 @@ class CTCLoss(Layer):
.. code-block:: python
# declarative mode
import numpy as np
import paddle
# length of the longest logit sequence
......@@ -1090,8 +1088,7 @@ class CTCLoss(Layer):
# class num
class_num = 3
np.random.seed(1)
log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
log_probs = paddle.to_tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
[3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],
[[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
......@@ -1104,26 +1101,25 @@ class CTCLoss(Layer):
[9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],
[[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
[3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
labels = np.array([[1, 2, 2],
[1, 2, 2]]).astype("int32")
input_lengths = np.array([5, 5]).astype("int64")
label_lengths = np.array([3, 3]).astype("int64")
log_probs = paddle.to_tensor(log_probs)
labels = paddle.to_tensor(labels)
input_lengths = paddle.to_tensor(input_lengths)
label_lengths = paddle.to_tensor(label_lengths)
[3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]], dtype="float32")
labels = paddle.to_tensor([[1, 2, 2],
[1, 2, 2]], dtype="int32")
input_lengths = paddle.to_tensor([5, 5], dtype="int64")
label_lengths = paddle.to_tensor([3, 3], dtype="int64")
loss = paddle.nn.CTCLoss(blank=0, reduction='none')(log_probs, labels,
input_lengths,
label_lengths)
print(loss) #[3.9179852 2.9076521]
print(loss)
# Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [3.91798496, 2.90765190])
loss = paddle.nn.CTCLoss(blank=0, reduction='mean')(log_probs, labels,
input_lengths,
label_lengths)
print(loss) #[1.1376063]
print(loss)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.13760614])
"""
def __init__(self, blank=0, reduction='mean'):
......@@ -1858,20 +1854,29 @@ class SoftMarginLoss(Layer):
.. code-block:: python
import paddle
import numpy as np
input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
soft_margin_loss = paddle.nn.SoftMarginLoss()
output = soft_margin_loss(input, label)
print(output)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [0.64022040])
input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
input_np = paddle.uniform(shape=(5, 5), min=0.1, max=0.8, dtype="float64")
label_np = paddle.randint(high=2, shape=(5, 5), dtype="int64")
label_np[label_np==0]=-1
input = paddle.to_tensor(input_np)
label = paddle.to_tensor(label_np)
soft_margin_loss = paddle.nn.SoftMarginLoss(reduction='none')
output = soft_margin_loss(input, label)
print(output)
# Tensor(shape=[5, 5], dtype=float64, place=Place(gpu:0), stop_gradient=True,
# [[0.61739663, 0.51405668, 1.09346100, 0.42385561, 0.91602303],
# [0.76997038, 1.01977148, 0.98971722, 1.13976032, 0.88152088],
# [0.55476735, 1.10505384, 0.89923519, 0.45018155, 1.06587511],
# [0.37998142, 0.48067240, 0.47791212, 0.55664053, 0.98581399],
# [0.78571653, 0.59319711, 0.39701841, 0.76172109, 0.83781742]])
"""
def __init__(self, reduction='mean', name=None):
......
......@@ -349,16 +349,12 @@ class GroupNorm(Layer):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 6, 2, 2)).astype('float32')
x = paddle.to_tensor(x_data)
x = paddle.arange(48, dtype="float32").reshape((2, 6, 2, 2))
group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6)
group_norm_out = group_norm(x)
print(group_norm_out.numpy())
print(group_norm_out)
"""
def __init__(
......@@ -1123,18 +1119,23 @@ class SyncBatchNorm(_BatchNormBase):
Examples:
.. code-block:: python
# required: gpu
import paddle
import paddle.nn as nn
import numpy as np
x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
x = paddle.to_tensor(x)
x = paddle.to_tensor([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
if paddle.is_compiled_with_cuda():
sync_batch_norm = nn.SyncBatchNorm(2)
hidden1 = sync_batch_norm(x)
print(hidden1)
# [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]]
# Tensor(shape=[1, 2, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[[[ 0.26824948, 1.09363246],
# [ 0.26824948, -1.63013160]],
# [[ 0.80956620, -0.66528702],
# [-1.27446556, 1.13018656]]]])
"""
def __init__(
......
......@@ -67,9 +67,8 @@ class AvgPool1D(Layer):
import paddle
import paddle.nn as nn
import numpy as np
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
AvgPool1D = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
pool_out = AvgPool1D(data)
# pool_out shape: [1, 3, 16]
......@@ -172,10 +171,9 @@ class AvgPool2D(Layer):
import paddle
import paddle.nn as nn
import numpy as np
# max pool2d
input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
input = paddle.uniform([1, 3, 32, 32], dtype="float32", min=-1, max=1)
AvgPool2D = nn.AvgPool2D(kernel_size=2,
stride=2, padding=0)
output = AvgPool2D(input)
......@@ -270,10 +268,9 @@ class AvgPool3D(Layer):
import paddle
import paddle.nn as nn
import numpy as np
# avg pool3d
input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 2, 3, 32, 32]).astype(np.float32))
input = paddle.uniform([1, 2, 3, 32, 32], dtype="float32", min=-1, max=1)
AvgPool3D = nn.AvgPool3D(kernel_size=2,
stride=2, padding=0)
output = AvgPool3D(input)
......@@ -369,9 +366,8 @@ class MaxPool1D(Layer):
import paddle
import paddle.nn as nn
import numpy as np
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
MaxPool1D = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
pool_out = MaxPool1D(data)
# pool_out shape: [1, 3, 16]
......@@ -475,10 +471,9 @@ class MaxPool2D(Layer):
import paddle
import paddle.nn as nn
import numpy as np
# max pool2d
input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
input = paddle.uniform([1, 3, 32, 32], dtype="float32", min=-1, max=1)
MaxPool2D = nn.MaxPool2D(kernel_size=2,
stride=2, padding=0)
output = MaxPool2D(input)
......@@ -573,10 +568,9 @@ class MaxPool3D(Layer):
import paddle
import paddle.nn as nn
import numpy as np
# max pool3d
input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 2, 3, 32, 32]).astype(np.float32))
input = paddle.uniform([1, 2, 3, 32, 32], dtype="float32", min=-1, max=1)
MaxPool3D = nn.MaxPool3D(kernel_size=2,
stride=2, padding=0)
output = MaxPool3D(input)
......@@ -668,9 +662,8 @@ class AdaptiveAvgPool1D(Layer):
#
import paddle
import paddle.nn as nn
import numpy as np
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
AdaptiveAvgPool1D = nn.AdaptiveAvgPool1D(output_size=16)
pool_out = AdaptiveAvgPool1D(data)
# pool_out shape: [1, 3, 16]
......@@ -747,11 +740,9 @@ class AdaptiveAvgPool2D(Layer):
# output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
#
import paddle
import numpy as np
input_data = np.random.rand(2, 3, 32, 32)
x = paddle.to_tensor(input_data)
# x.shape is [2, 3, 32, 32]
x = paddle.rand([2, 3, 32, 32])
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=3)
pool_out = adaptive_avg_pool(x = x)
# pool_out.shape is [2, 3, 3, 3]
......@@ -841,11 +832,9 @@ class AdaptiveAvgPool3D(Layer):
# output[:, :, i, j, k] =
# avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
import paddle
import numpy as np
input_data = np.random.rand(2, 3, 8, 32, 32)
x = paddle.to_tensor(input_data)
# x.shape is [2, 3, 8, 32, 32]
x = paddle.rand([2, 3, 8, 32, 32])
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=3)
pool_out = adaptive_avg_pool(x = x)
# pool_out = [2, 3, 3, 3, 3]
......@@ -921,9 +910,8 @@ class AdaptiveMaxPool1D(Layer):
#
import paddle
import paddle.nn as nn
import numpy as np
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
AdaptiveMaxPool1D = nn.AdaptiveMaxPool1D(output_size=16)
pool_out = AdaptiveMaxPool1D(data)
# pool_out shape: [1, 3, 16]
......@@ -1007,10 +995,9 @@ class AdaptiveMaxPool2D(Layer):
# output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
#
import paddle
import numpy as np
input_data = np.random.rand(2, 3, 32, 32)
x = paddle.to_tensor(input_data)
x = paddle.rand([2, 3, 32, 32])
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=3, return_mask=True)
pool_out, indices = adaptive_max_pool(x = x)
"""
......@@ -1097,10 +1084,8 @@ class AdaptiveMaxPool3D(Layer):
# output[:, :, i, j, k] =
# max(input[:, :, dstart:dend, hstart: hend, wstart: wend])
import paddle
import numpy as np
input_data = np.random.rand(2, 3, 8, 32, 32)
x = paddle.to_tensor(input_data)
x = paddle.rand([2, 3, 8, 32, 32])
pool = paddle.nn.AdaptiveMaxPool3D(output_size=4)
out = pool(x)
# out shape: [2, 3, 4, 4, 4]
......
......@@ -46,7 +46,6 @@ def export(layer, path, input_spec=None, opset_version=9, **configs):
.. code-block:: python
import paddle
import numpy as np
class LinearNet(paddle.nn.Layer):
def __init__(self):
......@@ -77,8 +76,8 @@ def export(layer, path, input_spec=None, opset_version=9, **configs):
# Export model with 'Tensor' to support pruned model by set 'output_spec'.
def export_logic():
model = Logic()
x = paddle.to_tensor(np.array([1]))
y = paddle.to_tensor(np.array([2]))
x = paddle.to_tensor([1])
y = paddle.to_tensor([2])
# Static and run model.
paddle.jit.to_static(model)
out = model(x, y, z=True)
......
......@@ -68,10 +68,9 @@ class Adadelta(Optimizer):
.. code-block:: python
import paddle
import numpy as np
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
......
......@@ -84,9 +84,8 @@ class Adamax(Optimizer):
.. code-block:: python
import paddle
import numpy as np
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
......
......@@ -83,8 +83,8 @@ class Momentum(Optimizer):
.. code-block:: python
import paddle
import numpy as np
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
......
......@@ -1004,14 +1004,13 @@ class Optimizer(object):
.. code-block:: python
import paddle
import numpy as np
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
x = paddle.arange(26, dtype="float32").reshape([2, 13])
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
parameters = linear.parameters())
out = linear(a)
out = linear(x)
out.backward()
adam.step()
adam.clear_grad()
......@@ -1081,11 +1080,9 @@ class Optimizer(object):
.. code-block:: python
import paddle
import numpy as np
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
optimizer = paddle.optimizer.Adam(learning_rate=0.1,
......@@ -1286,11 +1283,9 @@ class Optimizer(object):
Examples:
.. code-block:: python
import numpy as np
import paddle
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
a = paddle.arange(26, dtype="float32").reshape([2, 13])
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
......@@ -1396,14 +1391,12 @@ class Optimizer(object):
.. code-block:: python
import paddle
import numpy as np
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
a = paddle.arange(26, dtype="float32").reshape([2, 13])
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
parameters = linear.parameters())
parameters = linear.parameters())
out = linear(a)
out.backward()
adam.step()
......
......@@ -183,13 +183,12 @@ class InputSpec(object):
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.static import InputSpec
paddle.disable_static()
x = paddle.to_tensor(np.ones([2, 2], np.float32))
x = paddle.ones([2, 2], dtype="float32")
x_spec = InputSpec.from_tensor(x, name='x')
print(x_spec) # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)
......
......@@ -757,69 +757,82 @@ def cond(x, p=None, name=None):
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]])
# compute conditional number when p is None
out = paddle.linalg.cond(x)
# out.numpy() [1.4142135]
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.41421342])
# compute conditional number when order of the norm is 'fro'
out_fro = paddle.linalg.cond(x, p='fro')
# out_fro.numpy() [3.1622777]
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [3.16227770])
# compute conditional number when order of the norm is 'nuc'
out_nuc = paddle.linalg.cond(x, p='nuc')
# out_nuc.numpy() [9.2426405]
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [9.24263859])
# compute conditional number when order of the norm is 1
out_1 = paddle.linalg.cond(x, p=1)
# out_1.numpy() [2.]
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [2.])
# compute conditional number when order of the norm is -1
out_minus_1 = paddle.linalg.cond(x, p=-1)
# out_minus_1.numpy() [1.]
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.])
# compute conditional number when order of the norm is 2
out_2 = paddle.linalg.cond(x, p=2)
# out_2.numpy() [1.4142135]
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.41421342])
# compute conditional number when order of the norm is -1
out_minus_2 = paddle.linalg.cond(x, p=-2)
# out_minus_2.numpy() [0.70710677]
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [0.70710683])
# compute conditional number when order of the norm is inf
out_inf = paddle.linalg.cond(x, p=np.inf)
# out_inf.numpy() [2.]
out_inf = paddle.linalg.cond(x, p=float("inf"))
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [2.])
# compute conditional number when order of the norm is -inf
out_minus_inf = paddle.linalg.cond(x, p=-np.inf)
# out_minus_inf.numpy() [1.]
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32'))
# a.numpy()
# [[[ 0.14063153 -0.996288 0.7996131 -0.02571543]
# [-0.16303636 1.5534962 -0.49919784 -0.04402903]
# [-1.1341571 -0.6022629 0.5445269 0.29154757]
# [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]]
# [[-0.58081484 0.12402827 0.7229862 -0.55046535]
# [-0.15178485 -1.1604939 0.75810957 0.30971205]
# [-0.9669573 1.0940945 -0.27363303 -0.35416734]
# [-1.216529 2.0018666 -0.7773689 -0.17556527]]]
out_minus_inf = paddle.linalg.cond(x, p=-float("inf"))
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.])
a = paddle.randn([2, 4, 4])
# Tensor(shape=[2, 4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[-0.06784091, -0.07095790, 1.31792855, -0.58959651],
# [ 0.20818676, -0.85640615, -0.89998871, -1.47439921],
# [-0.49132481, 0.42250812, -0.77383220, -2.19794774],
# [-0.33551720, -1.70003879, -1.09795380, -0.63737559]],
# [[ 1.12026262, -0.16119350, -1.21157813, 2.74383283],
# [-0.15999718, 0.18798758, -0.69392562, 1.35720372],
# [-0.53013402, -2.26304483, 1.40843511, -1.02288902],
# [ 0.69533503, 2.05261683, -0.02251151, -1.43127477]]])
a_cond_fro = paddle.linalg.cond(a, p='fro')
# a_cond_fro.numpy() [31.572273 28.120834]
b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64'))
# b.numpy()
# [[[ 1.61707487 0.46829144 0.38130416 0.82546736]
# [-1.72710298 0.08866375 -0.62518804 0.16128892]
# [-0.02822879 -1.67764516 0.11141444 0.3220113 ]]
# [[ 0.22524372 0.62474921 -0.85503233 -1.03960523]
# [-0.76620689 0.56673047 0.85064753 -0.45158196]
# [ 1.47595418 2.23646462 1.5701758 0.10497519]]]
# Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [8.86691189 , 75.23817444])
b = paddle.randn([2, 3, 4])
# Tensor(shape=[2, 3, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[-0.43754861, 1.80796063, -0.78729683, -1.82264030],
# [-0.27670753, 0.06620564, 0.29072434, -0.31155765],
# [ 0.34123746, -0.05444612, 0.05001324, -1.46877074]],
# [[-0.64331555, -1.51103854, -1.26277697, -0.68024760],
# [ 2.59375715, -1.06665540, 0.96575671, -0.73330832],
# [-0.47064447, -0.23945692, -0.95150250, -1.07125998]]])
b_cond_2 = paddle.linalg.cond(b, p=2)
# b_cond_2.numpy() [3.30064451 2.51976252]
# Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [6.64228773, 3.89068866])
"""
......@@ -1503,18 +1516,13 @@ def cholesky(x, upper=False, name=None):
.. code-block:: python
import paddle
import numpy as np
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03
x = paddle.to_tensor(x_data)
a = paddle.rand([3, 3], dtype="float32")
a_t = paddle.transpose(a, [1, 0])
x = paddle.matmul(a, a_t) + 1e-03
out = paddle.linalg.cholesky(x, upper=False)
print(out)
# [[1.190523 0. 0. ]
# [0.9906703 0.27676893 0. ]
# [1.25450498 0.05600871 0.06400121]]
"""
if in_dygraph_mode():
return _C_ops.cholesky(x, upper)
......
......@@ -174,15 +174,11 @@ def yolo_loss(
.. code-block:: python
import paddle
import numpy as np
x = np.random.random([2, 14, 8, 8]).astype('float32')
gt_box = np.random.random([2, 10, 4]).astype('float32')
gt_label = np.random.random([2, 10]).astype('int32')
x = paddle.rand([2, 14, 8, 8]).astype('float32')
gt_box = paddle.rand([2, 10, 4]).astype('float32')
gt_label = paddle.rand([2, 10]).astype('int32')
x = paddle.to_tensor(x)
gt_box = paddle.to_tensor(gt_box)
gt_label = paddle.to_tensor(gt_label)
loss = paddle.vision.ops.yolo_loss(x,
gt_box=gt_box,
......@@ -391,13 +387,9 @@ def yolo_box(
.. code-block:: python
import paddle
import numpy as np
x = np.random.random([2, 14, 8, 8]).astype('float32')
img_size = np.ones((2, 2)).astype('int32')
x = paddle.to_tensor(x)
img_size = paddle.to_tensor(img_size)
x = paddle.rand([2, 14, 8, 8]).astype('float32')
img_size = paddle.ones((2, 2)).astype('int32')
boxes, scores = paddle.vision.ops.yolo_box(x,
img_size=img_size,
......@@ -2118,33 +2110,36 @@ def nms(
.. code-block:: python
import paddle
import numpy as np
boxes = np.random.rand(4, 4).astype('float32')
boxes = paddle.rand([4, 4]).astype('float32')
boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
# [[0.06287421 0.5809351 0.3443958 0.8713329 ]
# [0.0749094 0.9713205 0.99241287 1.2799143 ]
# [0.46246734 0.6753201 1.346266 1.3821303 ]
# [0.8984796 0.5619834 1.1254641 1.0201943 ]]
print(boxes)
# Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[0.64811575, 0.89756244, 0.86473107, 1.48552322],
# [0.48085716, 0.84799081, 0.54517937, 0.86396021],
# [0.62646860, 0.72901905, 1.17392159, 1.69691563],
# [0.89729202, 0.46281594, 1.88733089, 0.98588502]])
out = paddle.vision.ops.nms(paddle.to_tensor(boxes), 0.1)
# [0, 1, 3, 0]
out = paddle.vision.ops.nms(boxes, 0.1)
print(out)
# Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [0, 1, 3])
scores = np.random.rand(4).astype('float32')
# [0.98015213 0.3156527 0.8199343 0.874901 ]
scores = paddle.to_tensor([0.6, 0.7, 0.4, 0.233])
categories = [0, 1, 2, 3]
category_idxs = np.random.choice(categories, 4)
# [2 0 0 3]
out = paddle.vision.ops.nms(paddle.to_tensor(boxes),
0.1,
paddle.to_tensor(scores),
paddle.to_tensor(category_idxs),
categories,
4)
# [0, 3, 2]
category_idxs = paddle.to_tensor([2, 0, 0, 3], dtype="int64")
out = paddle.vision.ops.nms(boxes,
0.1,
paddle.to_tensor(scores),
paddle.to_tensor(category_idxs),
categories,
4)
print(out)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [1, 0, 2, 3])
"""
def _nms(boxes, iou_threshold):
......