Unverified · commit a2761308 · authored by Kevin吴嘉文, committed by GitHub

Remove redundant numpy input in Example code (test=document_fix) (#47555)

* Remove redundant numpy input in Example code

* Remove redundant numpy input in Example code, test=document_fix

Parent: 954be40d
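Every hunk below applies the same substitution: docstring examples that built an input array with numpy and wrapped it in `paddle.to_tensor` now create the tensor directly with Paddle's own factory ops, and expected outputs are updated to the printed `Tensor(...)` form. A minimal sketch of the equivalences the commit relies on (illustrative only; the shapes and dtypes are taken from the hunks that follow):

    import paddle

    # was: paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
    inp = paddle.rand([1, 10], dtype="float32")

    # was: np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
    inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)

    # was: np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
    label = paddle.randint(0, 2, shape=[5, 5], dtype="int64")

    # was: np.arange(26).reshape(2, 13).astype("float32")
    x = paddle.arange(26, dtype="float32").reshape([2, 13])

    # was: paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
    x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])

    # was: np.inf as a norm order for paddle.linalg.cond
    p = float("inf")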
@@ -158,8 +158,7 @@ class LookAhead(Optimizer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+    inp = paddle.rand([1,10], dtype="float32")
     linear = paddle.nn.Linear(10, 1)
     out = linear(inp)
     loss = paddle.mean(out)
@@ -272,8 +271,8 @@ class LookAhead(Optimizer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+    inp = paddle.rand([1, 10], dtype="float32")
     linear = paddle.nn.Linear(10, 1)
     out = linear(inp)
     loss = paddle.mean(out)
...
@@ -341,8 +341,7 @@ class ModelAverage(Optimizer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+    inp = paddle.rand([1, 10], dtype="float32")
     linear = paddle.nn.Linear(10, 1)
     out = linear(inp)
     loss = paddle.mean(out)
@@ -377,8 +376,7 @@ class ModelAverage(Optimizer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+    inp = paddle.rand([1, 10], dtype="float32")
     linear = paddle.nn.Linear(10, 1)
     out = linear(inp)
     loss = paddle.mean(out)
@@ -424,8 +422,7 @@ class ModelAverage(Optimizer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+    inp = paddle.rand([1, 10], dtype="float32")
     linear = paddle.nn.Linear(10, 1)
     out = linear(inp)
     loss = paddle.mean(out)
@@ -499,8 +496,7 @@ class ModelAverage(Optimizer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+    inp = paddle.rand([1, 10], dtype="float32")
     linear = paddle.nn.Linear(10, 1)
     out = linear(inp)
     loss = paddle.mean(out)
...
@@ -1286,10 +1286,12 @@ def softshrink(x, threshold=0.5, name=None):
     import paddle
     import paddle.nn.functional as F
-    import numpy as np
-    x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
-    out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
+    x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
+    out = F.softshrink(x)
+    print(out)
+    # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [-0.39999998, 0. , 0. , 0.30000001])
     """
     if threshold < 0:
         raise ValueError(
@@ -1337,10 +1339,12 @@ def softsign(x, name=None):
     import paddle
     import paddle.nn.functional as F
-    import numpy as np
-    x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-    out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+    x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+    out = F.softsign(x)
+    print(out)
+    # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [-0.28571430, -0.16666666, 0.09090909, 0.23076925])
     """
     if in_dygraph_mode():
         return _C_ops.softsign(x)
@@ -1376,10 +1380,12 @@ def swish(x, name=None):
     import paddle
     import paddle.nn.functional as F
-    import numpy as np
-    x = paddle.to_tensor(np.array([-2., 0., 1.]))
-    out = F.swish(x) # [-0.238406, 0., 0.731059]
+    x = paddle.to_tensor([-2., 0., 1.])
+    out = F.swish(x)
+    print(out)
+    # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [-0.23840584, 0. , 0.73105854])
     """
     if in_dygraph_mode():
         return _C_ops.swish(x, 1.0)
@@ -1456,10 +1462,12 @@ def tanhshrink(x, name=None):
     import paddle
     import paddle.nn.functional as F
-    import numpy as np
-    x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-    out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+    x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+    out = F.tanhshrink(x)
+    print(out)
+    # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [-0.02005106, -0.00262468, 0.00033200, 0.00868741])
     """
     if in_dygraph_mode():
         return _C_ops.tanh_shrink(x)
@@ -1504,10 +1512,12 @@ def thresholded_relu(x, threshold=1.0, name=None):
     import paddle
     import paddle.nn.functional as F
-    import numpy as np
-    x = paddle.to_tensor(np.array([2., 0., 1.]))
-    out = F.thresholded_relu(x) # [2., 0., 0.]
+    x = paddle.to_tensor([2., 0., 1.])
+    out = F.thresholded_relu(x)
+    print(out)
+    # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [2., 0., 0.])
     """
     if in_dygraph_mode():
...
@@ -1963,18 +1963,16 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x_data = np.array([[[0, 1, 0],
-                        [ 1, 0, 1]]]).astype("float32")
-    print(x_data.shape)
     paddle.disable_static()
-    x = paddle.to_tensor(x_data, stop_gradient=False)
+    x = paddle.to_tensor([[[0, 1, 0],
+                           [ 1, 0, 1]]], dtype="float32", stop_gradient=False)
     output = paddle.nn.functional.label_smooth(x)
     print(output)
-    #[[[0.03333334 0.93333334 0.03333334]
-    # [0.93333334 0.03333334 0.93333334]]]
+    # Tensor(shape=[1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+    # [[[0.03333334, 0.93333334, 0.03333334],
+    #   [0.93333334, 0.03333334, 0.93333334]]])
     """
     if epsilon > 1.0 or epsilon < 0.0:
         raise ValueError("The value of epsilon must be between 0 and 1.")
...
@@ -368,26 +368,22 @@ def conv1d(
     import paddle
     import paddle.nn.functional as F
-    import numpy as np
-    x = np.array([[[4, 8, 1, 9],
+    x = paddle.to_tensor([[[4, 8, 1, 9],
                    [7, 2, 0, 9],
-                  [6, 9, 2, 6]]]).astype(np.float32)
-    w=np.array(
-    [[[9, 3, 4],
+                  [6, 9, 2, 6]]], dtype="float32")
+    w = paddle.to_tensor([[[9, 3, 4],
        [0, 0, 7],
        [2, 5, 6]],
       [[0, 3, 4],
        [2, 9, 7],
-       [5, 6, 8]]]).astype(np.float32)
-    x_var = paddle.to_tensor(x)
-    w_var = paddle.to_tensor(w)
-    y_var = F.conv1d(x_var, w_var)
-    y_np = y_var.numpy()
-    print(y_np)
-    # [[[133. 238.]
-    #  [160. 211.]]]
+       [5, 6, 8]]], dtype="float32")
+    y = F.conv1d(x, w)
+    print(y)
+    # Tensor(shape=[1, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[[133., 238.],
+    #  [160., 211.]]])
     """
     cudnn_version = get_cudnn_version()
     if cudnn_version is not None:
@@ -905,24 +901,20 @@ def conv1d_transpose(
     Examples:
     .. code-block:: python
     import paddle
    import paddle.nn.functional as F
-    import numpy as np
     # shape: (1, 2, 4)
-    x=np.array([[[4, 0, 9, 7],
-                 [8, 0, 9, 2,]]]).astype(np.float32)
+    x = paddle.to_tensor([[[4, 0, 9, 7],
+                           [8, 0, 9, 2,]]], dtype="float32")
     # shape: (2, 1, 2)
-    w=np.array([[[7, 0]],
-                [[4, 2]]]).astype(np.float32)
-    x_var = paddle.to_tensor(x)
-    w_var = paddle.to_tensor(w)
-    y_var = F.conv1d_transpose(x_var, w_var)
-    print(y_var)
-    # [[[60. 16. 99. 75. 4.]]]
+    w = paddle.to_tensor([[[7, 0]],
+                          [[4, 2]]], dtype="float32")
+    y = F.conv1d_transpose(x, w)
+    print(y)
+    # Tensor(shape=[1, 1, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[[60., 16., 99., 75., 4. ]]])
     """
     cudnn_version = get_cudnn_version()
     if cudnn_version is not None:
...
@@ -251,16 +251,14 @@ def fluid_softmax_with_cross_entropy(
     .. code-block:: python
     import paddle
-    import numpy as np
-    data = np.random.rand(128).astype("float32")
-    label = np.random.rand(1).astype("int64")
-    data = paddle.to_tensor(data)
-    label = paddle.to_tensor(label)
-    linear = paddle.nn.Linear(128, 100)
-    x = linear(data)
-    out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
+    logits = paddle.to_tensor([0.4, 0.6, 0.9])
+    label = paddle.randint(high=2, shape=[1], dtype="int64")
+    out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
     print(out)
+    # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [1.15328646])
     """
     if _non_static_mode():
         if core.is_compiled_with_npu():
@@ -1772,7 +1770,6 @@ def ctc_loss(
     # declarative mode
     import paddle.nn.functional as F
-    import numpy as np
     import paddle
     # length of the longest logit sequence
@@ -1784,8 +1781,7 @@ def ctc_loss(
     # class num
     class_num = 3
-    np.random.seed(1)
-    log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
+    log_probs = paddle.to_tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
                            [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],
                           [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
@@ -1798,30 +1794,30 @@ def ctc_loss(
                            [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],
                           [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
-                           [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
-    labels = np.array([[1, 2, 2],
-                       [1, 2, 2]]).astype("int32")
-    input_lengths = np.array([5, 5]).astype("int64")
-    label_lengths = np.array([3, 3]).astype("int64")
-    log_probs = paddle.to_tensor(log_probs)
-    labels = paddle.to_tensor(labels)
-    input_lengths = paddle.to_tensor(input_lengths)
-    label_lengths = paddle.to_tensor(label_lengths)
+                           [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]],
+                          dtype="float32")
+    labels = paddle.to_tensor([[1, 2, 2],
+                               [1, 2, 2]], dtype="int32")
+    input_lengths = paddle.to_tensor([5, 5], dtype="int64")
+    label_lengths = paddle.to_tensor([3, 3], dtype="int64")
     loss = F.ctc_loss(log_probs, labels,
                       input_lengths,
                       label_lengths,
                       blank=0,
                       reduction='none')
-    print(loss) #[3.9179852 2.9076521]
+    print(loss)
+    # Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [3.91798496, 2.90765190])
     loss = F.ctc_loss(log_probs, labels,
                       input_lengths,
                       label_lengths,
                       blank=0,
                       reduction='mean')
-    print(loss) #[1.1376063]
+    print(loss)
+    # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [1.13760614])
     """
@@ -2257,16 +2253,14 @@ def softmax_with_cross_entropy(
     .. code-block:: python
     import paddle
-    import numpy as np
-    data = np.random.rand(128).astype("float32")
-    label = np.random.rand(1).astype("int64")
-    data = paddle.to_tensor(data)
-    label = paddle.to_tensor(label)
-    linear = paddle.nn.Linear(128, 100)
-    x = linear(data)
-    out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
+    logits = paddle.to_tensor([0.4, 0.6, 0.9], dtype="float32")
+    label = paddle.to_tensor([1], dtype="int64")
+    out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
     print(out)
+    # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [1.15328646])
     """
     return fluid_softmax_with_cross_entropy(
         logits,
@@ -4003,18 +3997,26 @@ def soft_margin_loss(input, label, reduction='mean', name=None):
     .. code-block:: python
     import paddle
-    import numpy as np
     input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
     label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
     output = paddle.nn.functional.soft_margin_loss(input, label)
-    input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
-    label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
-    label_np[label_np==0]=-1
-    input = paddle.to_tensor(input_np)
-    label = paddle.to_tensor(label_np)
+    print(output)
+    # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [0.64022040])
+    input = paddle.uniform(shape=(5, 5), dtype="float32", min=0.1, max=0.8)
+    label = paddle.randint(0, 2, shape=(5, 5), dtype="int64")
+    label[label==0]=-1
     output = paddle.nn.functional.soft_margin_loss(input, label, reduction='none')
+    print(output)
+    # Tensor(shape=[5, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[1.09917796, 0.52613139, 0.56263304, 0.82736146, 0.38776723],
+    #  [1.07179427, 1.11924267, 0.49877715, 1.10026348, 0.46184641],
+    #  [0.84367639, 0.74795729, 0.44629076, 0.55123353, 0.77659678],
+    #  [0.39465919, 0.76651484, 0.54485321, 0.76609844, 0.77166790],
+    #  [0.51283568, 0.84757161, 0.78913331, 1.05268764, 0.45318675]])
     """
     if reduction not in ['sum', 'mean', 'none']:
         raise ValueError(
...
@@ -54,27 +54,28 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
     .. code-block:: python
-    import numpy as np
     import paddle
     import paddle.nn.functional as F
     paddle.disable_static()
-    x = np.arange(6, dtype=np.float32).reshape(2,3)
-    x = paddle.to_tensor(x)
+    x = paddle.arange(6, dtype="float32").reshape([2,3])
     y = F.normalize(x)
-    print(y.numpy())
-    # [[0. 0.4472136 0.8944272 ]
-    # [0.42426404 0.5656854 0.7071067 ]]
+    print(y)
+    # Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[0.        , 0.44721359, 0.89442718],
+    #  [0.42426404, 0.56568539, 0.70710671]])
     y = F.normalize(x, p=1.5)
-    print(y.numpy())
-    # [[0. 0.40862012 0.81724024]
-    # [0.35684016 0.4757869 0.5947336 ]]
+    print(y)
+    # Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[0.        , 0.40862012, 0.81724024],
+    #  [0.35684016, 0.47578689, 0.59473360]])
     y = F.normalize(x, axis=0)
-    print(y.numpy())
-    # [[0. 0.24253564 0.37139067]
-    # [1. 0.97014254 0.9284767 ]]
+    print(y)
+    # Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[0.        , 0.24253564, 0.37139067],
+    #  [1.        , 0.97014254, 0.92847669]])
     """
     if in_dygraph_mode():
         eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
@@ -162,21 +163,30 @@ def batch_norm(
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = np.random.seed(123)
-    x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
-    running_mean = np.random.random(size=1).astype('float32')
-    running_variance = np.random.random(size=1).astype('float32')
-    weight_data = np.random.random(size=1).astype('float32')
-    bias_data = np.random.random(size=1).astype('float32')
-    x = paddle.to_tensor(x)
-    rm = paddle.to_tensor(running_mean)
-    rv = paddle.to_tensor(running_variance)
-    w = paddle.to_tensor(weight_data)
-    b = paddle.to_tensor(bias_data)
-    batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
+    x = paddle.arange(12, dtype="float32").reshape([2, 1, 2, 3])
+    print(x)
+    # Tensor(shape=[2, 1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[[[0. , 1. , 2. ],
+    #    [3. , 4. , 5. ]]],
+    #  [[[6. , 7. , 8. ],
+    #    [9. , 10., 11.]]]])
+    running_mean = paddle.to_tensor([0], dtype="float32")
+    running_variance = paddle.to_tensor([1], dtype="float32")
+    weight = paddle.to_tensor([2], dtype="float32")
+    bias = paddle.to_tensor([1], dtype="float32")
+    batch_norm_out = paddle.nn.functional.batch_norm(x, running_mean,
+                                                     running_variance, weight, bias)
     print(batch_norm_out)
+    # Tensor(shape=[2, 1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[[[1. , 2.99998999 , 4.99997997 ],
+    #    [6.99996948 , 8.99995995 , 10.99994946]]],
+    #  [[[12.99993896, 14.99992943, 16.99991989],
+    #    [18.99990845, 20.99989891, 22.99988937]]]])
     """
     assert len(x.shape) >= 2, "input dim must be larger than 1"
...
@@ -1780,10 +1780,8 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
     # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
     #
     import paddle
-    import numpy as np
-    input_data = np.random.rand(2, 3, 32, 32)
-    x = paddle.to_tensor(input_data)
+    x = paddle.rand([2, 3, 32, 32])
     # x.shape is [2, 3, 32, 32]
     out = paddle.nn.functional.adaptive_avg_pool2d(
                     x = x,
...
@@ -91,56 +91,48 @@ def sparse_attention(
     # required: skiptest
     import paddle
-    import numpy as np
-    query_data = np.array([[[[0, 1,], [2, 3],
-                             [ 0, 1], [2, 3]]]]).astype("float32")
-    key_data = np.array([[[[0, 1,], [2, 3],
-                           [ 0, 1], [2, 3]]]]).astype("float32")
-    value_data = np.array([[[[0, 1,], [2, 3],
-                             [ 0, 1], [2, 3]]]]).astype("float32")
-    sparse_csr_offset_data = np.array([[[0, 2,
-                                         4, 6, 8]]]).astype("int32")
-    sparse_csr_columns_data = np.array([[[0, 1,
-                                          0, 1, 2, 3, 2, 3]]]).astype("int32")
-    key_padding_mask_data = np.array([[1,1,1,0]]).astype("float32")
-    attention_mask_data = np.array([[1,0,1,1],[1,1,1,1],[1,1,1,1],[1,1,1,1]]).astype("float32")
-    print(query_data.shape)
-    # (1, 1, 4, 2)
-    print(sparse_csr_offset_data.shape)
-    # (1, 1, 5)
-    print(sparse_csr_columns_data.shape)
-    # (1, 1, 8)
     paddle.disable_static()
-    query = paddle.to_tensor(query_data, stop_gradient=False,
-                             place=paddle.CUDAPlace(0))
-    key = paddle.to_tensor(key_data, stop_gradient=False,
-                           place=paddle.CUDAPlace(0))
-    value = paddle.to_tensor(value_data, stop_gradient=False,
-                             place=paddle.CUDAPlace(0))
-    offset = paddle.to_tensor(sparse_csr_offset_data, stop_gradient=False,
-                              place=paddle.CUDAPlace(0))
-    columns = paddle.to_tensor(sparse_csr_columns_data, stop_gradient=False,
-                               place=paddle.CUDAPlace(0))
-    key_padding_mask = paddle.to_tensor(key_padding_mask_data, stop_gradient=False,
-                                        place=paddle.CUDAPlace(0))
-    attention_mask = paddle.to_tensor(attention_mask_data, stop_gradient=False,
-                                      place=paddle.CUDAPlace(0))
+    # `query`, `key` and `value` all have shape [1, 1, 4, 2]
+    query = paddle.to_tensor([[[[0, 1, ], [2, 3],
+                                [0, 1], [2, 3]]]], dtype="float32")
+    key = paddle.to_tensor([[[[0, 1], [2, 3],
+                              [0, 1], [2, 3]]]], dtype="float32")
+    value = paddle.to_tensor([[[[0, 1], [2, 3],
+                                [0, 1], [2, 3]]]], dtype="float32")
+    offset = paddle.to_tensor([[[0, 2, 4, 6, 8]]], dtype="int32")
+    columns = paddle.to_tensor([[[0, 1, 0, 1, 2, 3, 2, 3]]], dtype="int32")
+    print(offset.shape) # (1, 1, 5)
+    print(columns.shape) # (1, 1, 8)
+    key_padding_mask = paddle.to_tensor([[1, 1, 1, 0]], dtype="float32")
+    attention_mask = paddle.to_tensor([[1, 0, 1, 1],
+                                       [1, 1, 1, 1],
+                                       [1, 1, 1, 1],
+                                       [1, 1, 1, 1]], dtype="float32")
     output_mask = paddle.nn.functional.sparse_attention(query, key,
                     value, offset, columns,
-                    key_padding_mask=key_padding_mask, attn_mask=attention_mask)
+                    key_padding_mask=key_padding_mask,
+                    attn_mask=attention_mask)
     print(output_mask)
+    # Tensor(shape=[1, 1, 4, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
    # [[[[0. , 1. ],
    #   [1.99830270, 2.99830270],
    #   [0. , 1. ],
-    #   [0. , 1. ]]]]
+    #   [0. , 1. ]]]])
     output = paddle.nn.functional.sparse_attention(query, key,
                     value, offset, columns)
     print(output)
+    # Tensor(shape=[1, 1, 4, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
    # [[[[1.60885942, 2.60885954],
    #   [1.99830270, 2.99830270],
    #   [1.60885942, 2.60885954],
-    #   [1.99830270, 2.99830270]]]]
+    #   [1.99830270, 2.99830270]]]])
     """
     if in_dynamic_mode():
         (
...
@@ -282,13 +282,13 @@ class Tanh(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+    x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
     m = paddle.nn.Tanh()
     out = m(x)
     print(out)
-    # [-0.37994896 -0.19737532 0.09966799 0.29131261]
+    # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [-0.37994894, -0.19737533, 0.09966800, 0.29131261])
     """
     def __init__(self, name=None):
@@ -879,11 +879,13 @@ class Softshrink(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
+    x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
     m = paddle.nn.Softshrink()
-    out = m(x) # [-0.4, 0, 0, 0.3]
+    out = m(x)
+    print(out)
+    # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [-0.39999998, 0. , 0. , 0.30000001])
     """
     def __init__(self, threshold=0.5, name=None):
@@ -919,11 +921,13 @@ class Softsign(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+    x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
     m = paddle.nn.Softsign()
-    out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+    out = m(x)
+    print(out)
+    # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [-0.28571430, -0.16666666, 0.09090909, 0.23076925])
     """
     def __init__(self, name=None):
@@ -958,11 +962,13 @@ class Swish(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = paddle.to_tensor(np.array([-2., 0., 1.]))
+    x = paddle.to_tensor([-2., 0., 1.])
     m = paddle.nn.Swish()
-    out = m(x) # [-0.238406, 0., 0.731059]
+    out = m(x)
+    print(out)
+    # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [-0.23840584, 0. , 0.73105854])
     """
     def __init__(self, name=None):
@@ -1042,11 +1048,13 @@ class Tanhshrink(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+    x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
     m = paddle.nn.Tanhshrink()
-    out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+    out = m(x)
+    print(out)
+    # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [-0.02005106, -0.00262468, 0.00033200, 0.00868741])
     """
     def __init__(self, name=None):
@@ -1089,11 +1097,13 @@ class ThresholdedReLU(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = paddle.to_tensor(np.array([2., 0., 1.]))
+    x = paddle.to_tensor([2., 0., 1.])
     m = paddle.nn.ThresholdedReLU()
-    out = m(x) # [2., 0., 0.]
+    out = m(x)
+    print(out)
+    # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [2., 0., 0.])
     """
     def __init__(self, threshold=1.0, name=None):
...
@@ -364,16 +364,13 @@ class Upsample(Layer):
     .. code-block:: python
     import paddle
-    import paddle.nn as nn
-    import numpy as np
-    input_data = np.random.rand(2,3,6,10).astype("float32")
+    input = paddle.rand([2,3,6,10], dtype="float32")
     upsample_out = paddle.nn.Upsample(size=[12,12])
-    input = paddle.to_tensor(input_data)
     output = upsample_out(x=input)
     print(output.shape)
-    # [2L, 3L, 12L, 12L]
+    # [2, 3, 12, 12]
     """
@@ -640,14 +637,12 @@ class Bilinear(Layer):
     .. code-block:: python
     import paddle
-    import numpy
-    layer1 = numpy.random.random((5, 5)).astype('float32')
-    layer2 = numpy.random.random((5, 4)).astype('float32')
+    layer1 = paddle.rand((5, 5)).astype('float32')
+    layer2 = paddle.rand((5, 4)).astype('float32')
     bilinear = paddle.nn.Bilinear(
         in1_features=5, in2_features=4, out_features=1000)
-    result = bilinear(paddle.to_tensor(layer1),
-                      paddle.to_tensor(layer2)) # result shape [5, 1000]
+    result = bilinear(layer1,layer2) # result shape [5, 1000]
     """
@@ -739,17 +734,22 @@ class Dropout(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = np.array([[1,2,3], [4,5,6]]).astype('float32')
-    x = paddle.to_tensor(x)
+    x = paddle.to_tensor([[1,2,3], [4,5,6]], dtype="float32")
     m = paddle.nn.Dropout(p=0.5)
     y_train = m(x)
+    print(y_train)
+    # Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[2., 0., 6.],
+    #  [0., 0., 0.]])
     m.eval() # switch the model to test phase
     y_test = m(x)
-    print(x)
-    print(y_train)
     print(y_test)
+    # Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[1., 2., 3.],
+    #  [4., 5., 6.]])
     """
     def __init__(self, p=0.5, axis=None, mode="upscale_in_train", name=None):
@@ -804,17 +804,35 @@ class Dropout2D(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
-    x = paddle.to_tensor(x)
+    x = paddle.rand([2, 2, 1, 3], dtype="float32")
+    print(x)
+    # Tensor(shape=[2, 2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[[[0.10052059, 0.93890846, 0.45351565]],
+    #   [[0.47507706, 0.45021373, 0.11331241]]],
+    #  [[[0.53358698, 0.97375143, 0.34997326]],
+    #   [[0.24758087, 0.52628899, 0.17970420]]]])
     m = paddle.nn.Dropout2D(p=0.5)
     y_train = m(x)
+    print(y_train)
+    # Tensor(shape=[2, 2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[[[0. , 0. , 0. ]],
+    #   [[0.95015413, 0.90042746, 0.22662482]]],
+    #  [[[1.06717396, 1.94750285, 0.69994652]],
+    #   [[0. , 0. , 0. ]]]])
     m.eval() # switch the model to test phase
     y_test = m(x)
-    print(x)
-    print(y_train)
     print(y_test)
+    # Tensor(shape=[2, 2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[[[0.10052059, 0.93890846, 0.45351565]],
+    #   [[0.47507706, 0.45021373, 0.11331241]]],
+    #  [[[0.53358698, 0.97375143, 0.34997326]],
+    #   [[0.24758087, 0.52628899, 0.17970420]]]])
     """
     def __init__(self, p=0.5, data_format='NCHW', name=None):
@@ -867,17 +885,47 @@ class Dropout3D(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
-    x = paddle.to_tensor(x)
+    x = paddle.arange(24, dtype="float32").reshape((1, 2, 2, 2, 3))
+    print(x)
+    # Tensor(shape=[1, 2, 2, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[[[[0. , 1. , 2. ],
+    #     [3. , 4. , 5. ]],
+    #    [[6. , 7. , 8. ],
+    #     [9. , 10., 11.]]],
+    #   [[[12., 13., 14.],
+    #     [15., 16., 17.]],
+    #    [[18., 19., 20.],
+    #     [21., 22., 23.]]]]])
     m = paddle.nn.Dropout3D(p=0.5)
     y_train = m(x)
+    print(y_train)
+    # Tensor(shape=[1, 2, 2, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[[[[0. , 2. , 4. ],
+    #     [6. , 8. , 10.]],
+    #    [[12., 14., 16.],
+    #     [18., 20., 22.]]],
+    #   [[[0. , 0. , 0. ],
+    #     [0. , 0. , 0. ]],
+    #    [[0. , 0. , 0. ],
+    #     [0. , 0. , 0. ]]]]])
     m.eval() # switch the model to test phase
     y_test = m(x)
-    print(x)
-    print(y_train)
     print(y_test)
+    # Tensor(shape=[1, 2, 2, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[[[[0. , 1. , 2. ],
+    #     [3. , 4. , 5. ]],
+    #    [[6. , 7. , 8. ],
+    #     [9. , 10., 11.]]],
+    #   [[[12., 13., 14.],
+    #     [15., 16., 17.]],
+    #    [[18., 19., 20.],
+    #     [21., 22., 23.]]]]])
     """
     def __init__(self, p=0.5, data_format='NCDHW', name=None):
@@ -928,18 +976,21 @@ class AlphaDropout(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x = np.array([[-1, 1], [-1, 1]]).astype('float32')
-    x = paddle.to_tensor(x)
+    x = paddle.to_tensor([[-1, 1], [-1, 1]], dtype="float32")
     m = paddle.nn.AlphaDropout(p=0.5)
    y_train = m(x)
+    print(y_train)
+    # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[-0.77919382, 1.66559887],
+    #  [-0.77919382, -0.77919382]])
     m.eval() # switch the model to test phase
     y_test = m(x)
-    print(x)
-    print(y_train)
-    # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly)
     print(y_test)
+    # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [[-1., 1.],
+    #  [-1., 1.]])
     """
     def __init__(self, p=0.5, name=None):
@@ -1278,18 +1329,17 @@ class CosineSimilarity(Layer):
     import paddle
     import paddle.nn as nn
-    import numpy as np
-    np.random.seed(0)
-    x1 = np.random.rand(2,3)
-    x2 = np.random.rand(2,3)
-    x1 = paddle.to_tensor(x1)
-    x2 = paddle.to_tensor(x2)
+    x1 = paddle.to_tensor([[1., 2., 3.],
+                           [2., 3., 4.]], dtype="float32")
+    x2 = paddle.to_tensor([[8., 3., 3.],
+                           [2., 3., 4.]], dtype="float32")
     cos_sim_func = nn.CosineSimilarity(axis=0)
     result = cos_sim_func(x1, x2)
     print(result)
-    # [0.99806249 0.9817672 0.94987036]
+    # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [0.65079135, 0.98058069, 1. ])
     """
     def __init__(self, axis=1, eps=1e-8):
@@ -1376,30 +1426,33 @@ class Embedding(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
-    y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32)
-    x = paddle.to_tensor(x_data, stop_gradient=False)
-    y = paddle.to_tensor(y_data, stop_gradient=False)
-    embedding = paddle.nn.Embedding(10, 3, sparse=True)
-    w0=np.full(shape=(10, 3), fill_value=2).astype(np.float32)
+    x = paddle.to_tensor([[0], [1], [3]], dtype="int64", stop_gradient=False)
+    embedding = paddle.nn.Embedding(4, 3, sparse=True)
+    w0 = paddle.to_tensor([[0., 0., 0.],
+                           [1., 1., 1.],
+                           [2., 2., 2.],
+                           [3., 3., 3.]], dtype="float32")
     embedding.weight.set_value(w0)
+    print(embedding.weight)
+    # Tensor(shape=[4, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+    # [[0., 0., 0.],
+    #  [1., 1., 1.],
+    #  [2., 2., 2.],
+    #  [3., 3., 3.]])
     adam = paddle.optimizer.Adam(parameters=[embedding.weight], learning_rate=0.01)
     adam.clear_grad()
-    # weight.shape = [10, 3]
-    # x.data = [[3],[4],[5]]
-    # x.shape = [3, 1]
-    # out.data = [[2,2,2], [2,2,2], [2,2,2]]
-    # out.shape = [3, 1, 3]
-    out=embedding(x)
+    out = embedding(x)
+    print(out)
+    # Tensor(shape=[3, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+    # [[[0., 0., 0.]],
+    #  [[1., 1., 1.]],
+    #  [[3., 3., 3.]]])
     out.backward()
     adam.step()
...
...@@ -311,24 +311,24 @@ class Conv1D(_ConvNd): ...@@ -311,24 +311,24 @@ class Conv1D(_ConvNd):
import paddle import paddle
from paddle.nn import Conv1D from paddle.nn import Conv1D
import numpy as np
x = np.array([[[4, 8, 1, 9], x = paddle.to_tensor([[[4, 8, 1, 9],
[7, 2, 0, 9], [7, 2, 0, 9],
[6, 9, 2, 6]]]).astype(np.float32) [6, 9, 2, 6]]], dtype="float32")
w=np.array( w = paddle.to_tensor([[[9, 3, 4],
[[[9, 3, 4],
[0, 0, 7], [0, 0, 7],
[2, 5, 6]], [2, 5, 6]],
[[0, 3, 4], [[0, 3, 4],
[2, 9, 7], [2, 9, 7],
[5, 6, 8]]]).astype(np.float32) [5, 6, 8]]], dtype="float32")
x_t = paddle.to_tensor(x)
conv = Conv1D(3, 2, 3) conv = Conv1D(3, 2, 3)
conv.weight.set_value(w) conv.weight.set_value(w)
y_t = conv(x_t) y = conv(x)
print(y_t) print(y)
# [[[133. 238.] # Tensor(shape=[1, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [160. 211.]]] # [[[133., 238.],
# [160., 211.]]])
""" """
def __init__( def __init__(
...@@ -497,21 +497,20 @@ class Conv1DTranspose(_ConvNd): ...@@ -497,21 +497,20 @@ class Conv1DTranspose(_ConvNd):
import paddle import paddle
from paddle.nn import Conv1DTranspose from paddle.nn import Conv1DTranspose
import numpy as np
# shape: (1, 2, 4) # shape: (1, 2, 4)
x=np.array([[[4, 0, 9, 7], x = paddle.to_tensor([[[4, 0, 9, 7],
[8, 0, 9, 2]]]).astype(np.float32) [8, 0, 9, 2]]], dtype="float32")
# shape: (2, 1, 2) # shape: (2, 1, 2)
y=np.array([[[7, 0]], w = paddle.to_tensor([[[7, 0]],
[[4, 2]]]).astype(np.float32) [[4, 2]]], dtype="float32")
x_t = paddle.to_tensor(x)
conv = Conv1DTranspose(2, 1, 2)
conv.weight.set_value(y)
y_t = conv(x_t)
print(y_t)
# [[[60. 16. 99. 75. 4.]]] conv = Conv1DTranspose(2, 1, 2)
conv.weight.set_value(w)
y = conv(x)
print(y)
# Tensor(shape=[1, 1, 5], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[[60., 16., 99., 75., 4. ]]])
""" """
def __init__( def __init__(
......
@@ -765,16 +765,15 @@ class BCELoss(Layer):
     Examples:
     .. code-block:: python
-    import numpy as np
     import paddle
-    input_data = np.array([0.5, 0.6, 0.7]).astype("float32")
-    label_data = np.array([1.0, 0.0, 1.0]).astype("float32")
-    input = paddle.to_tensor(input_data)
-    label = paddle.to_tensor(label_data)
+    input = paddle.to_tensor([0.5, 0.6, 0.7])
+    label = paddle.to_tensor([1.0, 0.0, 1.0])
     bce_loss = paddle.nn.BCELoss()
     output = bce_loss(input, label)
-    print(output) # [0.65537095]
+    print(output)
+    # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [0.65537101])
     """
@@ -1078,7 +1077,6 @@ class CTCLoss(Layer):
     .. code-block:: python
     # declarative mode
-    import numpy as np
     import paddle
     # length of the longest logit sequence
@@ -1090,8 +1088,7 @@ class CTCLoss(Layer):
     # class num
     class_num = 3
-    np.random.seed(1)
-    log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
+    log_probs = paddle.to_tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
                            [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],
                           [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
@@ -1104,26 +1101,25 @@ class CTCLoss(Layer):
                           [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],
                           [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
-                           [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
-    labels = np.array([[1, 2, 2],
-                       [1, 2, 2]]).astype("int32")
-    input_lengths = np.array([5, 5]).astype("int64")
-    label_lengths = np.array([3, 3]).astype("int64")
-    log_probs = paddle.to_tensor(log_probs)
-    labels = paddle.to_tensor(labels)
-    input_lengths = paddle.to_tensor(input_lengths)
-    label_lengths = paddle.to_tensor(label_lengths)
+                           [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]], dtype="float32")
+    labels = paddle.to_tensor([[1, 2, 2],
+                               [1, 2, 2]], dtype="int32")
+    input_lengths = paddle.to_tensor([5, 5], dtype="int64")
+    label_lengths = paddle.to_tensor([3, 3], dtype="int64")
     loss = paddle.nn.CTCLoss(blank=0, reduction='none')(log_probs, labels,
                                                         input_lengths,
                                                         label_lengths)
-    print(loss) #[3.9179852 2.9076521]
+    print(loss)
+    # Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [3.91798496, 2.90765190])
     loss = paddle.nn.CTCLoss(blank=0, reduction='mean')(log_probs, labels,
                                                         input_lengths,
                                                         label_lengths)
-    print(loss) #[1.1376063]
+    print(loss)
+    # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [1.13760614])
     """
     def __init__(self, blank=0, reduction='mean'):
@@ -1858,20 +1854,29 @@ class SoftMarginLoss(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
     input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
     label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
     soft_margin_loss = paddle.nn.SoftMarginLoss()
     output = soft_margin_loss(input, label)
-    input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
-    label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
+    print(output)
+    # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+    # [0.64022040])
+    input_np = paddle.uniform(shape=(5, 5), min=0.1, max=0.8, dtype="float64")
+    label_np = paddle.randint(high=2, shape=(5, 5), dtype="int64")
     label_np[label_np==0]=-1
     input = paddle.to_tensor(input_np)
     label = paddle.to_tensor(label_np)
     soft_margin_loss = paddle.nn.SoftMarginLoss(reduction='none')
     output = soft_margin_loss(input, label)
+    print(output)
+    # Tensor(shape=[5, 5], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+    # [[0.61739663, 0.51405668, 1.09346100, 0.42385561, 0.91602303],
+    #  [0.76997038, 1.01977148, 0.98971722, 1.13976032, 0.88152088],
+    #  [0.55476735, 1.10505384, 0.89923519, 0.45018155, 1.06587511],
+    #  [0.37998142, 0.48067240, 0.47791212, 0.55664053, 0.98581399],
+    #  [0.78571653, 0.59319711, 0.39701841, 0.76172109, 0.83781742]])
     """
     def __init__(self, reduction='mean', name=None):
...
@@ -349,16 +349,12 @@ class GroupNorm(Layer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    paddle.disable_static()
-    np.random.seed(123)
-    x_data = np.random.random(size=(2, 6, 2, 2)).astype('float32')
-    x = paddle.to_tensor(x_data)
+    x = paddle.arange(48, dtype="float32").reshape((2, 6, 2, 2))
     group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6)
     group_norm_out = group_norm(x)
-    print(group_norm_out.numpy())
+    print(group_norm_out)
     """
     def __init__(
@@ -1123,18 +1119,23 @@ class SyncBatchNorm(_BatchNormBase):
     Examples:
     .. code-block:: python
+    # required: gpu
     import paddle
     import paddle.nn as nn
-    import numpy as np
-    x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
-    x = paddle.to_tensor(x)
+    x = paddle.to_tensor([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
     if paddle.is_compiled_with_cuda():
         sync_batch_norm = nn.SyncBatchNorm(2)
         hidden1 = sync_batch_norm(x)
         print(hidden1)
-        # [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]]
+        # Tensor(shape=[1, 2, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+        # [[[[ 0.26824948, 1.09363246],
+        #    [ 0.26824948, -1.63013160]],
+        #   [[ 0.80956620, -0.66528702],
+        #    [-1.27446556, 1.13018656]]]])
     """
     def __init__(
...
@@ -67,9 +67,8 @@ class AvgPool1D(Layer):
     import paddle
     import paddle.nn as nn
-    import numpy as np
-    data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+    data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
     AvgPool1D = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
     pool_out = AvgPool1D(data)
     # pool_out shape: [1, 3, 16]
@@ -172,10 +171,9 @@ class AvgPool2D(Layer):
     import paddle
     import paddle.nn as nn
-    import numpy as np
     # max pool2d
-    input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
+    input = paddle.uniform([1, 3, 32, 32], dtype="float32", min=-1, max=1)
     AvgPool2D = nn.AvgPool2D(kernel_size=2,
                     stride=2, padding=0)
     output = AvgPool2D(input)
@@ -270,10 +268,9 @@ class AvgPool3D(Layer):
     import paddle
     import paddle.nn as nn
-    import numpy as np
     # avg pool3d
-    input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 2, 3, 32, 32]).astype(np.float32))
+    input = paddle.uniform([1, 2, 3, 32, 32], dtype="float32", min=-1, max=1)
     AvgPool3D = nn.AvgPool3D(kernel_size=2,
                    stride=2, padding=0)
     output = AvgPool3D(input)
@@ -369,9 +366,8 @@ class MaxPool1D(Layer):
     import paddle
     import paddle.nn as nn
-    import numpy as np
-    data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+    data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
     MaxPool1D = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
     pool_out = MaxPool1D(data)
     # pool_out shape: [1, 3, 16]
@@ -475,10 +471,9 @@ class MaxPool2D(Layer):
     import paddle
     import paddle.nn as nn
-    import numpy as np
     # max pool2d
-    input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
+    input = paddle.uniform([1, 3, 32, 32], dtype="float32", min=-1, max=1)
     MaxPool2D = nn.MaxPool2D(kernel_size=2,
                    stride=2, padding=0)
     output = MaxPool2D(input)
@@ -573,10 +568,9 @@ class MaxPool3D(Layer):
     import paddle
     import paddle.nn as nn
-    import numpy as np
     # max pool3d
-    input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 2, 3, 32, 32]).astype(np.float32))
+    input = paddle.uniform([1, 2, 3, 32, 32], dtype="float32", min=-1, max=1)
     MaxPool3D = nn.MaxPool3D(kernel_size=2,
                    stride=2, padding=0)
     output = MaxPool3D(input)
@@ -668,9 +662,8 @@ class AdaptiveAvgPool1D(Layer):
     #
     import paddle
     import paddle.nn as nn
-    import numpy as np
-    data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+    data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
     AdaptiveAvgPool1D = nn.AdaptiveAvgPool1D(output_size=16)
     pool_out = AdaptiveAvgPool1D(data)
     # pool_out shape: [1, 3, 16]
@@ -747,11 +740,9 @@ class AdaptiveAvgPool2D(Layer):
     # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
     #
     import paddle
-    import numpy as np
-    input_data = np.random.rand(2, 3, 32, 32)
-    x = paddle.to_tensor(input_data)
-    # x.shape is [2, 3, 32, 32]
+    x = paddle.rand([2, 3, 32, 32])
     adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=3)
     pool_out = adaptive_avg_pool(x = x)
     # pool_out.shape is [2, 3, 3, 3]
@@ -841,11 +832,9 @@ class AdaptiveAvgPool3D(Layer):
     # output[:, :, i, j, k] =
     #   avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
     import paddle
-    import numpy as np
-    input_data = np.random.rand(2, 3, 8, 32, 32)
-    x = paddle.to_tensor(input_data)
-    # x.shape is [2, 3, 8, 32, 32]
+    x = paddle.rand([2, 3, 8, 32, 32])
     adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=3)
     pool_out = adaptive_avg_pool(x = x)
     # pool_out = [2, 3, 3, 3, 3]
@@ -921,9 +910,8 @@ class AdaptiveMaxPool1D(Layer):
     #
     import paddle
     import paddle.nn as nn
-    import numpy as np
-    data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+    data = paddle.uniform([1, 3, 32], dtype="float32", min=-1, max=1)
     AdaptiveMaxPool1D = nn.AdaptiveMaxPool1D(output_size=16)
     pool_out = AdaptiveMaxPool1D(data)
     # pool_out shape: [1, 3, 16]
@@ -1007,10 +995,9 @@ class AdaptiveMaxPool2D(Layer):
     # output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
     #
     import paddle
-    import numpy as np
-    input_data = np.random.rand(2, 3, 32, 32)
-    x = paddle.to_tensor(input_data)
+    x = paddle.rand([2, 3, 32, 32])
     adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=3, return_mask=True)
     pool_out, indices = adaptive_max_pool(x = x)
     """
@@ -1097,10 +1084,8 @@ class AdaptiveMaxPool3D(Layer):
     # output[:, :, i, j, k] =
     #   max(input[:, :, dstart:dend, hstart: hend, wstart: wend])
     import paddle
-    import numpy as np
-    input_data = np.random.rand(2, 3, 8, 32, 32)
-    x = paddle.to_tensor(input_data)
+    x = paddle.rand([2, 3, 8, 32, 32])
     pool = paddle.nn.AdaptiveMaxPool3D(output_size=4)
     out = pool(x)
     # out shape: [2, 3, 4, 4, 4]
...
@@ -46,7 +46,6 @@ def export(layer, path, input_spec=None, opset_version=9, **configs):
     .. code-block:: python
     import paddle
-    import numpy as np
     class LinearNet(paddle.nn.Layer):
         def __init__(self):
@@ -77,8 +76,8 @@ def export(layer, path, input_spec=None, opset_version=9, **configs):
     # Export model with 'Tensor' to support pruned model by set 'output_spec'.
     def export_logic():
         model = Logic()
-        x = paddle.to_tensor(np.array([1]))
-        y = paddle.to_tensor(np.array([2]))
+        x = paddle.to_tensor([1])
+        y = paddle.to_tensor([2])
         # Static and run model.
         paddle.jit.to_static(model)
         out = model(x, y, z=True)
...
@@ -68,10 +68,9 @@ class Adadelta(Optimizer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
+    inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
     linear = paddle.nn.Linear(10, 10)
-    inp = paddle.to_tensor(inp)
     out = linear(inp)
     loss = paddle.mean(out)
     beta1 = paddle.to_tensor([0.9], dtype="float32")
...
@@ -84,9 +84,8 @@ class Adamax(Optimizer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
+    inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
     linear = paddle.nn.Linear(10, 10)
     inp = paddle.to_tensor(inp)
     out = linear(inp)
...
@@ -83,8 +83,8 @@ class Momentum(Optimizer):
     .. code-block:: python
     import paddle
-    import numpy as np
-    inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
+    inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
     linear = paddle.nn.Linear(10, 10)
     inp = paddle.to_tensor(inp)
     out = linear(inp)
...
@@ -1004,14 +1004,13 @@ class Optimizer(object):
     .. code-block:: python
     import paddle
-    import numpy as np
-    value = np.arange(26).reshape(2, 13).astype("float32")
-    a = paddle.to_tensor(value)
+    x = paddle.arange(26, dtype="float32").reshape([2, 13])
     linear = paddle.nn.Linear(13, 5)
     # This can be any optimizer supported by dygraph.
     adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                  parameters = linear.parameters())
-    out = linear(a)
+    out = linear(x)
     out.backward()
     adam.step()
     adam.clear_grad()
@@ -1081,11 +1080,9 @@ class Optimizer(object):
     .. code-block:: python
     import paddle
-    import numpy as np
-    inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
+    inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
     linear = paddle.nn.Linear(10, 10)
-    inp = paddle.to_tensor(inp)
     out = linear(inp)
     loss = paddle.mean(out)
     optimizer = paddle.optimizer.Adam(learning_rate=0.1,
@@ -1286,11 +1283,9 @@ class Optimizer(object):
     Examples:
     .. code-block:: python
-    import numpy as np
     import paddle
-    value = np.arange(26).reshape(2, 13).astype("float32")
-    a = paddle.to_tensor(value)
+    a = paddle.arange(26, dtype="float32").reshape([2, 13])
     linear = paddle.nn.Linear(13, 5)
     # This can be any optimizer supported by dygraph.
     adam = paddle.optimizer.Adam(learning_rate = 0.01,
@@ -1396,10 +1391,8 @@ class Optimizer(object):
     .. code-block:: python
     import paddle
-    import numpy as np
-    value = np.arange(26).reshape(2, 13).astype("float32")
-    a = paddle.to_tensor(value)
+    a = paddle.arange(26, dtype="float32").reshape([2, 13])
     linear = paddle.nn.Linear(13, 5)
     # This can be any optimizer supported by dygraph.
     adam = paddle.optimizer.Adam(learning_rate = 0.01,
...
...@@ -183,13 +183,12 @@ class InputSpec(object): ...@@ -183,13 +183,12 @@ class InputSpec(object):
Examples: Examples:
.. code-block:: python .. code-block:: python
import numpy as np
import paddle import paddle
from paddle.static import InputSpec from paddle.static import InputSpec
paddle.disable_static() paddle.disable_static()
x = paddle.to_tensor(np.ones([2, 2], np.float32)) x = paddle.ones([2, 2], dtype="float32")
x_spec = InputSpec.from_tensor(x, name='x') x_spec = InputSpec.from_tensor(x, name='x')
print(x_spec) # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x) print(x_spec) # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)
......
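For context, a sketch of where `InputSpec.from_tensor` typically flows. The `paddle.jit.to_static` wiring and the Linear layer are illustrative assumptions, not part of the patched docstring:

```python
import paddle
from paddle.static import InputSpec

x = paddle.ones([2, 2], dtype="float32")
x_spec = InputSpec.from_tensor(x, name='x')

# Compile a layer against the declared signature; the layer is illustrative.
net = paddle.jit.to_static(paddle.nn.Linear(2, 4), input_spec=[x_spec])
print(net(x).shape)  # [2, 4]
```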
...@@ -757,69 +757,82 @@ def cond(x, p=None, name=None): ...@@ -757,69 +757,82 @@ def cond(x, p=None, name=None):
.. code-block:: python .. code-block:: python
import paddle import paddle
import numpy as np
x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]])
# compute condition number when p is None # compute condition number when p is None
out = paddle.linalg.cond(x) out = paddle.linalg.cond(x)
# out.numpy() [1.4142135] # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.41421342])
# compute condition number when order of the norm is 'fro' # compute condition number when order of the norm is 'fro'
out_fro = paddle.linalg.cond(x, p='fro') out_fro = paddle.linalg.cond(x, p='fro')
# out_fro.numpy() [3.1622777] # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [3.16227770])
# compute condition number when order of the norm is 'nuc' # compute condition number when order of the norm is 'nuc'
out_nuc = paddle.linalg.cond(x, p='nuc') out_nuc = paddle.linalg.cond(x, p='nuc')
# out_nuc.numpy() [9.2426405] # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [9.24263859])
# compute condition number when order of the norm is 1 # compute condition number when order of the norm is 1
out_1 = paddle.linalg.cond(x, p=1) out_1 = paddle.linalg.cond(x, p=1)
# out_1.numpy() [2.] # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [2.])
# compute condition number when order of the norm is -1 # compute condition number when order of the norm is -1
out_minus_1 = paddle.linalg.cond(x, p=-1) out_minus_1 = paddle.linalg.cond(x, p=-1)
# out_minus_1.numpy() [1.] # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.])
# compute condition number when order of the norm is 2 # compute condition number when order of the norm is 2
out_2 = paddle.linalg.cond(x, p=2) out_2 = paddle.linalg.cond(x, p=2)
# out_2.numpy() [1.4142135] # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.41421342])
# compute condition number when order of the norm is -2 # compute condition number when order of the norm is -2
out_minus_2 = paddle.linalg.cond(x, p=-2) out_minus_2 = paddle.linalg.cond(x, p=-2)
# out_minus_2.numpy() [0.70710677] # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [0.70710683])
# compute condition number when order of the norm is inf # compute condition number when order of the norm is inf
out_inf = paddle.linalg.cond(x, p=np.inf) out_inf = paddle.linalg.cond(x, p=float("inf"))
# out_inf.numpy() [2.] # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [2.])
# compute condition number when order of the norm is -inf # compute condition number when order of the norm is -inf
out_minus_inf = paddle.linalg.cond(x, p=-np.inf) out_minus_inf = paddle.linalg.cond(x, p=-float("inf"))
# out_minus_inf.numpy() [1.] # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [1.])
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32'))
# a.numpy() a = paddle.randn([2, 4, 4])
# [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # Tensor(shape=[2, 4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [[[-0.06784091, -0.07095790, 1.31792855, -0.58959651],
# [-1.1341571 -0.6022629 0.5445269 0.29154757] # [ 0.20818676, -0.85640615, -0.89998871, -1.47439921],
# [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [-0.49132481, 0.42250812, -0.77383220, -2.19794774],
# [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.33551720, -1.70003879, -1.09795380, -0.63737559]],
# [-0.15178485 -1.1604939 0.75810957 0.30971205]
# [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [[ 1.12026262, -0.16119350, -1.21157813, 2.74383283],
# [-1.216529 2.0018666 -0.7773689 -0.17556527]]] # [-0.15999718, 0.18798758, -0.69392562, 1.35720372],
# [-0.53013402, -2.26304483, 1.40843511, -1.02288902],
# [ 0.69533503, 2.05261683, -0.02251151, -1.43127477]]])
a_cond_fro = paddle.linalg.cond(a, p='fro') a_cond_fro = paddle.linalg.cond(a, p='fro')
# a_cond_fro.numpy() [31.572273 28.120834] # Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [8.86691189 , 75.23817444])
b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64'))
# b.numpy() b = paddle.randn([2, 3, 4])
# [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # Tensor(shape=[2, 3, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-1.72710298 0.08866375 -0.62518804 0.16128892] # [[[-0.43754861, 1.80796063, -0.78729683, -1.82264030],
# [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [-0.27670753, 0.06620564, 0.29072434, -0.31155765],
# [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [ 0.34123746, -0.05444612, 0.05001324, -1.46877074]],
# [-0.76620689 0.56673047 0.85064753 -0.45158196]
# [ 1.47595418 2.23646462 1.5701758 0.10497519]]] # [[-0.64331555, -1.51103854, -1.26277697, -0.68024760],
# [ 2.59375715, -1.06665540, 0.96575671, -0.73330832],
# [-0.47064447, -0.23945692, -0.95150250, -1.07125998]]])
b_cond_2 = paddle.linalg.cond(b, p=2) b_cond_2 = paddle.linalg.cond(b, p=2)
# b_cond_2.numpy() [3.30064451 2.51976252] # Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [6.64228773, 3.89068866])
""" """
...@@ -1503,18 +1516,13 @@ def cholesky(x, upper=False, name=None): ...@@ -1503,18 +1516,13 @@ def cholesky(x, upper=False, name=None):
.. code-block:: python .. code-block:: python
import paddle import paddle
import numpy as np
a = np.random.rand(3, 3) a = paddle.rand([3, 3], dtype="float32")
a_t = np.transpose(a, [1, 0]) a_t = paddle.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03 x = paddle.matmul(a, a_t) + 1e-03
x = paddle.to_tensor(x_data)
out = paddle.linalg.cholesky(x, upper=False) out = paddle.linalg.cholesky(x, upper=False)
print(out) print(out)
# [[1.190523 0. 0. ]
# [0.9906703 0.27676893 0. ]
# [1.25450498 0.05600871 0.06400121]]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.cholesky(x, upper) return _C_ops.cholesky(x, upper)
......
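The rewritten cholesky example rightly drops the hard-coded output, since the input is now random and unseeded. A seed-free sanity check works instead: the lower-triangular factor should reconstruct the symmetric positive-definite input up to float32 tolerance.

```python
import paddle

a = paddle.rand([3, 3], dtype="float32")
x = paddle.matmul(a, paddle.transpose(a, [1, 0])) + 1e-03
L = paddle.linalg.cholesky(x, upper=False)
# L @ L.T should give back x (within float32 round-off).
print(paddle.allclose(paddle.matmul(L, paddle.transpose(L, [1, 0])), x,
                      rtol=1e-4, atol=1e-5))  # True
```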
...@@ -174,15 +174,11 @@ def yolo_loss( ...@@ -174,15 +174,11 @@ def yolo_loss(
.. code-block:: python .. code-block:: python
import paddle import paddle
import numpy as np
x = np.random.random([2, 14, 8, 8]).astype('float32') x = paddle.rand([2, 14, 8, 8], dtype='float32')
gt_box = np.random.random([2, 10, 4]).astype('float32') gt_box = paddle.rand([2, 10, 4], dtype='float32')
gt_label = np.random.random([2, 10]).astype('int32') gt_label = paddle.randint(0, 2, [2, 10], dtype='int32')
x = paddle.to_tensor(x)
gt_box = paddle.to_tensor(gt_box)
gt_label = paddle.to_tensor(gt_label)
loss = paddle.vision.ops.yolo_loss(x, loss = paddle.vision.ops.yolo_loss(x,
gt_box=gt_box, gt_box=gt_box,
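The yolo_loss call is truncated by the hunk, so here is a hedged, runnable completion. The anchors, anchor_mask, class_num, and remaining arguments are assumptions, sized so the 14-channel input matches len(anchor_mask) * (5 + class_num) = 2 * (5 + 2); note that casting `paddle.rand` output to int32 would yield all-zero labels, hence `paddle.randint` above. The same shape reasoning applies to the yolo_box hunk below.

```python
import paddle

x = paddle.rand([2, 14, 8, 8], dtype='float32')
gt_box = paddle.rand([2, 10, 4], dtype='float32')
gt_label = paddle.randint(0, 2, [2, 10], dtype='int32')  # labels in [0, class_num)

# Arguments below are illustrative assumptions, not taken from the hunk.
loss = paddle.vision.ops.yolo_loss(x,
                                   gt_box=gt_box,
                                   gt_label=gt_label,
                                   anchors=[10, 13, 16, 30],
                                   anchor_mask=[0, 1],
                                   class_num=2,
                                   ignore_thresh=0.7,
                                   downsample_ratio=8)
print(loss.shape)  # [2], one loss value per batch sample
```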
...@@ -391,13 +387,9 @@ def yolo_box( ...@@ -391,13 +387,9 @@ def yolo_box(
.. code-block:: python .. code-block:: python
import paddle import paddle
import numpy as np
x = np.random.random([2, 14, 8, 8]).astype('float32') x = paddle.rand([2, 14, 8, 8], dtype='float32')
img_size = np.ones((2, 2)).astype('int32') img_size = paddle.ones([2, 2], dtype='int32')
x = paddle.to_tensor(x)
img_size = paddle.to_tensor(img_size)
boxes, scores = paddle.vision.ops.yolo_box(x, boxes, scores = paddle.vision.ops.yolo_box(x,
img_size=img_size, img_size=img_size,
...@@ -2118,33 +2110,36 @@ def nms( ...@@ -2118,33 +2110,36 @@ def nms(
.. code-block:: python .. code-block:: python
import paddle import paddle
import numpy as np
boxes = np.random.rand(4, 4).astype('float32') boxes = paddle.rand([4, 4], dtype='float32')
boxes[:, 2] = boxes[:, 0] + boxes[:, 2] boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
boxes[:, 3] = boxes[:, 1] + boxes[:, 3] boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
# [[0.06287421 0.5809351 0.3443958 0.8713329 ] print(boxes)
# [0.0749094 0.9713205 0.99241287 1.2799143 ] # Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [0.46246734 0.6753201 1.346266 1.3821303 ] # [[0.64811575, 0.89756244, 0.86473107, 1.48552322],
# [0.8984796 0.5619834 1.1254641 1.0201943 ]] # [0.48085716, 0.84799081, 0.54517937, 0.86396021],
# [0.62646860, 0.72901905, 1.17392159, 1.69691563],
# [0.89729202, 0.46281594, 1.88733089, 0.98588502]])
out = paddle.vision.ops.nms(paddle.to_tensor(boxes), 0.1) out = paddle.vision.ops.nms(boxes, 0.1)
# [0, 1, 3, 0] print(out)
# Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [0, 1, 3])
scores = np.random.rand(4).astype('float32') scores = paddle.to_tensor([0.6, 0.7, 0.4, 0.233])
# [0.98015213 0.3156527 0.8199343 0.874901 ]
categories = [0, 1, 2, 3] categories = [0, 1, 2, 3]
category_idxs = np.random.choice(categories, 4) category_idxs = paddle.to_tensor([2, 0, 0, 3], dtype="int64")
# [2 0 0 3]
out = paddle.vision.ops.nms(paddle.to_tensor(boxes), out = paddle.vision.ops.nms(boxes,
0.1, 0.1,
paddle.to_tensor(scores), scores,
paddle.to_tensor(category_idxs), category_idxs,
categories, categories,
4) 4)
# [0, 3, 2] print(out)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [1, 0, 2, 3])
""" """
def _nms(boxes, iou_threshold): def _nms(boxes, iou_threshold):
......
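The `_nms` helper is elided above. As a sanity check on the categorized example, a small sketch follows; the `pairwise_iou` helper is hypothetical, not part of the patch, and simply computes the IoU matrix that greedy NMS consumes, where entries above `iou_threshold` mark suppression pairs:

```python
import paddle

def pairwise_iou(boxes):
    # boxes: [N, 4] in [x1, y1, x2, y2] format.
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1) * (y2 - y1)
    # Intersection width/height via broadcast min/max, clipped at 0.
    inter_w = (paddle.minimum(x2.unsqueeze(1), x2.unsqueeze(0))
               - paddle.maximum(x1.unsqueeze(1), x1.unsqueeze(0))).clip(0)
    inter_h = (paddle.minimum(y2.unsqueeze(1), y2.unsqueeze(0))
               - paddle.maximum(y1.unsqueeze(1), y1.unsqueeze(0))).clip(0)
    inter = inter_w * inter_h
    return inter / (area.unsqueeze(1) + area.unsqueeze(0) - inter)

boxes = paddle.rand([4, 4], dtype='float32')
boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
print(pairwise_iou(boxes))  # [4, 4] IoU matrix; diagonal entries are 1
```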