Commit b43239f9 authored by Kevin吴嘉文, committed by 梦柳

fix numpy issue in codeblock examples (#47042)

Parent 7bef9603
@@ -1120,7 +1120,6 @@ class Fleet(object):
     Examples:
         .. code-block:: python
-            import numpy as np
             import paddle
             import paddle.nn.functional as F
             paddle.enable_static()
@@ -43,7 +43,7 @@ class L1Decay(fluid.regularizer.L1Decay):
             # Example1: set Regularizer in optimizer
             import paddle
             from paddle.regularizer import L1Decay
-            import numpy as np
             linear = paddle.nn.Linear(10, 10)
             inp = paddle.rand(shape=[10, 10], dtype="float32")
             out = linear(inp)
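These first two hunks are the simplest case of the cleanup: the examples never actually used numpy, so the import is dropped. The remaining hunks all follow one recurring rewrite pattern, sketched below with values taken from the scatter_nd hunk further down (the `place` field of printed tensors depends on the build, so it is elided here):

```python
import paddle

# Before: examples built inputs through numpy and inspected results
# via a numpy copy:
#     import numpy as np
#     index_data = np.array([[1, 1], [0, 1]]).astype(np.int64)
#     index = paddle.to_tensor(index_data)
#     np_out = out.numpy()

# After: build tensors natively and print them directly.
index = paddle.to_tensor([[1, 1], [0, 1]], dtype="int64")
print(index)
# Tensor(shape=[2, 2], dtype=int64, place=..., stop_gradient=True,
#        [[1, 1],
#         [0, 1]])
```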
@@ -2276,21 +2276,33 @@ def unique_consecutive(
             x = paddle.to_tensor([1, 1, 2, 2, 3, 1, 1, 2])
             output = paddle.unique_consecutive(x) #
-            np_output = output.numpy() # [1 2 3 1 2]
+            print(output)
+            # Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [1, 2, 3, 1, 2])
             _, inverse, counts = paddle.unique_consecutive(x, return_inverse=True, return_counts=True)
-            np_inverse = inverse.numpy() # [0 0 1 1 2 3 3 4]
-            np_counts = inverse.numpy() # [2 2 1 2 1]
+            print(inverse)
+            # Tensor(shape=[8], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [0, 0, 1, 1, 2, 3, 3, 4])
+            print(counts)
+            # Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [2, 2, 1, 2, 1])
             x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3], [2, 1, 3]])
             output = paddle.unique_consecutive(x, axis=0) #
-            np_output = output.numpy() # [2 1 3 0 1 2 1 3 2 1 3]
+            print(output)
+            # Tensor(shape=[3, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [[2, 1, 3],
+            #         [3, 0, 1],
+            #         [2, 1, 3]])
             x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3], [2, 1, 3]])
             output = paddle.unique_consecutive(x, axis=0) #
-            np_output = output.numpy()
-            # [[2 1 3]
-            #  [3 0 1]
-            #  [2 1 3]]
+            print(output)
+            # Tensor(shape=[3, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [[2, 1, 3],
+            #         [3, 0, 1],
+            #         [2, 1, 3]])
     """
     if axis is None:
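Besides dropping numpy, this hunk fixes a real error: the old example read the counts through `inverse.numpy()` rather than `counts.numpy()`, even though its comment showed count values. A minimal sketch of how the three returned tensors relate, assuming dynamic-graph mode:

```python
import paddle

x = paddle.to_tensor([1, 1, 2, 2, 3, 1, 1, 2])

# With both flags set, the call returns (output, inverse, counts):
#   output  - consecutive duplicates collapsed: [1, 2, 3, 1, 2]
#   inverse - for each element of x, its index into output
#   counts  - length of each consecutive run in x
output, inverse, counts = paddle.unique_consecutive(
    x, return_inverse=True, return_counts=True
)

# The invariant tying them together: gathering output by inverse rebuilds x.
assert (paddle.gather(output, inverse) == x).all()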
@@ -2411,18 +2423,33 @@ def unique(
             unique = paddle.unique(x)
             np_unique = unique.numpy() # [1 2 3 5]
             _, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
-            np_indices = indices.numpy() # [3 0 1 4]
-            np_inverse = inverse.numpy() # [1 2 2 0 3 2]
-            np_counts = counts.numpy() # [1 1 3 1]
+            print(indices)
+            # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [3, 0, 1, 4])
+            print(inverse)
+            # Tensor(shape=[6], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [1, 2, 2, 0, 3, 2])
+            print(counts)
+            # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [1, 1, 3, 1])
             x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
             unique = paddle.unique(x)
-            np_unique = unique.numpy() # [0 1 2 3]
+            print(unique)
+            # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [0, 1, 2, 3])
             unique = paddle.unique(x, axis=0)
-            np_unique = unique.numpy()
-            # [[2 1 3]
-            #  [3 0 1]]
+            print(unique)
+            # Tensor(shape=[2, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [[2, 1, 3],
+            #         [3, 0, 1]])
     """
     if axis is None:
         axis = []
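Worth noting about the example itself: without `axis`, `unique` flattens the input and returns sorted unique scalars, while `axis=0` compares whole rows, which is why the same `x` yields `[0, 1, 2, 3]` in one call and two rows in the other. A short sketch of that contrast:

```python
import paddle

x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])

# Default: flatten, sort, deduplicate scalars.
print(paddle.unique(x))          # [0, 1, 2, 3]

# axis=0: rows are the units; the repeated [2, 1, 3] row collapses.
print(paddle.unique(x, axis=0))  # [[2, 1, 3], [3, 0, 1]]
```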
@@ -3032,12 +3059,10 @@ def scatter_nd(index, updates, shape, name=None):
         .. code-block:: python
             import paddle
-            import numpy as np
-            index_data = np.array([[1, 1],
-                                   [0, 1],
-                                   [1, 3]]).astype(np.int64)
-            index = paddle.to_tensor(index_data)
+            index = paddle.to_tensor([[1, 1],
+                                      [0, 1],
+                                      [1, 3]], dtype="int64")
             updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
             shape = [3, 5, 9, 10]
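For readers of the example: `scatter_nd` starts from a zero tensor of the given `shape` and adds each slice of `updates` at the coordinates in `index`, summing where coordinates repeat. A smaller 1-D sketch (illustrative values, not from the docstring):

```python
import paddle

index = paddle.to_tensor([[1], [2], [1]], dtype="int64")  # target positions
updates = paddle.to_tensor([9.0, 10.0, 11.0])             # values to scatter
out = paddle.scatter_nd(index, updates, shape=[4])
print(out)
# Position 1 receives 9 + 11 = 20 because index [1] appears twice.
# [0., 20., 10., 0.]
```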
@@ -3109,19 +3134,22 @@ def tile(x, repeat_times, name=None):
             data = paddle.to_tensor([1, 2, 3], dtype='int32')
             out = paddle.tile(data, repeat_times=[2, 1])
-            np_out = out.numpy()
-            # [[1, 2, 3]
-            #  [1, 2, 3]]
+            print(out)
+            # Tensor(shape=[2, 3], dtype=int32, place=Place(gpu:0), stop_gradient=True,
+            #        [[1, 2, 3],
+            #         [1, 2, 3]])
             out = paddle.tile(data, repeat_times=(2, 2))
-            np_out = out.numpy()
-            # [[1, 2, 3, 1, 2, 3]
-            #  [1, 2, 3, 1, 2, 3]]
+            print(out)
+            # Tensor(shape=[2, 6], dtype=int32, place=Place(gpu:0), stop_gradient=True,
+            #        [[1, 2, 3, 1, 2, 3],
+            #         [1, 2, 3, 1, 2, 3]])
             repeat_times = paddle.to_tensor([1, 2], dtype='int32')
             out = paddle.tile(data, repeat_times=repeat_times)
-            np_out = out.numpy()
-            # [[1, 2, 3, 1, 2, 3]]
+            print(out)
+            # Tensor(shape=[1, 6], dtype=int32, place=Place(gpu:0), stop_gradient=True,
+            #        [[1, 2, 3, 1, 2, 3]])
     """
     if in_dygraph_mode():
         if isinstance(repeat_times, core.eager.Tensor):
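The output shapes above follow from `tile` left-padding the shorter of the input shape and `repeat_times` with ones before multiplying them elementwise: a `[3]` input with `repeat_times=[2, 1]` is treated as `[1, 3]` and becomes `[2, 3]`. A hypothetical helper (not part of the Paddle API) that reproduces the rule:

```python
# Hypothetical helper illustrating how tile's output shape is derived.
def tiled_shape(x_shape, repeat_times):
    rank = max(len(x_shape), len(repeat_times))
    xs = [1] * (rank - len(x_shape)) + list(x_shape)            # left-pad input shape
    rs = [1] * (rank - len(repeat_times)) + list(repeat_times)  # left-pad repeats
    return [d * r for d, r in zip(xs, rs)]

assert tiled_shape([3], [2, 1]) == [2, 3]
assert tiled_shape([3], (2, 2)) == [2, 6]
assert tiled_shape([3], [1, 2]) == [1, 6]
```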
@@ -3221,8 +3249,10 @@ def expand_as(x, y, name=None):
             data_x = paddle.to_tensor([1, 2, 3], 'int32')
             data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
             out = paddle.expand_as(data_x, data_y)
-            np_out = out.numpy()
-            # [[1, 2, 3], [1, 2, 3]]
+            print(out)
+            # Tensor(shape=[2, 3], dtype=int32, place=Place(gpu:0), stop_gradient=True,
+            #        [[1, 2, 3],
+            #         [1, 2, 3]])
     """
     if in_dygraph_mode():
         return _C_ops.expand_as(x, None, y.shape)
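`expand_as(x, y)` broadcasts `x` to `y`'s shape, so it is expected to agree with `paddle.expand` given that shape; a quick sketch of the equivalence, assuming dynamic-graph mode:

```python
import paddle

x = paddle.to_tensor([1, 2, 3], 'int32')
y = paddle.zeros([2, 3], dtype='int32')

# expand_as uses y only for its shape; x ([3]) is broadcast to [2, 3].
a = paddle.expand_as(x, y)
b = paddle.expand(x, shape=y.shape)
assert (a == b).all()
```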
@@ -4232,10 +4262,11 @@ def as_complex(x, name=None):
             import paddle
             x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
             y = paddle.as_complex(x)
-            print(y.numpy())
-            # [[ 0. +1.j  2. +3.j  4. +5.j]
-            #  [ 6. +7.j  8. +9.j 10.+11.j]]
+            print(y)
+            # Tensor(shape=[2, 3], dtype=complex64, place=Place(gpu:0), stop_gradient=True,
+            #        [[1j     , (2+3j) , (4+5j) ],
+            #         [(6+7j) , (8+9j) , (10+11j)]])
     """
     if in_dygraph_mode():
         return _C_ops.as_complex(x)
@@ -4279,15 +4310,16 @@ def as_real(x, name=None):
             x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
             y = paddle.as_complex(x)
             z = paddle.as_real(y)
-            print(z.numpy())
-            # [[[ 0.  1.]
-            #   [ 2.  3.]
-            #   [ 4.  5.]]
-            #  [[ 6.  7.]
-            #   [ 8.  9.]
-            #   [10. 11.]]]
+            print(z)
+            # Tensor(shape=[2, 3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[[0. , 1. ],
+            #          [2. , 3. ],
+            #          [4. , 5. ]],
+            #         [[6. , 7. ],
+            #          [8. , 9. ],
+            #          [10., 11.]]])
     """
     if in_dygraph_mode():
         return _C_ops.as_real(x)
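The last two hunks document a pair of inverses: `as_complex` folds a trailing size-2 axis into real/imaginary parts, and `as_real` unfolds it again, so composing them is an identity. A minimal round-trip check, assuming a float32 input whose last dimension is 2:

```python
import paddle

x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])

# float32 [..., 2] -> complex64 [...] -> float32 [..., 2]
z = paddle.as_real(paddle.as_complex(x))
assert z.shape == x.shape
assert (z == x).all()
```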