未验证 提交 460d5040 编写于 作者: K Kevin吴嘉文 提交者: GitHub

Remove redundant numpy input in Example code, test=document_fix (#47916)

上级 b7e120d2
...@@ -71,7 +71,6 @@ def global_scatter( ...@@ -71,7 +71,6 @@ def global_scatter(
.. code-block:: python .. code-block:: python
# required: distributed # required: distributed
import numpy as np
import paddle import paddle
from paddle.distributed import init_parallel_env from paddle.distributed import init_parallel_env
init_parallel_env() init_parallel_env()
...@@ -79,17 +78,14 @@ def global_scatter( ...@@ -79,17 +78,14 @@ def global_scatter(
world_size = 2 world_size = 2
d_model = 2 d_model = 2
in_feat = d_model in_feat = d_model
local_input_buf = np.array([[1, 2],[3, 4],[5, 6],[7, 8],[9, 10]], \ local_input_buf = paddle.to_tensor([[1, 2],[3, 4],[5, 6],[7, 8],[9, 10]], \
dtype=np.float32) dtype='float32', stop_gradient=False)
if paddle.distributed.ParallelEnv().local_rank == 0: if paddle.distributed.ParallelEnv().local_rank == 0:
local_count = np.array([2, 1, 1, 1]) local_count = paddle.to_tensor([2, 1, 1, 1], dtype="int64")
global_count = np.array([2, 1, 1, 1]) global_count = paddle.to_tensor([2, 1, 1, 1], dtype="int64")
else: else:
local_count = np.array([1, 1, 2, 1]) local_count = paddle.to_tensor([1, 1, 2, 1], dtype="int64")
global_count = np.array([1, 1, 2, 1]) global_count = paddle.to_tensor([1, 1, 2, 1], dtype="int64")
local_input_buf = paddle.to_tensor(local_input_buf, dtype="float32", stop_gradient=False)
local_count = paddle.to_tensor(local_count, dtype="int64")
global_count = paddle.to_tensor(global_count, dtype="int64")
a = paddle.distributed.utils.global_scatter(local_input_buf, \ a = paddle.distributed.utils.global_scatter(local_input_buf, \
local_count, global_count) local_count, global_count)
a.stop_gradient = False a.stop_gradient = False
...@@ -193,7 +189,6 @@ def global_gather( ...@@ -193,7 +189,6 @@ def global_gather(
.. code-block:: python .. code-block:: python
# required: distributed # required: distributed
import numpy as np
import paddle import paddle
from paddle.distributed import init_parallel_env from paddle.distributed import init_parallel_env
init_parallel_env() init_parallel_env()
...@@ -201,17 +196,15 @@ def global_gather( ...@@ -201,17 +196,15 @@ def global_gather(
world_size = 2 world_size = 2
d_model = 2 d_model = 2
in_feat = d_model in_feat = d_model
local_input_buf = np.array([[1, 2],[3, 4],[5, 6],[7, 8],[9, 10]],\ local_input_buf = paddle.to_tensor([[1, 2],[3, 4],[5, 6],[7, 8],[9, 10]],\
dtype=np.float32) dtype='float32', stop_gradient=False)
if paddle.distributed.ParallelEnv().local_rank == 0: if paddle.distributed.ParallelEnv().local_rank == 0:
local_count = np.array([2, 1, 1, 1]) local_count = paddle.to_tensor([2, 1, 1, 1], dtype="int64")
global_count = np.array([2, 1, 1, 1]) global_count = paddle.to_tensor([2, 1, 1, 1], dtype="int64")
else: else:
local_count = np.array([1, 1, 2, 1]) local_count = paddle.to_tensor([1, 1, 2, 1], dtype="int64")
global_count = np.array([1, 1, 2, 1]) global_count = paddle.to_tensor([1, 1, 2, 1], dtype="int64")
local_input_buf = paddle.to_tensor(local_input_buf, dtype="float32", stop_gradient=False)
local_count = paddle.to_tensor(local_count, dtype="int64")
global_count = paddle.to_tensor(global_count, dtype="int64")
a = paddle.distributed.utils.global_gather(local_input_buf, local_count, global_count) a = paddle.distributed.utils.global_gather(local_input_buf, local_count, global_count)
print(a) print(a)
# out for rank 0: [[1, 2], [3, 4], [7, 8], [1, 2], [7, 8]] # out for rank 0: [[1, 2], [3, 4], [7, 8], [1, 2], [7, 8]]
......
...@@ -521,26 +521,29 @@ def fftn(x, s=None, axes=None, norm="backward", name=None): ...@@ -521,26 +521,29 @@ def fftn(x, s=None, axes=None, norm="backward", name=None):
.. code-block:: python .. code-block:: python
import numpy as np
import paddle import paddle
x = np.mgrid[:4, :4, :4][1] arr = paddle.arange(4, dtype="float64")
xp = paddle.to_tensor(x) x = paddle.meshgrid(arr, arr, arr)[1]
fftn_xp = paddle.fft.fftn(xp, axes=(1, 2)).numpy()
print(fftn_xp) fftn_xp = paddle.fft.fftn(x, axes=(1, 2))
# [[[24.+0.j 0.+0.j 0.+0.j 0.-0.j] print(fftn_xp.numpy())
# [[[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.+8.j 0.+0.j 0.+0.j 0.-0.j] # [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.+0.j 0.+0.j 0.+0.j 0.-0.j] # [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]] # [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
# [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.+8.j 0.+0.j 0.+0.j 0.-0.j] # [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.+0.j 0.+0.j 0.+0.j 0.-0.j] # [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]] # [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
# [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.+8.j 0.+0.j 0.+0.j 0.-0.j] # [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.+0.j 0.+0.j 0.+0.j 0.-0.j] # [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]] # [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
# [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.+8.j 0.+0.j 0.+0.j 0.-0.j] # [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.+0.j 0.+0.j 0.+0.j 0.-0.j] # [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
# [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]] # [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]]
...@@ -901,15 +904,16 @@ def fft2(x, s=None, axes=(-2, -1), norm="backward", name=None): ...@@ -901,15 +904,16 @@ def fft2(x, s=None, axes=(-2, -1), norm="backward", name=None):
.. code-block:: python .. code-block:: python
import numpy as np
import paddle import paddle
x = np.mgrid[:2, :2][1] arr = paddle.arange(2, dtype="float64")
xp = paddle.to_tensor(x) x = paddle.meshgrid(arr, arr)[0]
fft2_xp = paddle.fft.fft2(xp).numpy()
fft2_xp = paddle.fft.fft2(x)
print(fft2_xp) print(fft2_xp)
# [[ 2.+0.j -2.+0.j] # Tensor(shape=[2, 2], dtype=complex128, place=Place(gpu:0), stop_gradient=True,
# [ 0.+0.j 0.+0.j]] # [[ (2+0j), 0j ],
# [(-2+0j), 0j ]])
""" """
_check_at_least_ndim(x, 2) _check_at_least_ndim(x, 2)
...@@ -971,15 +975,16 @@ def ifft2(x, s=None, axes=(-2, -1), norm="backward", name=None): ...@@ -971,15 +975,16 @@ def ifft2(x, s=None, axes=(-2, -1), norm="backward", name=None):
.. code-block:: python .. code-block:: python
import numpy as np
import paddle import paddle
x = np.mgrid[:2, :2][1] arr = paddle.arange(2, dtype="float64")
xp = paddle.to_tensor(x) x = paddle.meshgrid(arr, arr)[0]
ifft2_xp = paddle.fft.ifft2(xp).numpy()
ifft2_xp = paddle.fft.ifft2(x)
print(ifft2_xp) print(ifft2_xp)
# [[ 0.5+0.j -0.5+0.j] # Tensor(shape=[2, 2], dtype=complex128, place=Place(gpu:0), stop_gradient=True,
# [ 0. +0.j 0. +0.j]] # [[ (0.5+0j), 0j ],
# [(-0.5+0j), 0j ]])
""" """
_check_at_least_ndim(x, 2) _check_at_least_ndim(x, 2)
if s is not None: if s is not None:
...@@ -1033,16 +1038,17 @@ def rfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): ...@@ -1033,16 +1038,17 @@ def rfft2(x, s=None, axes=(-2, -1), norm="backward", name=None):
.. code-block:: python .. code-block:: python
import paddle import paddle
import numpy as np
arr = paddle.arange(5, dtype="float64")
x = paddle.to_tensor(np.mgrid[:5, :5][0].astype(np.float32)) x = paddle.meshgrid(arr, arr)[0]
print(paddle.fft.rfft2(x))
# Tensor(shape=[5, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, result = paddle.fft.rfft2(x)
# [[ (50+0j) , (1.1920928955078125e-07+0j) , 0j ], print(result.numpy())
# [(-12.5+17.204774856567383j) , (-9.644234211236835e-08+7.006946134424652e-08j) , 0j ], # [[ 50. +0.j 0. +0.j 0. +0.j ]
# [(-12.500000953674316+4.061495304107666j) , (3.6837697336977726e-08-1.1337477445749755e-07j), 0j ], # [-12.5+17.20477401j 0. +0.j 0. +0.j ]
# [(-12.500000953674316-4.061495304107666j) , (3.6837697336977726e-08+1.1337477445749755e-07j), 0j ], # [-12.5 +4.0614962j 0. +0.j 0. +0.j ]
# [(-12.5-17.204774856567383j) , (-9.644234211236835e-08-7.006946134424652e-08j) , 0j ]]) # [-12.5 -4.0614962j 0. +0.j 0. +0.j ]
# [-12.5-17.20477401j 0. +0.j 0. +0.j ]]
""" """
_check_at_least_ndim(x, 2) _check_at_least_ndim(x, 2)
if s is not None: if s is not None:
...@@ -1192,13 +1198,20 @@ def ihfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): ...@@ -1192,13 +1198,20 @@ def ihfft2(x, s=None, axes=(-2, -1), norm="backward", name=None):
.. code-block:: python .. code-block:: python
import numpy as np
import paddle import paddle
x = np.mgrid[:5, :5][0].astype(np.float64) arr = paddle.arange(5, dtype="float64")
xp = paddle.to_tensor(x) x = paddle.meshgrid(arr, arr)[0]
ihfft2_xp = paddle.fft.ihfft2(xp).numpy() print(x)
print(ihfft2_xp) # Tensor(shape=[5, 5], dtype=float64, place=Place(gpu:0), stop_gradient=True,
# [[0., 0., 0., 0., 0.],
# [1., 1., 1., 1., 1.],
# [2., 2., 2., 2., 2.],
# [3., 3., 3., 3., 3.],
# [4., 4., 4., 4., 4.]])
ihfft2_xp = paddle.fft.ihfft2(x)
print(ihfft2_xp.numpy())
# [[ 2. +0.j 0. +0.j 0. +0.j ] # [[ 2. +0.j 0. +0.j 0. +0.j ]
# [-0.5-0.68819096j 0. +0.j 0. +0.j ] # [-0.5-0.68819096j 0. +0.j 0. +0.j ]
# [-0.5-0.16245985j 0. +0.j 0. +0.j ] # [-0.5-0.16245985j 0. +0.j 0. +0.j ]
...@@ -1250,15 +1263,11 @@ def fftfreq(n, d=1.0, dtype=None, name=None): ...@@ -1250,15 +1263,11 @@ def fftfreq(n, d=1.0, dtype=None, name=None):
.. code-block:: python .. code-block:: python
import numpy as np
import paddle import paddle
x = np.array([3, 1, 2, 2, 3], dtype=float)
scalar_temp = 0.5 scalar_temp = 0.5
n = x.size fftfreq_xp = paddle.fft.fftfreq(5, d=scalar_temp)
fftfreq_xp = paddle.fft.fftfreq(n, d=scalar_temp)
print(fftfreq_xp) print(fftfreq_xp)
# Tensor(shape=[5], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # Tensor(shape=[5], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [ 0. , 0.40000001, 0.80000001, -0.80000001, -0.40000001]) # [ 0. , 0.40000001, 0.80000001, -0.80000001, -0.40000001])
""" """
...@@ -1301,13 +1310,10 @@ def rfftfreq(n, d=1.0, dtype=None, name=None): ...@@ -1301,13 +1310,10 @@ def rfftfreq(n, d=1.0, dtype=None, name=None):
.. code-block:: python .. code-block:: python
import numpy as np
import paddle import paddle
x = np.array([3, 1, 2, 2, 3], dtype=float)
scalar_temp = 0.3 scalar_temp = 0.3
n = x.size rfftfreq_xp = paddle.fft.rfftfreq(5, d=scalar_temp)
rfftfreq_xp = paddle.fft.rfftfreq(n, d=scalar_temp)
print(rfftfreq_xp) print(rfftfreq_xp)
# Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
...@@ -1343,15 +1349,17 @@ def fftshift(x, axes=None, name=None): ...@@ -1343,15 +1349,17 @@ def fftshift(x, axes=None, name=None):
.. code-block:: python .. code-block:: python
import numpy as np
import paddle import paddle
x = np.array([3, 1, 2, 2, 3], dtype=float) fftfreq_xp = paddle.fft.fftfreq(5, d=0.3)
n = x.size print(fftfreq_xp)
fftfreq_xp = paddle.fft.fftfreq(n, d=0.3) # Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
res = paddle.fft.fftshift(fftfreq_xp).numpy() # [ 0. , 0.66666669, 1.33333337, -1.33333337, -0.66666669])
res = paddle.fft.fftshift(fftfreq_xp)
print(res) print(res)
# [-1.3333334 -0.6666667 0. 0.6666667 1.3333334] # Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-1.33333337, -0.66666669, 0. , 0.66666669, 1.33333337])
""" """
shape = paddle.shape(x) shape = paddle.shape(x)
...@@ -1386,15 +1394,17 @@ def ifftshift(x, axes=None, name=None): ...@@ -1386,15 +1394,17 @@ def ifftshift(x, axes=None, name=None):
.. code-block:: python .. code-block:: python
import numpy as np
import paddle import paddle
x = np.array([3, 1, 2, 2, 3], dtype=float) fftfreq_xp = paddle.fft.fftfreq(5, d=0.3)
n = x.size print(fftfreq_xp)
fftfreq_xp = paddle.fft.fftfreq(n, d=0.3) # Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
res = paddle.fft.ifftshift(fftfreq_xp).numpy() # [ 0. , 0.66666669, 1.33333337, -1.33333337, -0.66666669])
res = paddle.fft.ifftshift(fftfreq_xp)
print(res) print(res)
# [ 1.3333334 -1.3333334 -0.6666667 0. 0.6666667] # Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [ 1.33333337, -1.33333337, -0.66666669, 0. , 0.66666669])
""" """
shape = paddle.shape(x) shape = paddle.shape(x)
......
...@@ -87,28 +87,31 @@ def softmax(x, axis=-1, name=None): ...@@ -87,28 +87,31 @@ def softmax(x, axis=-1, name=None):
.. code-block:: python .. code-block:: python
import paddle import paddle
import numpy as np
paddle.seed(100) paddle.seed(100)
mask = np.random.rand(3, 4) < 0.5 mask = paddle.rand((3, 4)) < 0.5
np_x = np.random.rand(3, 4) * mask x = paddle.rand((3, 4)) * mask
# [[0. 0. 0.96823406 0.19722934] print(x)
# [0.94373937 0. 0.02060066 0.71456372] # Tensor(shape=[3, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [0. 0. 0. 0.98275049]] # [[0.83438963, 0.70008713, 0. , 0.88831252],
# [0.02200012, 0. , 0.75432241, 0.65136462],
csr = paddle.to_tensor(np_x).to_sparse_csr() # [0.96088767, 0.82938021, 0.35367414, 0.86653489]])
# Tensor(shape=[3, 4], dtype=paddle.float64, place=Place(gpu:0), stop_gradient=True,
# crows=[0, 2, 5, 6], csr = x.to_sparse_csr()
# cols=[2, 3, 0, 2, 3, 3], print(csr)
# values=[0.96823406, 0.19722934, 0.94373937, 0.02060066, 0.71456372, # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# 0.98275049]) # crows=[0 , 3 , 6 , 10],
# cols=[0, 1, 3, 0, 2, 3, 0, 1, 2, 3],
# values=[0.83438963, 0.70008713, 0.88831252, 0.02200012, 0.75432241,
# 0.65136462, 0.96088767, 0.82938021, 0.35367414, 0.86653489])
out = paddle.sparse.nn.functional.softmax(csr) out = paddle.sparse.nn.functional.softmax(csr)
# Tensor(shape=[3, 4], dtype=paddle.float64, place=Place(gpu:0), stop_gradient=True, print(out)
# crows=[0, 2, 5, 6], # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# cols=[2, 3, 0, 2, 3, 3], # crows=[0 , 3 , 6 , 10],
# values=[0.68373820, 0.31626180, 0.45610887, 0.18119845, 0.36269269, # cols=[0, 1, 3, 0, 2, 3, 0, 1, 2, 3],
# 1. ]) # values=[0.34132850, 0.29843223, 0.36023921, 0.20176248, 0.41964680,
# 0.37859070, 0.30015594, 0.26316854, 0.16354506, 0.27313042])
""" """
return _C_ops.sparse_softmax(x, axis) return _C_ops.sparse_softmax(x, axis)
......
...@@ -86,29 +86,32 @@ class Softmax(Layer): ...@@ -86,29 +86,32 @@ class Softmax(Layer):
.. code-block:: python .. code-block:: python
import paddle import paddle
import numpy as np paddle.seed(2022)
paddle.seed(100)
mask = paddle.rand((3, 4)) < 0.7
mask = np.random.rand(3, 4) < 0.5 x = paddle.rand((3, 4)) * mask
np_x = np.random.rand(3, 4) * mask print(x)
# [[0. 0. 0.96823406 0.19722934] # Tensor(shape=[3, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [0.94373937 0. 0.02060066 0.71456372] # [[0.08325022, 0.27030438, 0. , 0.83883715],
# [0. 0. 0. 0.98275049]] # [0. , 0.95856029, 0.24004589, 0. ],
# [0.14500992, 0.17088132, 0. , 0. ]])
csr = paddle.to_tensor(np_x).to_sparse_csr()
# Tensor(shape=[3, 4], dtype=paddle.float64, place=Place(gpu:0), stop_gradient=True, csr = x.to_sparse_csr()
# crows=[0, 2, 5, 6], print(csr)
# cols=[2, 3, 0, 2, 3, 3], # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# values=[0.96823406, 0.19722934, 0.94373937, 0.02060066, 0.71456372, # crows=[0, 3, 5, 7],
# 0.98275049]) # cols=[0, 1, 3, 1, 2, 0, 1],
# values=[0.08325022, 0.27030438, 0.83883715, 0.95856029, 0.24004589,
# 0.14500992, 0.17088132])
softmax = paddle.sparse.nn.Softmax() softmax = paddle.sparse.nn.Softmax()
out = softmax(csr) out = softmax(csr)
# Tensor(shape=[3, 4], dtype=paddle.float64, place=Place(gpu:0), stop_gradient=True, print(out)
# crows=[0, 2, 5, 6], # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# cols=[2, 3, 0, 2, 3, 3], # crows=[0, 3, 5, 7],
# values=[0.68373820, 0.31626180, 0.45610887, 0.18119845, 0.36269269, # cols=[0, 1, 3, 1, 2, 0, 1],
# 1. ]) # values=[0.23070428, 0.27815846, 0.49113727, 0.67227983, 0.32772022,
# 0.49353254, 0.50646752])
""" """
def __init__(self, axis=-1, name=None): def __init__(self, axis=-1, name=None):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册