Unverified · Commit e75c651d authored by Kevin吴嘉文, committed by GitHub

Remove redundant numpy output in Example code (1/3), test=document_fix (#48678)

Parent 3a8aac35
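The pattern applied in every hunk below: the docstring examples used to round-trip tensors through NumPy purely for display, but printing a paddle.Tensor directly already shows its shape, dtype, placement, and values. A minimal sketch of the before/after (assuming a working Paddle install; the place shown in the repr depends on the device):

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])

# Before: redundant conversion to NumPy just to print the values.
print(x.numpy())
# [1. 2. 3.]

# After: the tensor's own repr already carries shape, dtype, place, and values.
print(x)
# Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
#        [1., 2., 3.])

Note that Tensor.shape is a Python list, so the expected shape output in the examples changes from a NumPy tuple such as (2, 6, 6, 6) to a list such as [2, 6, 6, 6].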
......@@ -106,7 +106,7 @@ def all_reduce(
data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
task = dist.stream.all_reduce(data, sync_op=False)
task.wait()
-out = data.numpy()
+out = data
# [[5, 7, 9], [5, 7, 9]]
"""
if _warn_cur_rank_not_in_group(group):
......
......@@ -530,26 +530,27 @@ def fftn(x, s=None, axes=None, norm="backward", name=None):
x = paddle.meshgrid(arr, arr, arr)[1]
fftn_xp = paddle.fft.fftn(x, axes=(1, 2))
-print(fftn_xp.numpy())
-# [[[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-# [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-# [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-# [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-# [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]]
+print(fftn_xp)
+# Tensor(shape=[4, 4, 4], dtype=complex128, place=Place(gpu:0), stop_gradient=True,
+# [[[(24+0j), 0j , 0j , -0j ],
+# [(-8+8j), 0j , 0j , -0j ],
+# [(-8+0j), 0j , 0j , -0j ],
+# [(-8-8j), 0j , 0j , -0j ]],
+# [[(24+0j), 0j , 0j , -0j ],
+# [(-8+8j), 0j , 0j , -0j ],
+# [(-8+0j), 0j , 0j , -0j ],
+# [(-8-8j), 0j , 0j , -0j ]],
+# [[(24+0j), 0j , 0j , -0j ],
+# [(-8+8j), 0j , 0j , -0j ],
+# [(-8+0j), 0j , 0j , -0j ],
+# [(-8-8j), 0j , 0j , -0j ]],
+# [[(24+0j), 0j , 0j , -0j ],
+# [(-8+8j), 0j , 0j , -0j ],
+# [(-8+0j), 0j , 0j , -0j ],
+# [(-8-8j), 0j , 0j , -0j ]]])
"""
if is_integer(x) or is_floating_point(x):
return fftn_r2c(
......
......@@ -533,8 +533,8 @@ class FusedFeedForward(Layer):
fused_feedforward_layer = FusedFeedForward(8, 8)
x = paddle.rand((1, 8, 8))
out = fused_feedforward_layer(x)
-print(out.numpy().shape)
-# (1, 8, 8)
+print(out.shape)
+# [1, 8, 8]
"""
def __init__(
......
......@@ -1677,11 +1677,12 @@ def glu(x, axis=-1, name=None):
x = paddle.to_tensor(
[[-0.22014759, -1.76358426, 0.80566144, 0.04241343],
[-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
)
-print(F.glu(x).numpy())
-# array([[-0.15216254, -0.9004892 ],
-# [-1.0577879 , -0.46985325]], dtype=float32)
+print(F.glu(x))
+# Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+# [[-0.15216254, -0.90048921],
+# [-1.05778778, -0.46985325]])
"""
check_variable_and_dtype(
......
......@@ -657,10 +657,9 @@ def conv2d(
w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
y_var = F.conv2d(x_var, w_var)
-y_np = y_var.numpy()
-print(y_np.shape)
-# (2, 6, 6, 6)
+print(y_var.shape)
+# [2, 6, 6, 6]
"""
# entry checks
if data_format not in ["NCHW", "NHWC"]:
......@@ -1234,10 +1233,9 @@ def conv2d_transpose(
w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
y_var = F.conv2d_transpose(x_var, w_var)
-y_np = y_var.numpy()
-print(y_np.shape)
-# (2, 6, 10, 10)
+print(y_var.shape)
+# [2, 6, 10, 10]
"""
if data_format not in ['NCHW', 'NHWC']:
......@@ -1523,10 +1521,9 @@ def conv3d(
w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')
y_var = F.conv3d(x_var, w_var)
-y_np = y_var.numpy()
-print(y_np.shape)
-# (2, 6, 6, 6, 6)
+print(y_var.shape)
+# [2, 6, 6, 6, 6]
"""
# entry check
if data_format not in ["NCDHW", "NDHWC"]:
......@@ -1738,10 +1735,9 @@ def conv3d_transpose(
w_var = paddle.randn((3, 6, 3, 3, 3), dtype='float32')
y_var = F.conv3d_transpose(x_var, w_var)
-y_np = y_var.numpy()
-print(y_np.shape)
-# (2, 6, 10, 10, 10)
+print(y_var.shape)
+# [2, 6, 10, 10, 10]
"""
# entry checks
if data_format not in ["NCDHW", "NDHWC"]:
......
......@@ -63,7 +63,9 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
distance = paddle.nn.functional.pairwise_distance(x, y)
-print(distance.numpy()) # [5. 5.]
+print(distance)
+# Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+# [4.99999860, 4.99999860])
"""
check_type(p, 'porder', (float, int), 'PairwiseDistance')
......
......@@ -215,10 +215,11 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
lengths = paddle.to_tensor([10, 9, 8])
mask = paddle.nn.functional.sequence_mask(lengths)
-print(mask.numpy())
-# [[1 1 1 1 1 1 1 1 1 1]
-# [1 1 1 1 1 1 1 1 1 0]
-# [1 1 1 1 1 1 1 1 0 0]]
+print(mask)
+# Tensor(shape=[3, 10], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+# [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+# [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
+# [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
"""
......
......@@ -1353,17 +1353,20 @@ def l1_loss(input, label, reduction='mean', name=None):
label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])
l1_loss = paddle.nn.functional.l1_loss(input, label)
-print(l1_loss.numpy())
-# [0.35]
+print(l1_loss)
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+# [0.34999999])
l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='none')
-print(l1_loss.numpy())
-# [[0.20000005 0.19999999]
-# [0.2 0.79999995]]
+print(l1_loss)
+# Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+# [[0.20000005, 0.19999999],
+# [0.20000000, 0.79999995]])
l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='sum')
-print(l1_loss.numpy())
-# [1.4]
+print(l1_loss)
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+# [1.39999998])
"""
if reduction not in ['sum', 'mean', 'none']:
......@@ -2530,9 +2533,11 @@ def cross_entropy(
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction=reduction)
dy_ret = cross_entropy_loss(
input,
label)
-print(dy_ret.numpy()) #[5.41993642]
+print(dy_ret)
+# Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+# [5.34043430])
.. code-block:: python
......@@ -2550,13 +2555,15 @@ def cross_entropy(
labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
labels /= paddle.sum(labels, axis=axis, keepdim=True)
paddle_loss_mean = paddle.nn.functional.cross_entropy(
logits,
labels,
soft_label=True,
axis=axis,
weight=weight,
reduction=reduction)
-print(paddle_loss_mean.numpy()) #[1.12908343]
+print(paddle_loss_mean)
+# Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+# [1.11043464])
"""
......
......@@ -368,9 +368,8 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
x = paddle.randn(shape=[2,9,4,4])
out_var = F.pixel_shuffle(x, 3)
-out = out_var.numpy()
-print(out.shape)
-# (2, 1, 12, 12)
+print(out_var.shape)
+# [2, 1, 12, 12]
"""
if not isinstance(upscale_factor, int):
raise TypeError("upscale factor must be int type")
......
......@@ -32,11 +32,13 @@ class Constant(ConstantInitializer):
data = paddle.rand([30, 10, 2], dtype='float32')
linear = nn.Linear(2,
4,
weight_attr=nn.initializer.Constant(value=2.0))
res = linear(data)
-print(linear.weight.numpy())
-#result is [[2. 2. 2. 2.],[2. 2. 2. 2.]]
+print(linear.weight)
+# Tensor(shape=[2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+# [[2., 2., 2., 2.],
+# [2., 2., 2., 2.]])
"""
......
......@@ -668,9 +668,8 @@ class Conv2D(_ConvNd):
conv = nn.Conv2D(4, 6, (3, 3))
y_var = conv(x_var)
-y_np = y_var.numpy()
-print(y_np.shape)
-# (2, 6, 6, 6)
+print(y_var.shape)
+# [2, 6, 6, 6]
"""
def __init__(
......@@ -841,9 +840,8 @@ class Conv2DTranspose(_ConvNd):
conv = nn.Conv2DTranspose(4, 6, (3, 3))
y_var = conv(x_var)
-y_np = y_var.numpy()
-print(y_np.shape)
-# (2, 6, 10, 10)
+print(y_var.shape)
+# [2, 6, 10, 10]
"""
def __init__(
......@@ -999,9 +997,8 @@ class Conv3D(_ConvNd):
conv = nn.Conv3D(4, 6, (3, 3, 3))
y_var = conv(x_var)
-y_np = y_var.numpy()
-print(y_np.shape)
-# (2, 6, 6, 6, 6)
+print(y_var.shape)
+# [2, 6, 6, 6, 6]
"""
def __init__(
......@@ -1181,9 +1178,8 @@ class Conv3DTranspose(_ConvNd):
conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))
y_var = conv(x_var)
-y_np = y_var.numpy()
-print(y_np.shape)
-# (2, 6, 10, 10, 10)
+print(y_var.shape)
+# [2, 6, 10, 10, 10]
"""
def __init__(
......
......@@ -56,7 +56,9 @@ class PairwiseDistance(Layer):
y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
dist = paddle.nn.PairwiseDistance()
distance = dist(x, y)
-print(distance.numpy()) # [5. 5.]
+print(distance)
+# Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+# [4.99999860, 4.99999860])
"""
......
......@@ -102,7 +102,9 @@ class BCEWithLogitsLoss(Layer):
label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32")
bce_logit_loss = paddle.nn.BCEWithLogitsLoss()
output = bce_logit_loss(logit, label)
-print(output.numpy()) # [0.45618808]
+print(output)
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+# [0.45618814])
"""
......@@ -319,9 +321,11 @@ class CrossEntropyLoss(Layer):
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction=reduction)
dy_ret = cross_entropy_loss(
input,
label)
-print(dy_ret.numpy()) #[5.41993642]
+print(dy_ret)
+# Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+# [5.34043430])
.. code-block:: python
......@@ -339,13 +343,15 @@ class CrossEntropyLoss(Layer):
labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
labels /= paddle.sum(labels, axis=axis, keepdim=True)
paddle_loss_mean = paddle.nn.functional.cross_entropy(
logits,
labels,
soft_label=True,
axis=axis,
weight=weight,
reduction=reduction)
-print(paddle_loss_mean.numpy()) #[1.12908343]
+print(paddle_loss_mean)
+# Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+# [1.11043464])
"""
......@@ -635,19 +641,22 @@ class L1Loss(Layer):
l1_loss = paddle.nn.L1Loss()
output = l1_loss(input, label)
-print(output.numpy())
-# [0.35]
+print(output)
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+# [0.34999999])
l1_loss = paddle.nn.L1Loss(reduction='sum')
output = l1_loss(input, label)
-print(output.numpy())
-# [1.4]
+print(output)
+# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+# [1.39999998])
l1_loss = paddle.nn.L1Loss(reduction='none')
output = l1_loss(input, label)
print(output)
-# [[0.20000005 0.19999999]
-# [0.2 0.79999995]]
+# Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+# [[0.20000005, 0.19999999],
+# [0.20000000, 0.79999995]])
"""
......
......@@ -52,10 +52,9 @@ class PixelShuffle(Layer):
x = paddle.randn(shape=[2,9,4,4])
pixel_shuffle = nn.PixelShuffle(3)
-out_var = pixel_shuffle(x)
-out = out_var.numpy()
+out = pixel_shuffle(x)
print(out.shape)
-# (2, 1, 12, 12)
+# [2, 1, 12, 12]
"""
......
......@@ -628,10 +628,8 @@ class QuantizedConv2DTranspose(Layer):
conv_quantized = QuantizedConv2DTranspose(conv)
y_quantized = conv_quantized(x_var)
y_var = conv(x_var)
-y_quantized_np = y_quantized.numpy()
-y_np = y_var.numpy()
-print(y_np.shape, y_quantized_np.shape)
-# (2, 6, 10, 10), (2, 6, 10, 10)
+print(y_var.shape, y_quantized.shape)
+# [2, 6, 10, 10], [2, 6, 10, 10]
"""
......
......@@ -2453,7 +2453,10 @@ def unique(
x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
unique = paddle.unique(x)
-np_unique = unique.numpy() # [1 2 3 5]
+print(unique)
+# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+# [1, 2, 3, 5])
_, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
print(indices)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
......
......@@ -88,7 +88,7 @@ class Conll05st(Dataset):
model = SimpleNet()
pred_idx, mark, label= model(pred_idx, mark, label)
-print(pred_idx.numpy(), mark.numpy(), label.numpy())
+print(pred_idx, mark, label)
"""
......
......@@ -67,7 +67,7 @@ class Imdb(Dataset):
model = SimpleNet()
image, label = model(doc, label)
-print(doc.numpy().shape, label.numpy().shape)
+print(doc.shape, label.shape)
"""
......
......@@ -67,7 +67,7 @@ class Imikolov(Dataset):
model = SimpleNet()
src, trg = model(src, trg)
-print(src.numpy().shape, trg.numpy().shape)
+print(src.shape, trg.shape)
"""
......
......@@ -134,7 +134,7 @@ class Movielens(Dataset):
model = SimpleNet()
category, title, rating = model(category, title, rating)
-print(category.numpy().shape, title.numpy().shape, rating.numpy().shape)
+print(category.shape, title.shape, rating.shape)
"""
......