BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit e75c651d (unverified)
Authored Dec 07, 2022 by Kevin吴嘉文; committed via GitHub on Dec 07, 2022
Parent: 3a8aac35

Remove redundant numpy output in Example code (1/3), test=document_fix (#48678)
Changes: 20 changed files with 126 additions and 110 deletions (+126, -110)
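The pattern is the same across all 20 files: docstring examples that converted a Tensor to numpy only to print it now print the Tensor directly, since a paddle Tensor's repr already shows shape, dtype, placement, and values. A minimal before/after sketch (assuming paddle is installed; the place shown in the repr depends on the device, e.g. Place(cpu) or Place(gpu:0)):

    import paddle

    x = paddle.to_tensor([[1., 2.], [3., 4.]])

    # Before: a numpy detour just for display.
    print(x.numpy())
    # [[1. 2.]
    #  [3. 4.]]

    # After: the Tensor repr carries the same information and more.
    print(x)
    # Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        [[1., 2.],
    #         [3., 4.]])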
python/paddle/distributed/communication/stream/all_reduce.py   +1   -1
python/paddle/fft.py                                           +21  -20
python/paddle/incubate/nn/layer/fused_transformer.py           +2   -2
python/paddle/nn/functional/activation.py                      +5   -4
python/paddle/nn/functional/conv.py                            +8   -12
python/paddle/nn/functional/distance.py                        +3   -1
python/paddle/nn/functional/extension.py                       +5   -4
python/paddle/nn/functional/loss.py                            +24  -17
python/paddle/nn/functional/vision.py                          +2   -3
python/paddle/nn/initializer/constant.py                       +6   -4
python/paddle/nn/layer/conv.py                                 +8   -12
python/paddle/nn/layer/distance.py                             +3   -1
python/paddle/nn/layer/loss.py                                 +26  -17
python/paddle/nn/layer/vision.py                               +2   -3
python/paddle/nn/quant/quant_layers.py                         +2   -4
python/paddle/tensor/manipulation.py                           +4   -1
python/paddle/text/datasets/conll05.py                         +1   -1
python/paddle/text/datasets/imdb.py                            +1   -1
python/paddle/text/datasets/imikolov.py                        +1   -1
python/paddle/text/datasets/movielens.py                       +1   -1
python/paddle/distributed/communication/stream/all_reduce.py
@@ -106,7 +106,7 @@ def all_reduce(
             data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
             task = dist.stream.all_reduce(data, sync_op=False)
             task.wait()
-            out = data.numpy()
+            out = data
             # [[5, 7, 9], [5, 7, 9]]
     """
     if _warn_cur_rank_not_in_group(group):
python/paddle/fft.py
@@ -530,26 +530,27 @@ def fftn(x, s=None, axes=None, norm="backward", name=None):
             x = paddle.meshgrid(arr, arr, arr)[1]
             fftn_xp = paddle.fft.fftn(x, axes=(1, 2))
-            print(fftn_xp.numpy())
-            # [[[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-            #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-            #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-            #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]]
+            print(fftn_xp)
+            # Tensor(shape=[4, 4, 4], dtype=complex128, place=Place(gpu:0), stop_gradient=True,
+            #        [[[(24+0j), 0j, 0j, -0j],
+            #          [(-8+8j), 0j, 0j, -0j],
+            #          [(-8+0j), 0j, 0j, -0j],
+            #          [(-8-8j), 0j, 0j, -0j]],
+            #         [[(24+0j), 0j, 0j, -0j],
+            #          [(-8+8j), 0j, 0j, -0j],
+            #          [(-8+0j), 0j, 0j, -0j],
+            #          [(-8-8j), 0j, 0j, -0j]],
+            #         [[(24+0j), 0j, 0j, -0j],
+            #          [(-8+8j), 0j, 0j, -0j],
+            #          [(-8+0j), 0j, 0j, -0j],
+            #          [(-8-8j), 0j, 0j, -0j]],
+            #         [[(24+0j), 0j, 0j, -0j],
+            #          [(-8+8j), 0j, 0j, -0j],
+            #          [(-8+0j), 0j, 0j, -0j],
+            #          [(-8-8j), 0j, 0j, -0j]]])
     """
     if is_integer(x) or is_floating_point(x):
         return fftn_r2c(
python/paddle/incubate/nn/layer/fused_transformer.py
@@ -533,8 +533,8 @@ class FusedFeedForward(Layer):
             fused_feedforward_layer = FusedFeedForward(8, 8)
             x = paddle.rand((1, 8, 8))
             out = fused_feedforward_layer(x)
-            print(out.numpy().shape)
-            # (1, 8, 8)
+            print(out.shape)
+            # [1, 8, 8]
     """

     def __init__(
python/paddle/nn/functional/activation.py
@@ -1677,11 +1677,12 @@ def glu(x, axis=-1, name=None):
             x = paddle.to_tensor(
                 [[-0.22014759, -1.76358426, 0.80566144, 0.04241343],
-                    [-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
+                 [-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
             )
-            print(F.glu(x).numpy())
-            # array([[-0.15216254, -0.9004892 ],
-            #        [-1.0577879 , -0.46985325]], dtype=float32)
+            print(F.glu(x))
+            # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[-0.15216254, -0.90048921],
+            #         [-1.05778778, -0.46985325]])
     """
     check_variable_and_dtype(
python/paddle/nn/functional/conv.py
@@ -657,10 +657,9 @@ def conv2d(
             w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
             y_var = F.conv2d(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6]
     """
     # entry checks
     if data_format not in ["NCHW", "NHWC"]:
@@ -1234,10 +1233,9 @@ def conv2d_transpose(
             w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
             y_var = F.conv2d_transpose(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10]
     """
     if data_format not in ['NCHW', 'NHWC']:
@@ -1523,10 +1521,9 @@ def conv3d(
             w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')
             y_var = F.conv3d(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6, 6]
     """
     # entry check
     if data_format not in ["NCDHW", "NDHWC"]:
@@ -1738,10 +1735,9 @@ def conv3d_transpose(
             w_var = paddle.randn((3, 6, 3, 3, 3), dtype='float32')
             y_var = F.conv3d_transpose(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10, 10]
     """
     # entry checks
     if data_format not in ["NCDHW", "NDHWC"]:
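Note the bracket change in the expected shapes above: a numpy array's .shape is a tuple, printed as (2, 6, 6, 6), while a dygraph paddle Tensor's .shape is a Python list, printed as [2, 6, 6, 6]. A small sketch of the distinction (assuming paddle is installed; shapes follow the conv2d example above):

    import paddle
    import paddle.nn.functional as F

    x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
    w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
    y_var = F.conv2d(x_var, w_var)

    # Tensor.shape is a Python list in dygraph mode.
    print(type(y_var.shape), y_var.shape)
    # <class 'list'> [2, 6, 6, 6]

    # ndarray.shape is a tuple, hence the old "(2, 6, 6, 6)" comments.
    print(type(y_var.numpy().shape), y_var.numpy().shape)
    # <class 'tuple'> (2, 6, 6, 6)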
python/paddle/nn/functional/distance.py
@@ -63,7 +63,9 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
             x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
             y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
             distance = paddle.nn.functional.pairwise_distance(x, y)
-            print(distance.numpy()) # [5. 5.]
+            print(distance)
+            # Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [4.99999860, 4.99999860])
     """
     check_type(p, 'porder', (float, int), 'PairwiseDistance')
python/paddle/nn/functional/extension.py
@@ -215,10 +215,11 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
             lengths = paddle.to_tensor([10, 9, 8])
             mask = paddle.nn.functional.sequence_mask(lengths)
-            print(mask.numpy())
-            # [[1 1 1 1 1 1 1 1 1 1]
-            #  [1 1 1 1 1 1 1 1 1 0]
-            #  [1 1 1 1 1 1 1 1 0 0]]
+            print(mask)
+            # Tensor(shape=[3, 10], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+            #         [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
+            #         [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
     """
python/paddle/nn/functional/loss.py
@@ -1353,17 +1353,20 @@ def l1_loss(input, label, reduction='mean', name=None):
             label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])
             l1_loss = paddle.nn.functional.l1_loss(input, label)
-            print(l1_loss.numpy())
-            # [0.35]
+            print(l1_loss)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [0.34999999])
             l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='none')
-            print(l1_loss.numpy())
-            # [[0.20000005 0.19999999]
-            #  [0.2        0.79999995]]
+            print(l1_loss)
+            # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[0.20000005, 0.19999999],
+            #         [0.20000000, 0.79999995]])
             l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='sum')
-            print(l1_loss.numpy())
-            # [1.4]
+            print(l1_loss)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [1.39999998])
     """
     if reduction not in ['sum', 'mean', 'none']:
@@ -2530,9 +2533,11 @@ def cross_entropy(
             cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                 weight=weight, reduction=reduction)
             dy_ret = cross_entropy_loss(
-                                        input,
-                                        label)
-            print(dy_ret.numpy()) #[5.41993642]
+                input,
+                label)
+            print(dy_ret)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [5.34043430])

         .. code-block:: python
@@ -2550,13 +2555,15 @@ def cross_entropy(
             labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
             labels /= paddle.sum(labels, axis=axis, keepdim=True)
             paddle_loss_mean = paddle.nn.functional.cross_entropy(
-                                                                  logits,
-                                                                  labels,
-                                                                  soft_label=True,
-                                                                  axis=axis,
-                                                                  weight=weight,
-                                                                  reduction=reduction)
-            print(paddle_loss_mean.numpy()) #[1.12908343]
+                logits,
+                labels,
+                soft_label=True,
+                axis=axis,
+                weight=weight,
+                reduction=reduction)
+            print(paddle_loss_mean)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [1.11043464])
     """
python/paddle/nn/functional/vision.py
@@ -368,9 +368,8 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
             x = paddle.randn(shape=[2,9,4,4])
             out_var = F.pixel_shuffle(x, 3)
-            out = out_var.numpy()
-            print(out.shape)
-            # (2, 1, 12, 12)
+            print(out_var.shape)
+            # [2, 1, 12, 12]
     """
     if not isinstance(upscale_factor, int):
         raise TypeError("upscale factor must be int type")
python/paddle/nn/initializer/constant.py
@@ -32,11 +32,13 @@ class Constant(ConstantInitializer):
             data = paddle.rand([30, 10, 2], dtype='float32')
             linear = nn.Linear(2,
-                               4,
-                               weight_attr=nn.initializer.Constant(value=2.0))
+                4,
+                weight_attr=nn.initializer.Constant(value=2.0))
             res = linear(data)
-            print(linear.weight.numpy())
-            #result is [[2. 2. 2. 2.],[2. 2. 2. 2.]]
+            print(linear.weight)
+            # Tensor(shape=[2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+            #        [[2., 2., 2., 2.],
+            #         [2., 2., 2., 2.]])
     """
python/paddle/nn/layer/conv.py
@@ -668,9 +668,8 @@ class Conv2D(_ConvNd):
             conv = nn.Conv2D(4, 6, (3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6]
     """

     def __init__(
@@ -841,9 +840,8 @@ class Conv2DTranspose(_ConvNd):
             conv = nn.Conv2DTranspose(4, 6, (3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10]
     """

     def __init__(
@@ -999,9 +997,8 @@ class Conv3D(_ConvNd):
             conv = nn.Conv3D(4, 6, (3, 3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6, 6]
     """

     def __init__(
@@ -1181,9 +1178,8 @@ class Conv3DTranspose(_ConvNd):
             conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10, 10]
     """

     def __init__(
python/paddle/nn/layer/distance.py
@@ -56,7 +56,9 @@ class PairwiseDistance(Layer):
             y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
             dist = paddle.nn.PairwiseDistance()
             distance = dist(x, y)
-            print(distance.numpy()) # [5. 5.]
+            print(distance)
+            # Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [4.99999860, 4.99999860])
     """
python/paddle/nn/layer/loss.py
@@ -102,7 +102,9 @@ class BCEWithLogitsLoss(Layer):
             label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32")
             bce_logit_loss = paddle.nn.BCEWithLogitsLoss()
             output = bce_logit_loss(logit, label)
-            print(output.numpy()) # [0.45618808]
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [0.45618814])
     """
@@ -319,9 +321,11 @@ class CrossEntropyLoss(Layer):
             cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                 weight=weight, reduction=reduction)
             dy_ret = cross_entropy_loss(
-                                        input,
-                                        label)
-            print(dy_ret.numpy()) #[5.41993642]
+                input,
+                label)
+            print(dy_ret)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [5.34043430])

         .. code-block:: python
@@ -339,13 +343,15 @@ class CrossEntropyLoss(Layer):
             labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
             labels /= paddle.sum(labels, axis=axis, keepdim=True)
             paddle_loss_mean = paddle.nn.functional.cross_entropy(
-                                                                  logits,
-                                                                  labels,
-                                                                  soft_label=True,
-                                                                  axis=axis,
-                                                                  weight=weight,
-                                                                  reduction=reduction)
-            print(paddle_loss_mean.numpy()) #[1.12908343]
+                logits,
+                labels,
+                soft_label=True,
+                axis=axis,
+                weight=weight,
+                reduction=reduction)
+            print(paddle_loss_mean)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [1.11043464])
     """
@@ -635,19 +641,22 @@ class L1Loss(Layer):
             l1_loss = paddle.nn.L1Loss()
             output = l1_loss(input, label)
-            print(output.numpy())
-            # [0.35]
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [0.34999999])
             l1_loss = paddle.nn.L1Loss(reduction='sum')
             output = l1_loss(input, label)
-            print(output.numpy())
-            # [1.4]
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [1.39999998])
             l1_loss = paddle.nn.L1Loss(reduction='none')
             output = l1_loss(input, label)
             print(output)
-            # [[0.20000005 0.19999999]
-            #  [0.2        0.79999995]]
+            # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[0.20000005, 0.19999999],
+            #         [0.20000000, 0.79999995]])
     """
python/paddle/nn/layer/vision.py
@@ -52,10 +52,9 @@ class PixelShuffle(Layer):
             x = paddle.randn(shape=[2,9,4,4])
             pixel_shuffle = nn.PixelShuffle(3)
-            out_var = pixel_shuffle(x)
-            out = out_var.numpy()
+            out = pixel_shuffle(x)
             print(out.shape)
-            # (2, 1, 12, 12)
+            # [2, 1, 12, 12]
     """
python/paddle/nn/quant/quant_layers.py
@@ -628,10 +628,8 @@ class QuantizedConv2DTranspose(Layer):
             conv_quantized = QuantizedConv2DTranspose(conv)
             y_quantized = conv_quantized(x_var)
             y_var = conv(x_var)
-            y_quantized_np = y_quantized.numpy()
-            y_np = y_var.numpy()
-            print(y_np.shape, y_quantized_np.shape)
-            # (2, 6, 10, 10), (2, 6, 10, 10)
+            print(y_var.shape, y_quantized.shape)
+            # [2, 6, 10, 10], [2, 6, 10, 10]
     """
python/paddle/tensor/manipulation.py
@@ -2453,7 +2453,10 @@ def unique(
             x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
             unique = paddle.unique(x)
-            np_unique = unique.numpy() # [1 2 3 5]
+            print(unique)
+            # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [1, 2, 3, 5])
             _, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
             print(indices)
             # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
             ...
python/paddle/text/datasets/conll05.py
@@ -88,7 +88,7 @@ class Conll05st(Dataset):
             model = SimpleNet()
             pred_idx, mark, label = model(pred_idx, mark, label)
-            print(pred_idx.numpy(), mark.numpy(), label.numpy())
+            print(pred_idx, mark, label)
     """
python/paddle/text/datasets/imdb.py
@@ -67,7 +67,7 @@ class Imdb(Dataset):
             model = SimpleNet()
             image, label = model(doc, label)
-            print(doc.numpy().shape, label.numpy().shape)
+            print(doc.shape, label.shape)
     """
python/paddle/text/datasets/imikolov.py
@@ -67,7 +67,7 @@ class Imikolov(Dataset):
             model = SimpleNet()
             src, trg = model(src, trg)
-            print(src.numpy().shape, trg.numpy().shape)
+            print(src.shape, trg.shape)
     """
python/paddle/text/datasets/movielens.py
@@ -134,7 +134,7 @@ class Movielens(Dataset):
             model = SimpleNet()
             category, title, rating = model(category, title, rating)
-            print(category.numpy().shape, title.numpy().shape, rating.numpy().shape)
+            print(category.shape, title.shape, rating.shape)
     """