Unverified commit dc12605d, authored by Chen Long, committed by GitHub

[cherry-pick] Fix code examples (#43904)

* Update api docs (#42725)

* Fix max_pool3d doc, test=document_fix (#42715)

* fix pooling doc

* fix typo test=document_fix

* fix doc typo, test=document_fix

* fix adaptive_avg_pool1d doc bug (#42721)

* fix adaptive_avg_pool1d doc bug

* fix adaptive_avg_pool1d doc bug

* fix spectral_norm en doc (#42728)

* Fix example code bugs (#42739)

* update readme test=document_fix

* fix api docs bugs test=document_fix

* fix code example bugs;test=document_fix
Co-authored-by: Linjie Chen <40840292+linjieccc@users.noreply.github.com>
Co-authored-by: Wei Shengyu <weisy11@163.com>
Co-authored-by: Walter <dongshl1226@hotmail.com>
Co-authored-by: wangna11BD <79366697+wangna11BD@users.noreply.github.com>
Parent 63458e5b
@@ -389,20 +389,24 @@ def hsigmoid_loss(input,
            paddle.set_device('cpu')
-           input = paddle.uniform([2, 3])
-           # [[-0.8018668   0.8736385  -0.9064771 ] # random
-           #  [-0.10228515 -0.87188244 -0.8783718 ]] # random
+           input = paddle.uniform([4, 3])
+           # [[ 0.45424712 -0.77296764  0.82943869] # random
+           #  [ 0.85062802  0.63303483  0.35312140] # random
+           #  [ 0.57170701  0.16627562  0.21588242] # random
+           #  [ 0.27610803 -0.99303514 -0.17114788]] # random
            label = paddle.to_tensor([0, 1, 4, 5])
            num_classes = 5
            weight=paddle.uniform([num_classes-1, 3])
-           # [[-0.24148715  0.8449961  -0.7399121 ] # random
-           #  [-0.9800559   0.43509364  0.9091208 ] # random
-           #  [ 0.60194826  0.10430074 -0.4521166 ] # random
-           #  [-0.4469818  -0.01536179 -0.604454  ]] # random
+           # [[-0.64477652  0.24821866 -0.17456549] # random
+           #  [-0.04635394  0.07473493 -0.25081766] # random
+           #  [ 0.05986035 -0.12185556  0.45153677] # random
+           #  [-0.66236806  0.91271877 -0.88088769]] # random
            out=F.hsigmoid_loss(input, label, num_classes, weight)
-           # [[3.0159328]
-           #  [2.2407534]]
+           # [[1.96709502]
+           #  [2.40019274]
+           #  [2.11009121]
+           #  [1.92374969]]
    """
    if in_dynamic_mode():
...
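For reference, the corrected functional example above can be assembled into a standalone, runnable sketch; inputs are random, so printed values will differ from the comments in the diff:

    import paddle
    import paddle.nn.functional as F

    paddle.set_device('cpu')

    # 4 samples with feature size 3, one target class index per sample
    input = paddle.uniform([4, 3])
    label = paddle.to_tensor([0, 1, 4, 5])
    num_classes = 5
    weight = paddle.uniform([num_classes - 1, 3])

    out = F.hsigmoid_loss(input, label, num_classes, weight)
    print(out.shape)  # [4, 1], one loss value per sample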
@@ -1160,22 +1160,21 @@ def max_pool3d(x,
          import paddle
          import paddle.nn.functional as F
-         import numpy as np
          # max pool3d
-         x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
-         output = F.max_pool2d(x,
+         x = paddle.uniform([1, 3, 32, 32, 32])
+         output = F.max_pool3d(x,
                                kernel_size=2,
                                stride=2, padding=0)
-         output.shape [1, 3, 16, 16, 16]
+         # output.shape [1, 3, 16, 16, 16]
          # for return_mask=True
-         x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
+         x = paddle.uniform([1, 3, 32, 32, 32])
          output, max_indices = paddle.nn.functional.max_pool3d(x,
                                                                kernel_size = 2,
                                                                stride = 2,
                                                                padding=0,
                                                                return_mask=True)
-         # output.shape [None, 3, 16, 16, 16], max_indices.shape [None, 3, 16, 16, 16],
+         # output.shape [1, 3, 16, 16, 16], max_indices.shape [1, 3, 16, 16, 16]
    """
    kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
    if stride is None:
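Put together, the fixed max_pool3d example runs as the sketch below; the only assumption is a Paddle 2.x build where paddle.uniform and F.max_pool3d are available:

    import paddle
    import paddle.nn.functional as F

    # 5-D input in [N, C, D, H, W] layout
    x = paddle.uniform([1, 3, 32, 32, 32])

    output = F.max_pool3d(x, kernel_size=2, stride=2, padding=0)
    print(output.shape)  # [1, 3, 16, 16, 16]

    # return_mask=True additionally returns the argmax indices
    output, max_indices = F.max_pool3d(x, kernel_size=2, stride=2, padding=0,
                                       return_mask=True)
    print(output.shape, max_indices.shape)  # [1, 3, 16, 16, 16] [1, 3, 16, 16, 16]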
@@ -1267,10 +1266,9 @@ def adaptive_avg_pool1d(x, output_size, name=None):
    Returns:
            Tensor: The output tensor of adaptive average pooling result. The data type is same
                      as input tensor.
-   Raises:
-           ValueError: 'output_size' should be an integer.
    Examples:
        .. code-block:: python
+           :name: code-example1
            # average adaptive pool1d
            # suppose input data in shape of [N, C, L], `output_size` is m or [m],
@@ -1286,10 +1284,9 @@ def adaptive_avg_pool1d(x, output_size, name=None):
            #
            import paddle
            import paddle.nn.functional as F
-           import numpy as np
-           data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
-           pool_out = F.adaptive_average_pool1d(data, output_size=16)
+           data = paddle.uniform([1, 3, 32])
+           pool_out = F.adaptive_avg_pool1d(data, output_size=16)
            # pool_out shape: [1, 3, 16])
    """
    pool_type = 'avg'
...
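The corrected call as a self-contained sketch: F.adaptive_avg_pool1d takes an [N, C, L] tensor and an integer output_size:

    import paddle
    import paddle.nn.functional as F

    data = paddle.uniform([1, 3, 32])          # [N, C, L]
    pool_out = F.adaptive_avg_pool1d(data, output_size=16)
    print(pool_out.shape)                      # [1, 3, 16]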
@@ -465,14 +465,18 @@ class HSigmoidLoss(Layer):
            import paddle
            paddle.set_device('cpu')
-           input = paddle.uniform([2, 3])
-           # [[-0.2820413   0.9528898  -0.81638825] # random
-           #  [-0.6733154  -0.33866507  0.25770962]] # random
+           input = paddle.uniform([4, 3])
+           # [[ 0.56194401 -0.22450298 -0.10741806] # random
+           #  [ 0.36136317  0.23556745  0.88748658] # random
+           #  [ 0.18151939  0.80947340 -0.31078976] # random
+           #  [ 0.68886101 -0.14239830 -0.41297770]] # random
            label = paddle.to_tensor([0, 1, 4, 5])
            m = paddle.nn.HSigmoidLoss(3, 5)
            out = m(input, label)
-           # [[2.4543471]
-           #  [1.9359267]]
+           # [[2.42524505]
+           #  [1.74917245]
+           #  [3.14571381]
+           #  [2.34564662]]
    """

    def __init__(self,
...
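The layer-style counterpart of the functional example, assembled from the corrected docstring (feature_size=3, num_classes=5; outputs are random):

    import paddle

    paddle.set_device('cpu')

    input = paddle.uniform([4, 3])
    label = paddle.to_tensor([0, 1, 4, 5])

    m = paddle.nn.HSigmoidLoss(3, 5)   # (feature_size, num_classes)
    out = m(input, label)
    print(out.shape)                   # [4, 1]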
@@ -178,7 +178,7 @@ def spectral_norm(layer,
        .. code-block:: python
            from paddle.nn import Conv2D
-           from paddle.nn.utils import Spectralnorm
+           from paddle.nn.utils import spectral_norm
            conv = Conv2D(3, 1, 3)
            sn_conv = spectral_norm(conv)
...
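The old example imported Spectralnorm but then called spectral_norm; with the corrected import the snippet runs end to end. A minimal sketch, assuming only the paddle.nn.utils.spectral_norm helper shown in the diff:

    import paddle
    from paddle.nn import Conv2D
    from paddle.nn.utils import spectral_norm

    conv = Conv2D(3, 1, 3)
    sn_conv = spectral_norm(conv)   # conv's 'weight' is spectrally normalized

    x = paddle.uniform([1, 3, 8, 8])
    y = sn_conv(x)
    print(y.shape)                  # [1, 1, 6, 6]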
@@ -219,8 +219,14 @@ def remove_weight_norm(layer, name='weight'):
            conv = Conv2D(3, 5, 3)
            wn = weight_norm(conv)
-           remove_weight_norm(conv)
            print(conv.weight_g)
+           # Parameter containing:
+           # Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+           #        [0., 0., 0., 0., 0.])
+           # Conv2D(3, 5, kernel_size=[3, 3], data_format=NCHW)
+           remove_weight_norm(conv)
+           # print(conv.weight_g)
            # AttributeError: 'Conv2D' object has no attribute 'weight_g'
    """
    for k, hook in layer._forward_pre_hooks.items():
...
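A compact sketch of the workflow the reordered example documents: conv.weight_g exists while the weight-norm hook is attached and is gone after remove_weight_norm (the place shown in the printed tensor depends on the device you run on):

    import paddle
    from paddle.nn import Conv2D
    from paddle.nn.utils import weight_norm, remove_weight_norm

    conv = Conv2D(3, 5, 3)
    wn = weight_norm(conv)
    print(conv.weight_g.shape)     # [5], one norm parameter per output channel

    remove_weight_norm(conv)
    # Accessing conv.weight_g now raises:
    # AttributeError: 'Conv2D' object has no attribute 'weight_g'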
@@ -851,15 +851,14 @@ def read_file(filename, name=None):
            import cv2
            import paddle
-           fake_img = (np.random.random(
-                       (400, 300, 3)) * 255).astype('uint8')
+           fake_img = (paddle.rand((400, 300, 3)).numpy() * 255).astype('uint8')
            cv2.imwrite('fake.jpg', fake_img)
            img_bytes = paddle.vision.ops.read_file('fake.jpg')
            print(img_bytes.shape)
+           # [142915]
    """
    if _non_static_mode():
...
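The corrected read_file example as a standalone script. It assumes OpenCV (cv2) is installed to write a temporary JPEG; the printed byte count depends on the encoded file, so it will not exactly match the value in the diff:

    import cv2
    import paddle

    # write a random 400x300 image to disk, then read it back as raw bytes
    fake_img = (paddle.rand((400, 300, 3)).numpy() * 255).astype('uint8')
    cv2.imwrite('fake.jpg', fake_img)

    img_bytes = paddle.vision.ops.read_file('fake.jpg')
    print(img_bytes.shape)   # e.g. [142915]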