From dc12605d59f7ea04733f397185c742b3c8f092bd Mon Sep 17 00:00:00 2001
From: Chen Long <1300851984@qq.com>
Date: Tue, 28 Jun 2022 19:05:28 +0800
Subject: [PATCH] [cherry-pick] Fix code examples (#43904)

* Update api docs (#42725)

* Fix max_pool3d doc, test=document_fix (#42715)

* fix pooling doc

* fix typo test=document_fix

* fix doc typo, test=document_fix

* fix adaptive_avg_pool1d doc bug (#42721)

* fix adaptive_avg_pool1d doc bug

* fix adaptive_avg_pool1d doc bug

* fix spectral_norm en doc (#42728)

* Fix example code bugs (#42739)

* update readme test=document_fix

* fix api docs bugs test=document_fix

* fix code example bugs;test=document_fix

Co-authored-by: Linjie Chen <40840292+linjieccc@users.noreply.github.com>
Co-authored-by: Wei Shengyu
Co-authored-by: Walter
Co-authored-by: wangna11BD <79366697+wangna11BD@users.noreply.github.com>
---
 python/paddle/nn/functional/loss.py          | 22 ++++++++++--------
 python/paddle/nn/functional/pooling.py       | 19 +++++++---------
 python/paddle/nn/layer/loss.py               | 14 ++++++++----
 python/paddle/nn/utils/spectral_norm_hook.py |  2 +-
 python/paddle/nn/utils/weight_norm_hook.py   | 24 ++++++++++++--------
 python/paddle/vision/ops.py                  |  7 +++---
 6 files changed, 49 insertions(+), 39 deletions(-)

diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index 593cea2d2cf..94cd8806444 100755
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -389,20 +389,24 @@ def hsigmoid_loss(input,
 
             paddle.set_device('cpu')
 
-            input = paddle.uniform([2, 3])
-            # [[-0.8018668   0.8736385  -0.9064771 ] # random
-            # [-0.10228515  -0.87188244 -0.8783718 ]] # random
+            input = paddle.uniform([4, 3])
+            # [[0.45424712  -0.77296764  0.82943869] # random
+            # [0.85062802   0.63303483   0.35312140] # random
+            # [0.57170701   0.16627562   0.21588242] # random
+            # [0.27610803  -0.99303514  -0.17114788]] # random
             label = paddle.to_tensor([0, 1, 4, 5])
             num_classes = 5
             weight=paddle.uniform([num_classes-1, 3])
-            # [[-0.24148715  0.8449961  -0.7399121 ] # random
-            # [-0.9800559   0.43509364  0.9091208 ] # random
-            # [ 0.60194826  0.10430074 -0.4521166 ] # random
-            # [-0.4469818  -0.01536179 -0.604454  ]] # random
+            # [[-0.64477652  0.24821866 -0.17456549] # random
+            # [-0.04635394   0.07473493 -0.25081766] # random
+            # [ 0.05986035  -0.12185556  0.45153677] # random
+            # [-0.66236806   0.91271877 -0.88088769]] # random
             out=F.hsigmoid_loss(input, label, num_classes, weight)
-            # [[3.0159328]
-            # [2.2407534]]
+            # [[1.96709502]
+            # [2.40019274]
+            # [2.11009121]
+            # [1.92374969]]
     """

     if in_dynamic_mode():
diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py
index b9cae478472..2e2524a03e5 100755
--- a/python/paddle/nn/functional/pooling.py
+++ b/python/paddle/nn/functional/pooling.py
@@ -1160,22 +1160,21 @@ def max_pool3d(x,
           import paddle
           import paddle.nn.functional as F
-          import numpy as np
 
           # max pool3d
-          x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
-          output = F.max_pool2d(x,
+          x = paddle.uniform([1, 3, 32, 32, 32])
+          output = F.max_pool3d(x,
                                 kernel_size=2,
                                 stride=2, padding=0)
-          output.shape [1, 3, 16, 16, 16]
+          # output.shape [1, 3, 16, 16, 16]
 
           # for return_mask=True
-          x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
+          x = paddle.uniform([1, 3, 32, 32, 32])
           output, max_indices = paddle.nn.functional.max_pool3d(x,
                                           kernel_size = 2,
                                           stride = 2,
                                           padding=0,
                                           return_mask=True)
-          # output.shape [None, 3, 16, 16, 16], max_indices.shape [None, 3, 16, 16, 16],
+          # output.shape [1, 3, 16, 16, 16], max_indices.shape [1, 3, 16, 16, 16]
    """
    kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
    if stride is None:
@@ -1267,10 +1266,9 @@ def adaptive_avg_pool1d(x, output_size, name=None):
    Returns:
        Tensor: The output tensor of adaptive average pooling result. The data type is same as input tensor.
-   Raises:
-       ValueError: 'output_size' should be an integer.
    Examples:
        .. code-block:: python
+          :name: code-example1

          # average adaptive pool1d
          # suppose input data in shape of [N, C, L], `output_size` is m or [m],
          #
          import paddle
          import paddle.nn.functional as F
-         import numpy as np
-         data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
-         pool_out = F.adaptive_average_pool1d(data, output_size=16)
+         data = paddle.uniform([1, 3, 32])
+         pool_out = F.adaptive_avg_pool1d(data, output_size=16)
         # pool_out shape: [1, 3, 16])
    """
    pool_type = 'avg'
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index 7e40c029a02..d91472b3d5f 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -465,14 +465,18 @@ class HSigmoidLoss(Layer):
            import paddle
            paddle.set_device('cpu')
 
-           input = paddle.uniform([2, 3])
-           # [[-0.2820413   0.9528898  -0.81638825] # random
-           # [-0.6733154  -0.33866507  0.25770962]] # random
+           input = paddle.uniform([4, 3])
+           # [[0.56194401 -0.22450298 -0.10741806] # random
+           # [0.36136317  0.23556745  0.88748658] # random
+           # [0.18151939  0.80947340 -0.31078976] # random
+           # [0.68886101 -0.14239830 -0.41297770]] # random
            label = paddle.to_tensor([0, 1, 4, 5])
            m = paddle.nn.HSigmoidLoss(3, 5)
            out = m(input, label)
-           # [[2.4543471]
-           # [1.9359267]]
+           # [[2.42524505]
+           # [1.74917245]
+           # [3.14571381]
+           # [2.34564662]]
    """

    def __init__(self,
diff --git a/python/paddle/nn/utils/spectral_norm_hook.py b/python/paddle/nn/utils/spectral_norm_hook.py
index 75266abdf0d..56c9e83c38b 100644
--- a/python/paddle/nn/utils/spectral_norm_hook.py
+++ b/python/paddle/nn/utils/spectral_norm_hook.py
@@ -178,7 +178,7 @@ def spectral_norm(layer,
        .. code-block:: python

           from paddle.nn import Conv2D
-          from paddle.nn.utils import Spectralnorm
+          from paddle.nn.utils import spectral_norm

           conv = Conv2D(3, 1, 3)
           sn_conv = spectral_norm(conv)
diff --git a/python/paddle/nn/utils/weight_norm_hook.py b/python/paddle/nn/utils/weight_norm_hook.py
index c131d218a1c..84644ccc484 100755
--- a/python/paddle/nn/utils/weight_norm_hook.py
+++ b/python/paddle/nn/utils/weight_norm_hook.py
@@ -213,15 +213,21 @@ def remove_weight_norm(layer, name='weight'):
    Examples:
        .. code-block:: python
-          import paddle
-          from paddle.nn import Conv2D
-          from paddle.nn.utils import weight_norm, remove_weight_norm
-
-          conv = Conv2D(3, 5, 3)
-          wn = weight_norm(conv)
-          remove_weight_norm(conv)
-          print(conv.weight_g)
-          # AttributeError: 'Conv2D' object has no attribute 'weight_g'
+            import paddle
+            from paddle.nn import Conv2D
+            from paddle.nn.utils import weight_norm, remove_weight_norm
+
+            conv = Conv2D(3, 5, 3)
+            wn = weight_norm(conv)
+            print(conv.weight_g)
+            # Parameter containing:
+            # Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+            #        [0., 0., 0., 0., 0.])
+            # Conv2D(3, 5, kernel_size=[3, 3], data_format=NCHW)
+
+            remove_weight_norm(conv)
+            # print(conv.weight_g)
+            # AttributeError: 'Conv2D' object has no attribute 'weight_g'
    """
    for k, hook in layer._forward_pre_hooks.items():
        if isinstance(hook, WeightNorm) and hook.name == name:
diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py
index d45c652885b..352ec13dc00 100644
--- a/python/paddle/vision/ops.py
+++ b/python/paddle/vision/ops.py
@@ -851,15 +851,14 @@ def read_file(filename, name=None):
            import cv2
            import paddle
 
-           fake_img = (np.random.random(
-                       (400, 300, 3)) * 255).astype('uint8')
-
+           fake_img = (paddle.rand((400, 300, 3)).numpy() * 255).astype('uint8')
+
            cv2.imwrite('fake.jpg', fake_img)
 
            img_bytes = paddle.vision.ops.read_file('fake.jpg')
 
            print(img_bytes.shape)
-
+           # [142915]
    """
    if _non_static_mode():
-- 
GitLab
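As a quick check of the corrected examples above, the snippet below re-runs the fixed calls and verifies the shapes the updated docstrings claim. It is a minimal sketch, not part of the patch, and assumes a working Paddle 2.x installation (CPU is fine).

.. code-block:: python

    # Sanity-check of the corrected docstring examples; assumes Paddle 2.x.
    import paddle
    import paddle.nn.functional as F
    from paddle.nn import Conv2D
    from paddle.nn.utils import spectral_norm, weight_norm, remove_weight_norm

    # max_pool3d on a [N, C, D, H, W] input, kernel 2 / stride 2.
    x = paddle.uniform([1, 3, 32, 32, 32])
    out = F.max_pool3d(x, kernel_size=2, stride=2, padding=0)
    assert out.shape == [1, 3, 16, 16, 16]

    # adaptive_avg_pool1d on a [N, C, L] input, pooled to length 16.
    data = paddle.uniform([1, 3, 32])
    pool_out = F.adaptive_avg_pool1d(data, output_size=16)
    assert pool_out.shape == [1, 3, 16]

    # The fixed imports are the function-style hooks, not a Spectralnorm class.
    sn_conv = spectral_norm(Conv2D(3, 1, 3))

    conv = Conv2D(3, 5, 3)
    conv = weight_norm(conv)
    print(conv.weight_g.shape)   # [5]
    remove_weight_norm(conv)     # afterwards conv.weight_g no longer exists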