From e7caf3b8d9fee95b8814ca51f8513d719b0d1831 Mon Sep 17 00:00:00 2001
From: ceci3
Date: Thu, 26 Nov 2020 14:10:01 +0800
Subject: [PATCH] fix examples, test=document_fix (#29019)

* fix examples, test=document_fix

* fix, test=document_fix
---
 python/paddle/fluid/layers/nn.py           | 11 +++++------
 python/paddle/nn/layer/activation.py       |  3 ---
 python/paddle/nn/layer/norm.py             |  7 +++----
 python/paddle/nn/utils/weight_norm_hook.py |  2 --
 4 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 9bbec75ba0c..97dea27f3b7 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -3126,7 +3126,7 @@ def instance_norm(input,
         `H` means height of feature map, `W` means width of feature map.
 
     Args:
-        input(variable): The rank of input variable can be 2, 3, 4, 5.
+        input(Tensor): The rank of input tensor can be 2, 3, 4, 5.
             The data type is float32 or float64.
         epsilon(float, Default 1e-05): A value added to the denominator for
             numerical stability. Default is 1e-5.
@@ -3146,19 +3146,18 @@ def instance_norm(input,
             will be named automatically.
 
     Returns:
-        A Variable holding Tensor which is the result after applying instance normalization on the input,
+        A Tensor which is the result after applying instance normalization on the input,
         has same shape and data type with input.
 
     Examples:
 
         .. code-block:: python
 
-            import paddle.fluid as fluid
             import paddle
             paddle.enable_static()
-            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
-            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
-            hidden2 = fluid.layers.instance_norm(input=hidden1)
+            x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
+            hidden1 = paddle.static.nn.fc(x, size=200)
+            hidden2 = paddle.static.nn.instance_norm(hidden1)
     """
     check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                              'instance_norm')
diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py
index b002b534625..edab5660517 100644
--- a/python/paddle/nn/layer/activation.py
+++ b/python/paddle/nn/layer/activation.py
@@ -515,9 +515,6 @@ class LeakyReLU(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
-
-            paddle.disable_static()
 
             m = paddle.nn.LeakyReLU()
             x = paddle.to_tensor(np.array([-2, 0, 1], 'float32'))
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 7bff2d64a65..181cc4de4b2 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -996,12 +996,12 @@ class SyncBatchNorm(_BatchNormBase):
           import numpy as np
 
           x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
-          paddle.disable_static()
           x = paddle.to_tensor(x)
-          if paddle.fluid.is_compiled_with_cuda():
+
+          if paddle.is_compiled_with_cuda():
               sync_batch_norm = nn.SyncBatchNorm(2)
               hidden1 = sync_batch_norm(x)
-              print(hidden1.numpy())
+              print(hidden1)
               # [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]]
 
     """
 
@@ -1096,7 +1096,6 @@ class SyncBatchNorm(_BatchNormBase):
           import paddle
           import paddle.nn as nn
 
-          paddle.disable_static()
           model = nn.Sequential(nn.Conv2D(3, 5, 3), nn.BatchNorm2D(5))
           sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
 
diff --git a/python/paddle/nn/utils/weight_norm_hook.py b/python/paddle/nn/utils/weight_norm_hook.py
index 89a7a53b0aa..59a69337f2e 100755
--- a/python/paddle/nn/utils/weight_norm_hook.py
+++ b/python/paddle/nn/utils/weight_norm_hook.py
@@ -188,7 +188,6 @@ def weight_norm(layer, name='weight', dim=0):
           from paddle.nn.utils import weight_norm
 
           x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
-          paddle.disable_static()
           conv = Conv2D(3, 5, 3)
           wn = weight_norm(conv)
           print(conv.weight_g.shape)
@@ -217,7 +216,6 @@ def remove_weight_norm(layer, name='weight'):
          from paddle.nn import Conv2D
          from paddle.nn.utils import weight_norm, remove_weight_norm
 
-         paddle.disable_static()
          conv = Conv2D(3, 5, 3)
          wn = weight_norm(conv)
          remove_weight_norm(conv)
-- 
GitLab
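
Note on the activation.py hunk: it deletes `import numpy as np` but keeps the context line `x = paddle.to_tensor(np.array([-2, 0, 1], 'float32'))`, so the rendered LeakyReLU example still references an undefined `np`. A self-contained sketch of that example under the paddle 2.0 imperative API (`paddle.to_tensor` accepts plain Python lists, so numpy is not needed):

.. code-block:: python

    import paddle

    m = paddle.nn.LeakyReLU()               # default negative_slope is 0.01
    x = paddle.to_tensor([-2.0, 0.0, 1.0])  # list input; no numpy required
    out = m(x)                              # [-0.02, 0., 1.]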
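The updated instance_norm example in nn.py builds a static graph but never runs it. For readers migrating off fluid, here is a runnable sketch under the paddle 2.0 static API; the executor setup and the random feed are illustrative additions, not part of the patch:

.. code-block:: python

    import numpy as np
    import paddle

    paddle.enable_static()

    # Same graph as the patched docstring example.
    x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
    hidden1 = paddle.static.nn.fc(x, size=200)         # flattens trailing dims: [3, 200]
    hidden2 = paddle.static.nn.instance_norm(hidden1)  # rank-2 input is allowed per the docstring

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(paddle.static.default_startup_program())
    out, = exe.run(feed={'x': np.random.rand(3, 7, 3, 7).astype('float32')},
                   fetch_list=[hidden2])
    print(out.shape)  # (3, 200)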
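On the weight_norm_hook.py hunks: `weight_norm` reparameterizes a layer's `weight` into a magnitude tensor `weight_g` and a direction tensor `weight_v`, and `remove_weight_norm` folds them back into a single `weight`. A short sketch of the shapes involved for the `Conv2D(3, 5, 3)` layer used in those examples; the commented values assume the default `dim=0`, i.e. one norm per output channel:

.. code-block:: python

    from paddle.nn import Conv2D
    from paddle.nn.utils import weight_norm, remove_weight_norm

    conv = Conv2D(3, 5, 3)        # kernel shape: [5, 3, 3, 3]
    conv = weight_norm(conv)      # splits weight into weight_g and weight_v
    print(conv.weight_g.shape)    # [5]  (one magnitude per output channel)
    print(conv.weight_v.shape)    # [5, 3, 3, 3]

    remove_weight_norm(conv)      # recombines into a single weight
    print(conv.weight.shape)      # [5, 3, 3, 3]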