Unverified commit e7caf3b8 authored by ceci3, committed by GitHub

fix examples, test=document_fix (#29019)

* fix examples, test=document_fix

* fix, test=document_fix
Parent db412585
@@ -3126,7 +3126,7 @@ def instance_norm(input,
`H` means height of feature map, `W` means width of feature map.
Args:
-input(variable): The rank of input variable can be 2, 3, 4, 5.
+input(Tensor): The rank of input tensor can be 2, 3, 4, 5.
The data type is float32 or float64.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
@@ -3146,19 +3146,18 @@ def instance_norm(input,
will be named automatically.
Returns:
-A Variable holding Tensor which is the result after applying instance normalization on the input,
+A Tensor which is the result after applying instance normalization on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
-import paddle.fluid as fluid
import paddle
paddle.enable_static()
-x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
-hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
-hidden2 = fluid.layers.instance_norm(input=hidden1)
+x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
+hidden1 = paddle.static.nn.fc(x, size=200)
+hidden2 = paddle.static.nn.instance_norm(hidden1)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'instance_norm')
......
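For context, a minimal runnable sketch of the updated static-graph usage shown in this hunk (paddle.static.data / paddle.static.nn.instance_norm); the Executor and feed boilerplate is an assumption added for illustration and is not part of this diff:

import numpy as np
import paddle

paddle.enable_static()
x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
# instance_norm normalizes each sample per channel; the output keeps the input shape.
hidden = paddle.static.nn.instance_norm(x)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
out, = exe.run(feed={'x': np.random.rand(3, 7, 3, 7).astype('float32')},
               fetch_list=[hidden])
print(out.shape)  # (3, 7, 3, 7): same shape and data type as the input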
@@ -515,9 +515,6 @@ class LeakyReLU(layers.Layer):
.. code-block:: python
import paddle
import numpy as np
-paddle.disable_static()
m = paddle.nn.LeakyReLU()
x = paddle.to_tensor(np.array([-2, 0, 1], 'float32'))
......
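A self-contained dygraph sketch of the trimmed LeakyReLU example; building the tensor from a plain Python list and the default negative_slope of 0.01 are assumptions from the released 2.0 API, not from this diff:

import paddle

m = paddle.nn.LeakyReLU()               # negative_slope defaults to 0.01
x = paddle.to_tensor([-2.0, 0.0, 1.0])
out = m(x)
print(out)                              # [-0.02, 0., 1.]: negatives are scaled by the slope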
@@ -996,12 +996,12 @@ class SyncBatchNorm(_BatchNormBase):
import numpy as np
x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
-paddle.disable_static()
x = paddle.to_tensor(x)
-if paddle.fluid.is_compiled_with_cuda():
+if paddle.is_compiled_with_cuda():
sync_batch_norm = nn.SyncBatchNorm(2)
hidden1 = sync_batch_norm(x)
-print(hidden1.numpy())
+print(hidden1)
# [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]]
"""
@@ -1096,7 +1096,6 @@ class SyncBatchNorm(_BatchNormBase):
import paddle
import paddle.nn as nn
-paddle.disable_static()
model = nn.Sequential(nn.Conv2D(3, 5, 3), nn.BatchNorm2D(5))
sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
......
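A sketch of the helper in the second SyncBatchNorm hunk: convert_sync_batchnorm walks a model and swaps its batch-norm sublayers, which printing the converted model makes visible (the print is illustrative, not part of the diff):

import paddle
import paddle.nn as nn

model = nn.Sequential(nn.Conv2D(3, 5, 3), nn.BatchNorm2D(5))
# Replace every _BatchNormBase sublayer with an equivalent SyncBatchNorm layer.
sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
print(sync_model)   # the BatchNorm2D sublayer now shows up as SyncBatchNorm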
@@ -188,7 +188,6 @@ def weight_norm(layer, name='weight', dim=0):
from paddle.nn.utils import weight_norm
x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
-paddle.disable_static()
conv = Conv2D(3, 5, 3)
wn = weight_norm(conv)
print(conv.weight_g.shape)
@@ -217,7 +216,6 @@ def remove_weight_norm(layer, name='weight'):
from paddle.nn import Conv2D
from paddle.nn.utils import weight_norm, remove_weight_norm
-paddle.disable_static()
conv = Conv2D(3, 5, 3)
wn = weight_norm(conv)
remove_weight_norm(conv)
......
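Finally, a combined sketch of the two weight-norm examples above after the disable_static() cleanup; the printed shapes assume the default dim=0 split of Conv2D's [5, 3, 3, 3] weight:

import paddle
from paddle.nn import Conv2D
from paddle.nn.utils import weight_norm, remove_weight_norm

conv = Conv2D(3, 5, 3)
wn = weight_norm(conv)            # reparameterize 'weight' into magnitude 'weight_g' and direction 'weight_v'
print(conv.weight_g.shape)        # [5]: one magnitude per output channel (dim=0)
print(conv.weight_v.shape)        # [5, 3, 3, 3]

remove_weight_norm(conv)          # fold g and v back into a plain 'weight' parameter
print(hasattr(conv, 'weight_g'))  # False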