Unverified commit 8388abe6, authored by: Z zhang wenhui, committed by: GitHub

Fix api 1128 (#29174)

* fix 2.0 api, test=develop

* fix api, test=develop
Parent f92fdfb8
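Every hunk below applies the same two fixes to docstring examples: in Paddle 2.0 dynamic graph mode is the default, so the `paddle.disable_static()` calls are dropped, and a Tensor now prints readably on its own, so `print(out.numpy())` becomes `print(out)`. A minimal sketch of the updated example style (assuming Paddle >= 2.0; the shapes mirror the BatchNorm2D example edited below):

# Sketch of the pattern this commit applies (assumes Paddle >= 2.0,
# where dynamic mode is the default).
import paddle
import numpy as np

np.random.seed(123)
x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)

batch_norm = paddle.nn.BatchNorm2D(1)
batch_norm_out = batch_norm(x)

# No paddle.disable_static() call and no .numpy() conversion needed:
print(batch_norm_out)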
@@ -150,7 +150,6 @@ def batch_norm(x,
import paddle
import numpy as np
- paddle.disable_static()
np.random.seed(123)
x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
running_mean = np.random.random(size=1).astype('float32')
@@ -163,7 +162,7 @@ def batch_norm(x,
w = paddle.to_tensor(weight_data)
b = paddle.to_tensor(bias_data)
batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
- print(batch_norm_out.numpy())
+ print(batch_norm_out)
"""
assert len(x.shape) >= 2, "input dim must be larger than 1"
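The two hunks above edit a single batch_norm example whose middle lines are elided by the diff view. For reference, a self-contained version of that example; the rm/rv/w/b setup between the hunks is reconstructed by analogy with the surrounding lines and is an assumption, not part of the commit:

import paddle
import numpy as np

np.random.seed(123)
x = paddle.to_tensor(np.random.random(size=(2, 1, 2, 3)).astype('float32'))
# Per-channel statistics and affine parameters (the channel dim is 1 here);
# this block is reconstructed, not taken verbatim from the commit.
rm = paddle.to_tensor(np.random.random(size=1).astype('float32'))  # running mean
rv = paddle.to_tensor(np.random.random(size=1).astype('float32'))  # running variance
w = paddle.to_tensor(np.random.random(size=1).astype('float32'))   # scale (weight)
b = paddle.to_tensor(np.random.random(size=1).astype('float32'))   # shift (bias)

batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
print(batch_norm_out)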
@@ -269,14 +268,13 @@ def layer_norm(x,
import paddle
import numpy as np
- paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
layer_norm_out = paddle.nn.functional.layer_norm(x, x.shape[1:])
- print(layer_norm_out.numpy())
+ print(layer_norm_out)
"""
input_shape = list(x.shape)
input_ndim = len(input_shape)
@@ -362,13 +360,12 @@ def instance_norm(x,
import paddle
import numpy as np
- paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
instance_norm_out = paddle.nn.functional.instance_norm(x)
- print(instance_norm_out.numpy())
+ print(instance_norm_out)
"""
@@ -163,14 +163,13 @@ class InstanceNorm1D(_InstanceNormBase):
import paddle
import numpy as np
- paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
instance_norm = paddle.nn.InstanceNorm1D(2)
instance_norm_out = instance_norm(x)
- print(instance_norm_out.numpy())
+ print(instance_norm_out)
"""
@@ -235,14 +234,13 @@ class InstanceNorm2D(_InstanceNormBase):
import paddle
import numpy as np
- paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
instance_norm = paddle.nn.InstanceNorm2D(2)
instance_norm_out = instance_norm(x)
- print(instance_norm_out.numpy())
+ print(instance_norm_out)
"""
def _check_input_dim(self, input):
@@ -306,14 +304,13 @@ class InstanceNorm3D(_InstanceNormBase):
import paddle
import numpy as np
- paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
instance_norm = paddle.nn.InstanceNorm3D(2)
instance_norm_out = instance_norm(x)
- print(instance_norm_out.numpy())
+ print(instance_norm_out)
"""
def _check_input_dim(self, input):
@@ -352,6 +349,7 @@ class GroupNorm(layers.Layer):
Examples:
.. code-block:: python
import paddle
import numpy as np
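The hunk above is truncated right after the imports, so the GroupNorm example body is not visible here. A plausible completion in the same style as the other examples (assuming the standard `paddle.nn.GroupNorm(num_groups, num_channels)` signature; the concrete shapes are illustrative, not taken from the commit):

import paddle
import numpy as np

np.random.seed(123)
# Six channels split into six groups of one channel each (assumed shapes).
x_data = np.random.random(size=(2, 6, 2, 2)).astype('float32')
x = paddle.to_tensor(x_data)
group_norm = paddle.nn.GroupNorm(num_groups=6, num_channels=6)
group_norm_out = group_norm(x)
print(group_norm_out)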
@@ -492,14 +490,13 @@ class LayerNorm(layers.Layer):
import paddle
import numpy as np
- paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
layer_norm = paddle.nn.LayerNorm(x_data.shape[1:])
layer_norm_out = layer_norm(x)
- print(layer_norm_out.numpy())
+ print(layer_norm_out)
"""
def __init__(self,
@@ -714,14 +711,13 @@ class BatchNorm1D(_BatchNormBase):
import paddle
import numpy as np
- paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 1, 3)).astype('float32')
x = paddle.to_tensor(x_data)
batch_norm = paddle.nn.BatchNorm1D(1)
batch_norm_out = batch_norm(x)
- print(batch_norm_out.numpy())
+ print(batch_norm_out)
"""
def _check_data_format(self, input):
@@ -804,14 +800,13 @@ class BatchNorm2D(_BatchNormBase):
import paddle
import numpy as np
- paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
batch_norm = paddle.nn.BatchNorm2D(1)
batch_norm_out = batch_norm(x)
- print(batch_norm_out.numpy())
+ print(batch_norm_out)
"""
def _check_data_format(self, input):
@@ -893,14 +888,13 @@ class BatchNorm3D(_BatchNormBase):
import paddle
import numpy as np
- paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 1, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
batch_norm = paddle.nn.BatchNorm3D(1)
batch_norm_out = batch_norm(x)
- print(batch_norm_out.numpy())
+ print(batch_norm_out)
"""
def _check_data_format(self, input):
@@ -50,8 +50,8 @@ class Adagrad(Optimizer):
The default value is None in static mode, at this time all parameters will be updated.
weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
It can be a float value as the coefficient of L2 regularization or \
- :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
- If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \
+ :ref:`api_paddle_regularizer_L1Decay`, :ref:`api_paddle_regularizer_L2Decay`.
+ If a parameter has set regularizer using :ref:`api_paddle_fluid_param_attr_ParamAttr` already, \
the regularization setting here in optimizer will be ignored for this parameter. \
Otherwise, the regularization setting here in optimizer will take effect. \
Default None, meaning there is no regularization.
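To make the weight_decay semantics described above concrete, a short sketch (assuming the Paddle 2.0 `paddle.optimizer.Adagrad` API; the 0.01 coefficient and the training step are illustrative, not from the commit):

import paddle

linear = paddle.nn.Linear(10, 10)
inp = paddle.rand(shape=[10, 10])

# A bare float is treated as the L2 coefficient; a paddle.regularizer.L1Decay
# or L2Decay instance may be passed instead. Per-parameter regularizers set
# via ParamAttr take precedence over this optimizer-level setting.
adagrad = paddle.optimizer.Adagrad(
    learning_rate=0.1,
    parameters=linear.parameters(),
    weight_decay=0.01)

out = linear(inp)
loss = paddle.mean(out)
loss.backward()
adagrad.step()
adagrad.clear_grad()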
@@ -71,7 +71,6 @@ class Adagrad(Optimizer):
import paddle
import numpy as np
- paddle.disable_static()
inp = paddle.rand(shape=[10, 10])
linear = paddle.nn.Linear(10, 10)
out = linear(inp)