diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py
index 94ab2e63faeecce29b9eff6dce0c97439ab9b3d3..efde54182e5a0b2de0a13ccddbd33bff1b3f8f78 100644
--- a/python/paddle/nn/functional/norm.py
+++ b/python/paddle/nn/functional/norm.py
@@ -150,7 +150,6 @@ def batch_norm(x,
           import paddle
           import numpy as np

-          paddle.disable_static()
           x = np.random.seed(123)
           x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
           running_mean = np.random.random(size=1).astype('float32')
@@ -163,7 +162,7 @@ def batch_norm(x,
           w = paddle.to_tensor(weight_data)
           b = paddle.to_tensor(bias_data)
           batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
-          print(batch_norm_out.numpy())
+          print(batch_norm_out)
     """

     assert len(x.shape) >= 2, "input dim must be larger than 1"
@@ -269,14 +268,13 @@ def layer_norm(x,
           import paddle
           import numpy as np

-          paddle.disable_static()
           np.random.seed(123)
           x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
           layer_norm = paddle.nn.functional.layer_norm(x, x.shape[1:])
           layer_norm_out = layer_norm(x)

-          print(layer_norm_out.numpy())
+          print(layer_norm_out)
     """
     input_shape = list(x.shape)
     input_ndim = len(input_shape)
@@ -362,13 +360,12 @@ def instance_norm(x,
           import paddle
           import numpy as np

-          paddle.disable_static()
           np.random.seed(123)
           x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
           instance_norm_out = paddle.nn.functional.instancenorm(x)

-          print(instance_norm_out.numpy())
+          print(instance_norm_out)

     """
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 181cc4de4b2707f1bb85109ca3ebe16e906eb284..b1f6906386cc6b09c8ba2344b821296ef94c7505 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -163,14 +163,13 @@ class InstanceNorm1D(_InstanceNormBase):
           import paddle
           import numpy as np

-          paddle.disable_static()
           np.random.seed(123)
           x_data = np.random.random(size=(2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
           instance_norm = paddle.nn.InstanceNorm1D(2)
           instance_norm_out = instance_norm(x)

-          print(instance_norm_out.numpy())
+          print(instance_norm_out)

     """

@@ -235,14 +234,13 @@ class InstanceNorm2D(_InstanceNormBase):
           import paddle
           import numpy as np

-          paddle.disable_static()
           np.random.seed(123)
           x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
           instance_norm = paddle.nn.InstanceNorm2D(2)
           instance_norm_out = instance_norm(x)

-          print(instance_norm_out.numpy())
+          print(instance_norm_out)
     """

     def _check_input_dim(self, input):
@@ -306,14 +304,13 @@ class InstanceNorm3D(_InstanceNormBase):
           import paddle
           import numpy as np

-          paddle.disable_static()
           np.random.seed(123)
           x_data = np.random.random(size=(2, 2, 2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
           instance_norm = paddle.nn.InstanceNorm3D(2)
           instance_norm_out = instance_norm(x)

-          print(instance_norm_out.numpy())
+          print(instance_norm_out)
     """

     def _check_input_dim(self, input):
@@ -352,6 +349,7 @@ class GroupNorm(layers.Layer):

     Examples:
        .. code-block:: python
+
           import paddle
           import numpy as np

@@ -492,14 +490,13 @@ class LayerNorm(layers.Layer):
           import paddle
           import numpy as np

-          paddle.disable_static()
           np.random.seed(123)
           x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
           layer_norm = paddle.nn.LayerNorm(x_data.shape[1:])
           layer_norm_out = layer_norm(x)

-          print(layer_norm_out.numpy())
+          print(layer_norm_out)
     """

     def __init__(self,
@@ -714,14 +711,13 @@ class BatchNorm1D(_BatchNormBase):
           import paddle
           import numpy as np

-          paddle.disable_static()
           np.random.seed(123)
           x_data = np.random.random(size=(2, 1, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
           batch_norm = paddle.nn.BatchNorm1D(1)
           batch_norm_out = batch_norm(x)

-          print(batch_norm_out.numpy())
+          print(batch_norm_out)
     """

     def _check_data_format(self, input):
@@ -804,14 +800,13 @@ class BatchNorm2D(_BatchNormBase):
           import paddle
           import numpy as np

-          paddle.disable_static()
           np.random.seed(123)
           x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
           batch_norm = paddle.nn.BatchNorm2D(1)
           batch_norm_out = batch_norm(x)

-          print(batch_norm_out.numpy())
+          print(batch_norm_out)
     """

     def _check_data_format(self, input):
@@ -893,14 +888,13 @@ class BatchNorm3D(_BatchNormBase):
           import paddle
           import numpy as np

-          paddle.disable_static()
           np.random.seed(123)
           x_data = np.random.random(size=(2, 1, 2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
           batch_norm = paddle.nn.BatchNorm3D(1)
           batch_norm_out = batch_norm(x)

-          print(batch_norm_out.numpy())
+          print(batch_norm_out)
     """

     def _check_data_format(self, input):
diff --git a/python/paddle/optimizer/adagrad.py b/python/paddle/optimizer/adagrad.py
index 72a3f8ce99606f500fb4985688a026e54924948d..ec14828e693ee67c03a748622bc1034f1303d80f 100644
--- a/python/paddle/optimizer/adagrad.py
+++ b/python/paddle/optimizer/adagrad.py
@@ -50,8 +50,8 @@ class Adagrad(Optimizer):
             The default value is None in static mode, at this time all parameters will be updated.
         weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
             It canbe a float value as coeff of L2 regularization or \
-            :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
-            If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \
+            :ref:`api_paddle_regularizer_L1Decay`, :ref:`api_paddle_regularizer_L2Decay`.
+            If a parameter has set regularizer using :ref:`api_paddle_fluid_param_attr_ParamAttr` already, \
             the regularization setting here in optimizer will be ignored for this parameter. \
             Otherwise, the regularization setting here in optimizer will take effect. \
             Default None, meaning there is no regularization.
@@ -71,7 +71,6 @@ class Adagrad(Optimizer):
             import paddle
             import numpy as np

-            paddle.disable_static()
             inp = paddle.rand(shape=[10, 10])
             linear = paddle.nn.Linear(10, 10)
             out = linear(inp)
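
Not part of the diff: a minimal sketch of the usage pattern these docstring changes reflect. Since Paddle 2.0 the dynamic graph (imperative) mode is enabled by default, so the examples no longer need to call paddle.disable_static(), and a Tensor can be printed directly instead of converting it with .numpy(). The layer and input shape below are illustrative only.

    import numpy as np
    import paddle

    np.random.seed(123)
    x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32')
    x = paddle.to_tensor(x_data)

    batch_norm = paddle.nn.BatchNorm2D(1)   # 1 channel, NCHW input
    batch_norm_out = batch_norm(x)
    print(batch_norm_out)                   # prints the Tensor (shape, dtype, values) directly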