# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define normalization api

import numbers
import warnings

import numpy as np

from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.device import get_all_custom_device_type
from paddle.fluid.framework import in_dygraph_mode

from ...fluid import dygraph_utils
from ...fluid.data_feeder import check_variable_and_dtype
from ...framework import ParamAttr, _global_flags, get_default_dtype, no_grad
from .. import functional as F
from ..functional import batch_norm, instance_norm, layer_norm
from ..initializer import Constant, Normal
from .layers import Layer

__all__ = []


class _InstanceNormBase(Layer):
    """
    This class is the base class of InstanceNorm1D, InstanceNorm2D and InstanceNorm3D.

    See InstanceNorm1D, InstanceNorm2D or InstanceNorm3D for more details.
    """

    def __init__(
        self,
        num_features,
        epsilon=1e-5,
        momentum=0.9,
        weight_attr=None,
        bias_attr=None,
        data_format="NCHW",
        name=None,
    ):
        super().__init__()

        if weight_attr is False or bias_attr is False:
            assert (
                weight_attr == bias_attr
            ), "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
        self._epsilon = epsilon
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._num_features = num_features

        if weight_attr is not False and bias_attr is not False:
            self.scale = self.create_parameter(
                attr=self._weight_attr,
                shape=[num_features],
                default_initializer=Constant(1.0),
                is_bias=False,
            )
            self.bias = self.create_parameter(
                attr=self._bias_attr,
                shape=[num_features],
                default_initializer=Constant(0.0),
                is_bias=True,
            )
        else:
            self.scale = None
            self.bias = None

    def _check_input_dim(self, input):
        raise NotImplementedError("InstanceNorm Base error")

    def forward(self, input):
        self._check_input_dim(input)

        return instance_norm(
            input, weight=self.scale, bias=self.bias, eps=self._epsilon
        )

    def extra_repr(self):
        return 'num_features={}, epsilon={}'.format(
            self._num_features, self._epsilon
        )


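# A minimal sketch (an editor's addition, not part of the original API): the
# weight_attr/bias_attr contract asserted above means both must be False
# together, in which case no learnable scale or bias parameter is created.
# The helper name below is hypothetical; call it manually if desired.
def _demo_instance_norm_no_affine():
    import paddle

    norm = paddle.nn.InstanceNorm2D(3, weight_attr=False, bias_attr=False)
    # scale and bias are None, so the layer holds no learnable parameters
    assert len(norm.parameters()) == 0

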
class InstanceNorm1D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm1D`. Applies Instance Normalization over a 3D input (a mini-batch of 1D inputs with an additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.

    DataLayout: NCL `[batch, in_channels, length]`

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means height of feature map, `W` means width of feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale` of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            to one. If it is set to False, no scale parameter will be created. Default: None. For more information, please refer to :ref:`api_paddle_ParamAttr` .
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero.
            If it is set to False, no bias parameter will be created. Default: None. For more information, please refer to :ref:`api_paddle_ParamAttr` .
        data_format(str, optional): Specify the input data format, may be "NC" or "NCL". Default "NCL".
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name` .


    Shape:
        - x: 2-D or 3-D tensor with shape: (batch, num_features) or (batch, num_features, length).
        - output: tensor with the same shape as input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

          import paddle

          x = paddle.rand((2, 2, 3))
          instance_norm = paddle.nn.InstanceNorm1D(2)
          instance_norm_out = instance_norm(x)

          print(instance_norm_out)

    """

    def __init__(
        self,
        num_features,
        epsilon=0.00001,
        momentum=0.9,
        weight_attr=None,
        bias_attr=None,
        data_format="NCL",
        name=None,
    ):
        super().__init__(
            num_features,
            epsilon,
            momentum,
            weight_attr,
            bias_attr,
            data_format,
            name,
        )

    def _check_input_dim(self, input):
        if len(input.shape) != 2 and len(input.shape) != 3:
            raise ValueError(
                'expected 2D or 3D input (got {}D input)'.format(
                    len(input.shape)
                )
            )


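# A small illustrative check (an addition, not original code): instance
# normalization makes every (sample, channel) slice of the output zero-mean
# and roughly unit-variance, matching the formulas in the docstring above.
# The helper name is hypothetical.
def _demo_instance_norm1d_stats():
    import paddle

    x = paddle.rand((2, 2, 3))
    out = paddle.nn.InstanceNorm1D(2)(x)
    print(out.mean(axis=-1))  # ~0 for each (batch, channel) pair
    print(out.std(axis=-1))  # ~1, up to epsilon and the variance estimator

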
class InstanceNorm2D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm2D`. Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs with an additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.

    DataLayout: NCHW `[batch, in_channels, in_height, in_width]`


    :math:`input` is the input features over a mini-batch.

    ..  math::

        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means height of feature map, `W` means width of feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            to one. If it is set to False, no scale parameter will be created. Default: None. For more information, please refer to :ref:`api_paddle_ParamAttr` .
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero.
            If it is set to False, no bias parameter will be created. Default: None. For more information, please refer to :ref:`api_paddle_ParamAttr` .
        data_format(str, optional): Specify the input data format, could be "NCHW". Default: NCHW.
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name` .

    Shape:
        - x: 4-D tensor with shape: (batch, num_features, height, width).
        - output: 4-D tensor with the same shape as input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand((2, 2, 2, 3))
            instance_norm = paddle.nn.InstanceNorm2D(2)
            instance_norm_out = instance_norm(x)

            print(instance_norm_out)
    """

    def __init__(
        self,
        num_features,
        epsilon=0.00001,
        momentum=0.9,
        weight_attr=None,
        bias_attr=None,
        data_format="NCHW",
        name=None,
    ):
        super().__init__(
            num_features,
            epsilon,
            momentum,
            weight_attr,
            bias_attr,
            data_format,
            name,
        )

    def _check_input_dim(self, input):
        if len(input.shape) != 4:
            raise ValueError(
                f'expected 4D input (got {len(input.shape)}D input)'
            )


class InstanceNorm3D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm3D`. Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs with an additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.

    DataLayout: NCDHW `[batch, in_channels, D, in_height, in_width]`


    :math:`input` is the input features over a mini-batch.

    ..  math::

        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means height of feature map, `W` means width of feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            to one. If it is set to False, no scale parameter will be created. Default: None. For more information, please refer to :ref:`api_paddle_ParamAttr` .
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero.
            If it is set to False, no bias parameter will be created. Default: None. For more information, please refer to :ref:`api_paddle_ParamAttr` .
        data_format(str, optional): Specify the input data format, could be "NCDHW". Default: NCDHW.
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name` .

    Shape:
        - x: 5-D tensor with shape: (batch, num_features, dims, height, width).
        - output: 5-D tensor with the same shape as input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand((2, 2, 2, 2, 3))
            instance_norm = paddle.nn.InstanceNorm3D(2)
            instance_norm_out = instance_norm(x)

            print(instance_norm_out)
    """

    def __init__(
        self,
        num_features,
        epsilon=0.00001,
        momentum=0.9,
        weight_attr=None,
        bias_attr=None,
        data_format="NCDHW",
        name=None,
    ):
        super().__init__(
            num_features,
            epsilon,
            momentum,
            weight_attr,
            bias_attr,
            data_format,
            name,
        )

    def _check_input_dim(self, input):
        if len(input.shape) != 5:
            raise ValueError(
                f'expected 5D input (got {len(input.shape)}D input)'
            )


class GroupNorm(Layer):
    """

    This interface is used to construct a callable object of the ``GroupNorm`` class.
    For more details, refer to code examples.
    It implements the function of the Group Normalization Layer.
    Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .

    Parameters:
        num_groups(int): The number of groups into which the channels are divided.
        num_channels(int): The number of channels of input.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            scale :math:`g`. If it is set to False, no scale will be added to the output units.
            If it is set to None, the scale is initialized to one. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            bias :math:`b`. If it is set to False, no bias will be added to the output units.
            If it is set to None, the bias is initialized to zero. Default: None.
        data_format(str, optional): Specify the input data format, may be "NCHW" or "NHWC". Default: NCHW.
        name(str, optional): Name for the GroupNorm, default is None. For more information, please refer to :ref:`api_guide_Name` .

    Shape:
        - x: Tensor with shape: (batch, num_features, *).
        - output: The same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.arange(48, dtype="float32").reshape((2, 6, 2, 2))
            group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6)
            group_norm_out = group_norm(x)

            print(group_norm_out)
    """

    def __init__(
        self,
        num_groups,
        num_channels,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCHW',
        name=None,
    ):
        super().__init__()
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._epsilon = epsilon
        self._num_channels = num_channels
        self._num_groups = num_groups
        if data_format not in ['NCHW', 'NHWC']:
            raise ValueError("unsupported data layout:" + data_format)
        self._data_format = data_format

        param_shape = [self._num_channels]

        if weight_attr is False:
            self.weight = self.create_parameter(
                attr=None, shape=param_shape, default_initializer=Constant(1.0)
            )
            self.weight.stop_gradient = True
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                default_initializer=Constant(1.0),
            )
            self.weight.stop_gradient = self._weight_attr is not None and (
                hasattr(self._weight_attr, "learning_rate")
                and self._weight_attr.learning_rate == 0.0
            )

        if bias_attr is False:
            self.bias = self.create_parameter(
                attr=None,
                shape=param_shape,
                default_initializer=Constant(0.0),
                is_bias=True,
            )
            self.bias.stop_gradient = True
        else:
            self.bias = self.create_parameter(
                attr=self._bias_attr, shape=param_shape, is_bias=True
            )
            self.bias.stop_gradient = self._bias_attr is not None and (
                hasattr(self._bias_attr, "learning_rate")
                and self._bias_attr.learning_rate == 0.0
            )

    def forward(self, input):
        if in_dygraph_mode():
            return _C_ops.group_norm(
                input,
                self.weight,
                self.bias,
                self._epsilon,
                self._num_groups,
                self._data_format,
            )

        mean_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype, stop_gradient=True
        )
        variance_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype, stop_gradient=True
        )

        inputs = {'X': input}
        if self.bias is not None:
            inputs['Bias'] = self.bias
        if self.weight is not None:
            inputs['Scale'] = self.weight

        # create output
        group_norm_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype
        )

        self._helper.append_op(
            type="group_norm",
            inputs=inputs,
            outputs={
                "Y": group_norm_out,
                "Mean": mean_out,
                "Variance": variance_out,
            },
            attrs={"epsilon": self._epsilon, "groups": self._num_groups},
        )

        return self._helper.append_activation(group_norm_out, None)

    def extra_repr(self):
        return 'num_groups={}, num_channels={}, epsilon={}'.format(
            self._num_groups, self._num_channels, self._epsilon
        )


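# An illustrative sketch (an addition, not original code): at the two extremes
# of num_groups, GroupNorm reduces to familiar layers, as noted in the Group
# Normalization paper. With num_groups == num_channels each channel is
# normalized separately (InstanceNorm-like); with num_groups == 1 all channels
# are normalized together (LayerNorm-like over C, H, W). Hypothetical helper.
def _demo_group_norm_extremes():
    import paddle

    x = paddle.rand((2, 6, 4, 4))
    per_channel = paddle.nn.GroupNorm(num_groups=6, num_channels=6)(x)
    all_channels = paddle.nn.GroupNorm(num_groups=1, num_channels=6)(x)
    print(per_channel.shape, all_channels.shape)  # both [2, 6, 4, 4]

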
class LayerNorm(Layer):
    r"""
    Construct a callable object of the ``LayerNorm`` class.
    For more details, refer to code examples.
    It implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
    Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_

    The formula is as follows:

    ..  math::

        \mu & = \frac{1}{H}\sum_{i=1}^{H} x_i

        \sigma & = \sqrt{\frac{1}{H}\sum_{i=1}^{H}{(x_i - \mu)^2} + \epsilon}

        y & = f(\frac{g}{\sigma}(x - \mu) + b)

    - :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layer
    - :math:`\epsilon`: the small value added to the variance to prevent division by zero.
    - :math:`g`: the trainable scale parameter.
    - :math:`b`: the trainable bias parameter.

    Parameters:
        normalized_shape(int|list|tuple): Input shape from an expected input of
            size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
            If it is a single integer, this module will normalize over the last dimension
            which is expected to be of that specific size.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            gain :math:`g`. If it is False, the weight is None. If it is None, a default :code:`ParamAttr` would be added as scale. The
            :attr:`weight_attr` is initialized as 1 if it is added. Default: None. For more information, please refer to :ref:`api_paddle_ParamAttr` .
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            bias :math:`b`. If it is False, the bias is None. If it is None, a default :code:`ParamAttr` would be added as bias. The
            :attr:`bias_attr` is initialized as 0 if it is added. Default: None. For more information, please refer to :ref:`api_paddle_ParamAttr` .
        name(str, optional): Name for the LayerNorm, default is None. For more information, please refer to :ref:`api_guide_Name` .

    Shape:
        - x: 2-D, 3-D, 4-D or 5-D tensor.
        - output: same shape as input x.

    Returns:
        None

    Examples:

        .. code-block:: python

          import paddle

          x = paddle.rand((2, 2, 2, 3))
          layer_norm = paddle.nn.LayerNorm(x.shape[1:])
          layer_norm_out = layer_norm(x)

          print(layer_norm_out)
    """

    def __init__(
        self,
        normalized_shape,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        name=None,
    ):
        super().__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = [normalized_shape]

        self._normalized_shape = list(normalized_shape)
        self._epsilon = epsilon
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        param_shape = [np.prod(self._normalized_shape)]

        if weight_attr is False:
            self.weight = None
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                default_initializer=Constant(1.0),
            )

        if bias_attr is False:
            self.bias = None
        else:
            self.bias = self.create_parameter(
                attr=self._bias_attr, shape=param_shape, is_bias=True
            )

    def forward(self, input):
        return layer_norm(
            input,
            normalized_shape=self._normalized_shape,
            weight=self.weight,
            bias=self.bias,
            epsilon=self._epsilon,
        )

    def extra_repr(self):
        return 'normalized_shape={}, epsilon={}'.format(
            self._normalized_shape, self._epsilon
        )


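# A hedged usage sketch (an addition, not original code): per the docstring
# above, an int normalized_shape normalizes over the last dimension only,
# while a list/tuple must match the trailing dimensions of the input exactly.
# Hypothetical helper name.
def _demo_layer_norm_shapes():
    import paddle

    x = paddle.rand((2, 2, 2, 3))
    last_dim = paddle.nn.LayerNorm(3)  # normalize over the last axis only
    trailing = paddle.nn.LayerNorm(x.shape[1:])  # normalize over C, H, W
    print(last_dim(x).shape, trailing(x).shape)  # both [2, 2, 2, 3]

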
class _BatchNormBase(Layer):
    """
    BatchNorm base class.
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCHW',
        use_global_stats=None,
        name=None,
    ):
        super().__init__()
        self._num_features = num_features
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._use_global_stats = use_global_stats

        if get_default_dtype() == 'float16':
            self._dtype = 'float32'
        else:
            self._dtype = get_default_dtype()

        param_shape = [num_features]

        # create parameter
        if weight_attr is False:
            self.weight = self.create_parameter(
                attr=None,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(1.0),
            )
            self.weight.stop_gradient = True
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(1.0),
            )
            self.weight.stop_gradient = (
                self._weight_attr is not None
                and self._weight_attr.learning_rate == 0.0
            )

        if bias_attr is False:
            self.bias = self.create_parameter(
                attr=None,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(0.0),
                is_bias=True,
            )
            self.bias.stop_gradient = True
        else:
            self.bias = self.create_parameter(
                attr=self._bias_attr,
                shape=param_shape,
                dtype=self._dtype,
                is_bias=True,
            )
            self.bias.stop_gradient = (
                self._bias_attr is not None
                and self._bias_attr.learning_rate == 0.0
            )

        moving_mean_name = None
        moving_variance_name = None

        if name is not None:
            moving_mean_name = name + "_mean"
            moving_variance_name = name + "_variance"

        self._mean = self.create_parameter(
            dtype=self._dtype,
            attr=ParamAttr(
                name=moving_mean_name,
                initializer=Constant(0.0),
                trainable=False,
                do_model_average=True,
            ),
            shape=param_shape,
        )
        self._mean.stop_gradient = True

        self._variance = self.create_parameter(
            dtype=self._dtype,
            attr=ParamAttr(
                name=moving_variance_name,
                initializer=Constant(1.0),
                trainable=False,
                do_model_average=True,
            ),
            shape=param_shape,
        )
        self._variance.stop_gradient = True

        # TODO(qili93): temporary for Ascend NPU performance, to be removed
        # along with the npu_identity op
        if (
            _global_flags()['FLAGS_npu_storage_format']
            and 'npu' in get_all_custom_device_type()
        ):
            with no_grad():
                weight_trans = _C_ops.npu_identity(
                    self.weight, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                bias_trans = _C_ops.npu_identity(
                    self.bias, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                mean_trans = _C_ops.npu_identity(
                    self._mean, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                var_trans = _C_ops.npu_identity(
                    self._variance, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                weight_trans._share_underline_tensor_to(self.weight)
                bias_trans._share_underline_tensor_to(self.bias)
                mean_trans._share_underline_tensor_to(self._mean)
                var_trans._share_underline_tensor_to(self._variance)

        self._data_format = data_format
        self._in_place = False
        self._momentum = momentum
        self._epsilon = epsilon
        self._fuse_with_relu = False
        self._name = name

    def _check_input_dim(self, input):
        raise NotImplementedError("BatchNorm Base error")

    def _check_data_format(self, input):
        raise NotImplementedError("BatchNorm Base data format error")

    def forward(self, input):
        self._check_data_format(self._data_format)

        self._check_input_dim(input)

        if self.training:
            warnings.warn(
                "When training, we now always track global mean and variance."
            )

        return batch_norm(
            input,
            self._mean,
            self._variance,
            weight=self.weight,
            bias=self.bias,
            training=self.training,
            momentum=self._momentum,
            epsilon=self._epsilon,
            data_format=self._data_format,
            use_global_stats=self._use_global_stats,
        )

    def extra_repr(self):
        main_str = 'num_features={}, momentum={}, epsilon={}'.format(
            self._num_features, self._momentum, self._epsilon
        )
        if self._data_format != 'NCHW':
            main_str += f', data_format={self._data_format}'
        if self._name is not None:
            main_str += f', name={self._name}'
        return main_str


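# A numeric sketch (an addition, not original code) of the running-statistics
# update performed by the batch_norm functional called above: each training
# step folds the batch statistics into _mean/_variance as
#     moving_mean     = moving_mean * momentum + batch_mean * (1 - momentum)
#     moving_variance = moving_variance * momentum + batch_var * (1 - momentum)
# per the docstring formulas. Hypothetical helper name.
def _demo_running_stats():
    import paddle

    bn = paddle.nn.BatchNorm1D(1, momentum=0.9)
    x = paddle.ones((4, 1, 8)) * 2.0
    bn.train()
    bn(x)  # batch mean is 2.0, so _mean moves from 0.0 toward 2.0
    print(bn._mean)  # ~[0.2] = 0.0 * 0.9 + 2.0 * (1 - 0.9)

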
class BatchNorm(Layer):
    r"""
    This interface is used to construct a callable object of the ``BatchNorm`` class.
    For more details, refer to code examples.
    It implements the function of the Batch Normalization Layer and can be used
    as a normalizer function for conv2d and fully connected operations.
    The data is normalized by the mean and variance of the channel based on the current batch data.
    Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
    for more details.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &
        //\ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \qquad &
        //\ mini-batch\ variance \\

    - :math:`x` : mini-batch data
    - :math:`m` : the size of the mini-batch data

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance),
    usually obtained from a pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift


    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter
    - :math:`\beta` : trainable shift parameter

    Parameters:
        num_channels(int): Indicate the number of channels of the input ``Tensor``.
        act(str, optional): Activation to be applied to the output of batch normalization. Default: None.
        is_test (bool, optional): A flag indicating whether it is in test phase or not.
             This flag only has effect on static graph mode. For dygraph mode, please use ``eval()``.
             Default: False.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        param_attr(ParamAttr, optional): The parameter attribute for Parameter `scale`
             of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
             will create ParamAttr as param_attr. If the Initializer of the param_attr
             is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr, optional): The parameter attribute for the bias of batch_norm.
             If it is set to None or one attribute of ParamAttr, batch_norm
             will create ParamAttr as bias_attr. If the Initializer of the bias_attr
             is not set, the bias is initialized zero. Default: None.
        dtype(str, optional): Indicate the data type of the input ``Tensor``,
             which can be float32 or float64. Default: float32.
        data_layout(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC", where `N` is batch size, `C` is the number of the feature map, `H` is the height of the feature map, `W` is the width of the feature map. Default: NCHW.
        in_place(bool, optional): Make the input and output of batch norm reuse memory. Default: False.
        moving_mean_name(str, optional): The name of moving_mean which store the global Mean. Default: None.
        moving_variance_name(str, optional): The name of the moving_variance which store the global Variance. Default: None.
        do_model_average_for_mean_and_var(bool, optional): Whether parameter mean and variance should do model
            average when model average is enabled. Default: True.
        use_global_stats(bool, optional): Whether to use global mean and
            variance. In inference or test mode, setting either use_global_stats
            or is_test to true is equivalent.
            In train mode, when setting use_global_stats True, the global mean
            and variance are also used during train period. Default: False.
        trainable_statistics(bool, optional): Whether to calculate mean and var in eval mode. In eval mode, when
            setting trainable_statistics True, mean and variance will be calculated by current batch statistics.
            Default: False.

    Returns:
        None

    Examples:
        .. code-block:: python

          import paddle
          import paddle.nn as nn

          x = paddle.rand((3, 10, 3, 7))
          batch_norm = nn.BatchNorm(10)
          hidden1 = batch_norm(x)
    """

    def __init__(
        self,
        num_channels,
        act=None,
        is_test=False,
        momentum=0.9,
        epsilon=1e-05,
        param_attr=None,
        bias_attr=None,
        dtype='float32',
        data_layout='NCHW',
        in_place=False,
        moving_mean_name=None,
        moving_variance_name=None,
        do_model_average_for_mean_and_var=True,
        use_global_stats=False,
        trainable_statistics=False,
    ):
        super().__init__()
        self._param_attr = param_attr
        self._bias_attr = bias_attr
        self._act = act
        self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]

        assert (
            bias_attr is not False
        ), "bias_attr should not be False in batch_norm."

        if dtype == "float16":
            self._dtype = "float32"
        else:
            self._dtype = dtype

        param_shape = [num_channels]

        # create parameter
        self.weight = self.create_parameter(
            attr=self._param_attr,
            shape=param_shape,
            dtype=self._dtype,
            default_initializer=Constant(1.0),
        )
        self.weight.stop_gradient = (
            use_global_stats and self._param_attr.learning_rate == 0.0
        )

        self.bias = self.create_parameter(
            attr=self._bias_attr,
            shape=param_shape,
            dtype=self._dtype,
            is_bias=True,
        )
        self.bias.stop_gradient = (
            use_global_stats and self._param_attr.learning_rate == 0.0
        )

        self._mean = self.create_parameter(
            attr=ParamAttr(
                name=moving_mean_name,
                initializer=Constant(0.0),
                trainable=False,
                do_model_average=do_model_average_for_mean_and_var,
            ),
            shape=param_shape,
            dtype=self._dtype,
        )
        self._mean.stop_gradient = True

        self._variance = self.create_parameter(
            attr=ParamAttr(
                name=moving_variance_name,
                initializer=Constant(1.0),
                trainable=False,
                do_model_average=do_model_average_for_mean_and_var,
            ),
            shape=param_shape,
            dtype=self._dtype,
        )
        self._variance.stop_gradient = True

        self._in_place = in_place
        self._data_layout = data_layout
        self._momentum = momentum
        self._epsilon = epsilon
        self._is_test = is_test
        self._fuse_with_relu = False
        self._use_global_stats = use_global_stats
        self._trainable_statistics = trainable_statistics

    def forward(self, input):
        if in_dygraph_mode():
            batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm(
                input,
                self._mean,
                self._variance,
                self.weight,
                self.bias,
                not self.training,
                self._momentum,
                self._epsilon,
                self._data_layout,
                self._use_global_stats,
                self._trainable_statistics,
            )
            if self._act is None:
                return batch_norm_out
            return dygraph_utils._append_activation_in_dygraph(
                batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn
            )
        else:
            # create output
            # mean and mean_out share the same memory
            mean_out = self._mean
            # variance and variance_out share the same memory
            variance_out = self._variance
            check_variable_and_dtype(
                input, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
            )

            attrs = {
                "momentum": self._momentum,
                "epsilon": self._epsilon,
                "is_test": self._is_test,
                "data_layout": self._data_layout,
                "use_mkldnn": False,
                "fuse_with_relu": self._fuse_with_relu,
                "use_global_stats": self._use_global_stats,
                "trainable_statistics": self._trainable_statistics,
            }

            inputs = {
                "X": [input],
                "Scale": [self.weight],
                "Bias": [self.bias],
                "Mean": [self._mean],
                "Variance": [self._variance],
            }

            saved_mean = self._helper.create_variable_for_type_inference(
                dtype=self._dtype, stop_gradient=True
            )
            saved_variance = self._helper.create_variable_for_type_inference(
                dtype=self._dtype, stop_gradient=True
            )
            reserve_space = self._helper.create_variable_for_type_inference(
                dtype=self._helper.input_dtype(input), stop_gradient=True
            )

            batch_norm_out = (
                input
                if self._in_place
                else self._helper.create_variable_for_type_inference(
                    self._dtype
                )
            )

            outputs = {
                "Y": [batch_norm_out],
                "MeanOut": [mean_out],
                "VarianceOut": [variance_out],
                "SavedMean": [saved_mean],
                "SavedVariance": [saved_variance],
            }
            if reserve_space is not None:
                outputs["ReserveSpace"] = [reserve_space]

            self._helper.append_op(
                type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
            )

            # Currently, we don't support inplace in dygraph mode
            return self._helper.append_activation(batch_norm_out, self._act)


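# A hedged sketch (an addition, not original code): in dygraph mode the
# train/eval switch for this layer is the standard Layer API rather than the
# is_test flag documented above. Hypothetical helper name.
def _demo_batch_norm_eval():
    import paddle

    bn = paddle.nn.BatchNorm(10)
    x = paddle.rand((3, 10, 3, 7))
    bn.train()  # normalize with batch statistics, update running stats
    y_train = bn(x)
    bn.eval()  # normalize with the accumulated running statistics
    y_eval = bn(x)
    print(y_train.shape, y_eval.shape)

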
class BatchNorm1D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with an additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance),
    usually obtained from a pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter
    - :math:`\beta` : trainable shift parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC", where `N` is batch size, `C` is the number of the feature map, `L` is the length of the feature map. Default "NCL".
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch; if set to True, use the global statistics; if set to None, use global statistics in the test phase and the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name` .

    Shape:
        - x: 2-D or 3-D tensor with shape: (batch, num_features) or (batch, num_features, length) when data_format is "NC" or "NCL",
            (batch, length, num_features) when data_format is "NLC".
        - output: tensor with the same shape as input x.

    Returns:
        None.


    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 3))
          batch_norm = paddle.nn.BatchNorm1D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCL',
        use_global_stats=None,
        name=None,
    ):
        super().__init__(
            num_features,
            momentum,
            epsilon,
            weight_attr,
            bias_attr,
            data_format,
            use_global_stats,
            name,
        )

    def _check_data_format(self, input):
        if input == 'NCHW' or input == 'NC' or input == 'NCL':
            self._data_format = 'NCHW'
        elif input == "NHWC" or input == 'NLC':
            self._data_format = "NHWC"
        else:
            raise ValueError(
                'expected NC, NCL or NLC for data_format input'
            )

    def _check_input_dim(self, input):
        if len(input.shape) != 2 and len(input.shape) != 3:
            raise ValueError(
                'expected 2D or 3D input (got {}D input)'.format(
                    len(input.shape)
                )
            )


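# An illustrative sketch (an addition, not original code) of the data_format
# options accepted above: the default "NCL" expects the channel axis second,
# while "NLC" expects it last. Hypothetical helper name.
def _demo_batch_norm1d_layouts():
    import paddle

    ncl = paddle.nn.BatchNorm1D(4)  # input laid out as (N, C, L)
    nlc = paddle.nn.BatchNorm1D(4, data_format='NLC')  # input as (N, L, C)
    print(ncl(paddle.rand((2, 4, 8))).shape)  # [2, 4, 8]
    print(nlc(paddle.rand((2, 8, 4))).shape)  # [2, 8, 4]

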
class BatchNorm2D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with an additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance),
    usually obtained from a pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter
    - :math:`\beta` : trainable shift parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
        data_format(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC", where `N` is batch size, `C` is the number of the feature map, `H` is the height of the feature map, `W` is the width of the feature map. Default: NCHW.
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch; if set to True, use the global statistics; if set to None, use global statistics in the test phase and the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name` .

    Shape:
        - x: 4-D tensor with shape: (batch, num_features, height, width) when data_format is "NCHW",
            or (batch, height, width, num_features) when data_format is "NHWC".
        - output: 4-D tensor with the same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 2, 3))
          batch_norm = paddle.nn.BatchNorm2D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
    """

    def _check_data_format(self, input):
        if input == 'NCHW':
            self._data_format = input
        elif input == "NHWC":
            self._data_format = input
        else:
            raise ValueError('expected NCHW or NHWC for data_format input')

    def _check_input_dim(self, input):
        if len(input.shape) != 4:
            raise ValueError(
                f'expected 4D input (got {len(input.shape)}D input)'
            )


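# A hedged sketch of the use_global_stats switch described above (an
# addition, not original code): with use_global_stats=True the layer
# normalizes with its running mean/variance even in training mode.
# Hypothetical helper name.
def _demo_batch_norm2d_global_stats():
    import paddle

    bn = paddle.nn.BatchNorm2D(3, use_global_stats=True)
    bn.train()
    x = paddle.rand((2, 3, 4, 4))
    # Normalized with the running statistics (initially mean 0, variance 1),
    # not with the statistics of this particular mini-batch.
    print(bn(x).shape)  # [2, 3, 4, 4]

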
class BatchNorm3D(_BatchNormBase):
1272
    r"""
1273 1274
    Applies Batch Normalization over a 5D input (a mini-batch of 3D inputswith additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .

1275 1276
    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
1277 1278 1279 1280
    Calculated as follows:

    ..  math::

1281 1282 1283 1284
        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\
1285

C
ceci3 已提交
1286
    When use_global_stats = True, the :math:`\\mu_{\\beta}`
1287 1288 1289 1290 1291
    and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance). It usually got from the
    pre-trained model. Calculated as follows:

    .. math::
1292 1293
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable proportional parameter
    - :math:`\beta` : trainable deviation parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
        data_format(str, optional): Specify the input data format, the data format can be "NCDHW" or "NDHWC", where `N` is batch size, `C` is the number of the feature map, `D` is the depth of the feature, `H` is the height of the feature map, `W` is the width of the feature map. Default: NCDHW.
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch; if set to True, use the global statistics; if set to None, use global statistics in the test phase and the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 5-D tensor with shape: (batch, num_features, dims, height, width) when data_format is "NCDHW",
            or (batch, dims, height, width, num_features) when data_format is "NDHWC".
        - output: 5-D tensor with the same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 2, 2, 3))
          batch_norm = paddle.nn.BatchNorm3D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
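
          # In eval mode the layer switches to the accumulated running
          # statistics (moving_mean / moving_variance) instead of the
          # statistics of the current batch:
          batch_norm.eval()
          eval_out = batch_norm(x)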
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCDHW',
        use_global_stats=None,
        name=None,
    ):
        super().__init__(
            num_features,
            momentum,
            epsilon,
            weight_attr,
            bias_attr,
            data_format,
            use_global_stats,
            name,
        )

    def _check_data_format(self, input):
        if input == 'NCHW' or input == 'NCDHW':
            self._data_format = 'NCHW'
        elif input == "NHWC" or input == "NDHWC":
            self._data_format = 'NHWC'
        else:
            raise ValueError(
                'expected NCDHW, NDHWC or None for data_format input'
            )

    def _check_input_dim(self, input):
        if len(input.shape) != 5:
            raise ValueError(
                f'expected 5D input (got {len(input.shape)}D input)'
            )


class SyncBatchNorm(_BatchNormBase):
    r"""

    This interface is used to construct a callable object of the ``SyncBatchNorm`` class.
    It implements the function of the Cross-GPU Synchronized Batch Normalization Layer, and can
    be used as a normalizer function for other operations, such as conv2d and fully connected
    operations.
    The data is normalized by the mean and variance of the channel computed over the whole
    mini-batch, including the data on all GPUs.
    Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
    for more details.

    When the model is in training mode, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of the whole mini-batch data across all GPUs.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    - :math:`x` : whole mini-batch data across all GPUs
    - :math:`m` : the size of the whole mini-batch data

    When the model is in evaluation mode, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are global statistics (moving_mean and moving_variance,
    which are usually obtained from a pre-trained model). Global statistics are calculated as follows:

    .. math::

        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The formula of normalization is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter vector
    - :math:`\beta` : trainable shift parameter vector

    Note:
        If you want to use a container to pack your model, and the model contains
        :ref:`api_paddle_nn_SyncBatchNorm` used in the evaluation phase, please use
        :ref:`api_paddle_nn_LayerList` or :ref:`api_paddle_nn_Sequential` instead of
        :ref:`api_paddle_hub_list` to pack the model.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of this layer. If it is set to None or one attribute of ParamAttr, this layer
             will create ParamAttr as param_attr. If the Initializer of the param_attr
             is not set, the parameter is initialized with ones. If it is set to False,
             this layer will not have trainable scale parameter. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of this layer.
             If it is set to None or one attribute of ParamAttr, this layer
             will create ParamAttr as bias_attr. If the Initializer of the bias_attr
             is not set, the bias is initialized zero. If it is set to False, this layer will not
             have trainable bias parameter. Default: None.

    Shapes:
        - input: Tensor with a dimension from 2 to 5.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            # required: gpu

            import paddle
            import paddle.nn as nn

            x = paddle.to_tensor([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')

            if paddle.is_compiled_with_cuda():
                sync_batch_norm = nn.SyncBatchNorm(2)
                hidden1 = sync_batch_norm(x)
                print(hidden1)
                # Tensor(shape=[1, 2, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
                #        [[[[ 0.26824948,  1.09363246],
                #           [ 0.26824948, -1.63013160]],

                #          [[ 0.80956620, -0.66528702],
                #           [-1.27446556,  1.13018656]]]])
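
            # In real use, SyncBatchNorm is combined with distributed data
            # parallel training; a typical setup sketch (assumes the script is
            # launched in a multi-GPU distributed environment):
            #   paddle.distributed.init_parallel_env()
            #   model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
            #   model = paddle.DataParallel(model)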
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCHW',
        name=None,
    ):
        super().__init__(
            num_features,
            momentum,
            epsilon,
            weight_attr,
            bias_attr,
            data_format,
            None,
            name,
        )

    def _check_data_format(self):
        if self._data_format in ['NCHW', 'NCDHW', 'NC', 'NCL']:
            self._data_format = 'NCHW'
        elif self._data_format in ["NHWC", "NDHWC", 'NLC']:
            self._data_format = 'NHWC'
        else:
            raise ValueError(
                'expected \'NCDHW\', \'NDHWC\', \'NCL\', \'NLC\', \'NC\', \'NCHW\', \'NHWC\' for data_format'
            )

    def forward(self, x):
        self._check_data_format()
        # create output
        # mean and mean_out share the same memory
        mean_out = self._mean
        # variance and variance_out share the same memory
        variance_out = self._variance

        # train mode: use mini-batch stats; eval mode: use global stats
        # use_global_stats only supports False in sync_batch_norm
        if in_dygraph_mode():
            sync_batch_norm_out, _, _, _, _, _ = _C_ops.sync_batch_norm_(
                x,
                self._mean,
                self._variance,
                self.weight,
                self.bias,
                not self.training,
                self._momentum,
                self._epsilon,
                self._data_format,
                False,
                False,
            )
            return sync_batch_norm_out

        elif in_dynamic_mode():
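            # Fallback for the legacy dygraph tracer: the legacy op takes a
            # flat (name, value, ...) attribute tuple instead of keyword
            # arguments.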
            attrs = (
                "momentum",
                self._momentum,
                "epsilon",
                self._epsilon,
                "is_test",
                not self.training,
                "data_layout",
                self._data_format,
                "use_mkldnn",
                False,
                "fuse_with_relu",
                False,
                "use_global_stats",
                False,
                'trainable_statistics',
                False,
            )
            sync_batch_norm_out, _, _, _, _, _ = _legacy_C_ops.sync_batch_norm(
                x,
                self.weight,
                self.bias,
                self._mean,
                self._variance,
                mean_out,
                variance_out,
                *attrs,
            )
            return sync_batch_norm_out

        check_variable_and_dtype(
            x, 'input', ['float16', 'float32', 'float64'], 'SyncBatchNorm'
        )

        attrs = {
            "momentum": self._momentum,
            "epsilon": self._epsilon,
            "is_test": not self.training,
            "data_layout": self._data_format,
            "use_mkldnn": False,
            "fuse_with_relu": False,
            "use_global_stats": False,
            "trainable_statistics": False,
        }

        inputs = {
            "X": [x],
            "Scale": [self.weight],
            "Bias": [self.bias],
            "Mean": [self._mean],
            "Variance": [self._variance],
        }

        saved_mean = self._helper.create_variable_for_type_inference(
            dtype=self._dtype, stop_gradient=True
        )
        saved_variance = self._helper.create_variable_for_type_inference(
            dtype=self._dtype, stop_gradient=True
        )
        sync_batch_norm_out = self._helper.create_variable_for_type_inference(
            self._dtype
        )

        outputs = {
            "Y": [sync_batch_norm_out],
            "MeanOut": [mean_out],
            "VarianceOut": [variance_out],
            "SavedMean": [saved_mean],
            "SavedVariance": [saved_variance],
        }

        self._helper.append_op(
            type="sync_batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
        )
        return sync_batch_norm_out

    @classmethod
    def convert_sync_batchnorm(cls, layer):
        """
        Helper function to convert :class:`paddle.nn.BatchNorm*d` layers in the model to :class:`paddle.nn.SyncBatchNorm` layers.

        Parameters:
            layer(paddle.nn.Layer): model containing one or more `BatchNorm*d` layers.

        Returns:
            The original model with its `BatchNorm*d` layers replaced by `SyncBatchNorm` layers.

        Examples:
            .. code-block:: python

                import paddle
                import paddle.nn as nn

                model = nn.Sequential(nn.Conv2D(3, 5, 3), nn.BatchNorm2D(5))
                sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
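
                # Every BatchNorm*D sublayer is now a SyncBatchNorm; a quick
                # sanity check (sketch, assumes Sequential supports indexing):
                # print(type(sync_model[1]))  # SyncBatchNorm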

        """
        layer_output = layer
        if isinstance(layer, _BatchNormBase):
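            # Rename user-specified parameter attributes so the converted
            # layer's parameters do not collide with the original ones.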
            if (
                layer._weight_attr is not None
                and not isinstance(layer._weight_attr, bool)
                and layer._weight_attr.name is not None
            ):
                layer._weight_attr.name = layer._weight_attr.name + '_sync'
            if (
                layer._bias_attr is not None
                and not isinstance(layer._bias_attr, bool)
                and layer._bias_attr.name is not None
            ):
                layer._bias_attr.name = layer._bias_attr.name + '_sync'

            layer_output = SyncBatchNorm(
                layer._num_features,
                layer._momentum,
                layer._epsilon,
                layer._weight_attr,
                layer._bias_attr,
                layer._data_format,
                layer._name,
            )

            if (
                layer._weight_attr is not False
                and layer._bias_attr is not False
            ):
                with no_grad():
                    layer_output.weight = layer.weight
                    layer_output.bias = layer.bias
            layer_output._mean = layer._mean
            layer_output._variance = layer._variance

        for name, sublayer in layer.named_children():
            layer_output.add_sublayer(
                name, cls.convert_sync_batchnorm(sublayer)
            )
        del layer
        return layer_output


class LocalResponseNorm(Layer):
    """
    Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions.
    For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_

    See more details in :ref:`api_paddle_nn_functional_local_response_norm` .

    Parameters:
        size (int): The number of channels to sum over.
        alpha (float, optional): The scaling parameter, positive. Default: 1e-4
        beta (float, optional): The exponent, positive. Default: 0.75
        k (float, optional): An offset, positive. Default: 1.0
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:
            If input is a 3-D Tensor, the string could be `"NCL"` or `"NLC"`. When it is `"NCL"`,
            the data is stored in the order of: `[batch_size, input_channels, feature_length]`.
            If input is a 4-D Tensor, the string could be `"NCHW"` or `"NHWC"`. When it is `"NCHW"`,
            the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`.
            If input is a 5-D Tensor, the string could be `"NCDHW"` or `"NDHWC"`. When it is `"NCDHW"`,
            the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name (str, optional): Name for the operation (optional, default is None). For more information,
            please refer to :ref:`api_guide_Name`.

    Shape:
        - input: 3-D/4-D/5-D tensor.
        - output: 3-D/4-D/5-D tensor, the same shape as input.

    Examples:

    .. code-block:: python

        import paddle

        x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32")
        m = paddle.nn.LocalResponseNorm(size=5)
        y = m(x)
        print(y.shape)  # [3, 3, 112, 112]
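
        # LRN divides each activation by a function of the squared
        # activations in a window of `size` neighboring channels,
        # implementing the "lateral inhibition" described above.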
    """

    def __init__(
        self,
        size,
        alpha=0.0001,
        beta=0.75,
        k=1.0,
        data_format="NCHW",
        name=None,
    ):
        super().__init__()
        self.size = size
        self.alpha = alpha
        self.beta = beta
        self.k = k
        self.data_format = data_format
        self.name = name

    def forward(self, input):
        out = F.local_response_norm(
            input,
            self.size,
            self.alpha,
            self.beta,
            self.k,
            self.data_format,
            self.name,
        )
        return out

    def extra_repr(self):
        main_str = 'size={}, alpha={}, beta={}, k={}'.format(
            self.size, self.alpha, self.beta, self.k
        )
        if self.data_format != 'NCHW':
            main_str += f', data_format={self.data_format}'
        if self.name is not None:
            main_str += f', name={self.name}'
        return main_str


class SpectralNorm(Layer):
    r"""
    This interface is used to construct a callable object of the ``SpectralNorm`` class.
    For more details, refer to code examples. It implements the function of the Spectral Normalization Layer.
    This layer calculates the spectral normalization value of the weight parameters of
    fc, conv1d, conv2d and conv3d layers, which should be 2-D, 3-D, 4-D or 5-D
    Parameters. Calculations are shown as follows.

    Step 1:
    Generate vector U in shape of [H], and V in shape of [W].
    Here H is the :attr:`dim` th dimension of the input weights,
    and W is the product of the remaining dimensions.

    Step 2:
    :attr:`power_iters` should be a positive integer; do the following
    calculations with U and V for :attr:`power_iters` rounds.

    .. math::

        \mathbf{v} := \frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}

        \mathbf{u} := \frac{\mathbf{W} \mathbf{v}}{\|\mathbf{W} \mathbf{v}\|_2}

    Step 3:
    Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.

    .. math::

        \sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}

        \mathbf{W} = \frac{\mathbf{W}}{\sigma(\mathbf{W})}


    Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .

    Parameters:
        weight_shape(list or tuple): The shape of weight parameter.
        dim(int, optional): The index of dimension which should be permuted to the first before reshaping Input(Weight) to matrix, it should be set as 0 if Input(Weight) is the weight of fc layer, and should be set as 1 if Input(Weight) is the weight of conv layer. Default: 0.
        power_iters(int, optional): The number of power iterations to calculate spectral norm. Default: 1.
        eps(float, optional): The epsilon for numerical stability in calculating norms. Default: 1e-12.
        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
        dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".

    Returns:
        None

    Examples:
       .. code-block:: python

            import paddle
            x = paddle.rand((2,8,32,32))

            spectral_norm = paddle.nn.SpectralNorm(x.shape, dim=1, power_iters=2)
            spectral_norm_out = spectral_norm(x)

            print(spectral_norm_out.shape) # [2, 8, 32, 32]
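
            # A rough sketch of what one power iteration does, with the
            # weight reshaped to a 2-D matrix W of shape [h, w]
            # (numpy-like pseudocode):
            #   v = W.T @ u / norm(W.T @ u)
            #   u = W @ v / norm(W @ v)
            #   sigma = u @ W @ v;  W_normalized = W / sigma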

    """

    def __init__(
        self,
        weight_shape,
        dim=0,
        power_iters=1,
        eps=1e-12,
        dtype='float32',
    ):
        super().__init__()
        self._power_iters = power_iters
        self._eps = eps
        self._dim = dim
        self._dtype = dtype

        self._weight_shape = list(weight_shape)
        assert (
            np.prod(self._weight_shape) > 0
        ), "Any dimension of `weight_shape` cannot be equal to 0."
        assert dim < len(self._weight_shape), (
            "The input `dim` should be less than the "
            "length of `weight_shape`, but received dim="
            "{}".format(dim)
        )
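        # Flatten view of the weight: dimension `dim` provides the rows (h);
        # the product of the remaining dimensions provides the columns (w).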
        h = self._weight_shape[self._dim]
        w = np.prod(self._weight_shape) // h

        self.weight_u = self.create_parameter(
            attr=ParamAttr(),
            shape=[h],
            dtype=self._dtype,
            default_initializer=Normal(0.0, 1.0),
        )
        self.weight_u.stop_gradient = True

        self.weight_v = self.create_parameter(
            attr=ParamAttr(),
            shape=[w],
            dtype=self._dtype,
            default_initializer=Normal(0.0, 1.0),
        )
        self.weight_v.stop_gradient = True

    def forward(self, x):
        weight = x
        if in_dygraph_mode():
            return _C_ops.spectral_norm(
                weight,
                self.weight_u,
                self.weight_v,
                self._dim,
                self._power_iters,
                self._eps,
            )

        check_variable_and_dtype(
            weight, "weight", ['float32', 'float64'], 'SpectralNorm'
        )
        inputs = {'Weight': weight, 'U': self.weight_u, 'V': self.weight_v}
        out = self._helper.create_variable_for_type_inference(self._dtype)
        self._helper.append_op(
            type="spectral_norm",
            inputs=inputs,
            outputs={
                "Out": out,
            },
            attrs={
                "dim": self._dim,
                "power_iters": self._power_iters,
                "eps": self._eps,
            },
        )

        return out