# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define normalization api

import numbers
import warnings

import numpy as np

from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.device import get_all_custom_device_type
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from ...fluid.data_feeder import check_variable_and_dtype
from ...fluid.dygraph import BatchNorm  # noqa: F401
from ...fluid.dygraph import SpectralNorm  # noqa: F401
from ...framework import ParamAttr, get_default_dtype, no_grad
from .. import Layer
from .. import functional as F
from ..functional import batch_norm, instance_norm, layer_norm
from ..initializer import Constant

__all__ = []


class _InstanceNormBase(Layer):
    """
    This class is the base class for InstanceNorm1D, InstanceNorm2D and InstanceNorm3D.

    See InstanceNorm1D, InstanceNorm2D or InstanceNorm3D for more details.
    """

    def __init__(
        self,
        num_features,
        epsilon=1e-5,
        momentum=0.9,
        weight_attr=None,
        bias_attr=None,
        data_format="NCHW",
        name=None,
    ):
        super().__init__()

        if weight_attr is False or bias_attr is False:
            assert (
                weight_attr == bias_attr
            ), "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
        self._epsilon = epsilon
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._num_features = num_features

        if weight_attr is not False and bias_attr is not False:
            self.scale = self.create_parameter(
                attr=self._weight_attr,
                shape=[num_features],
                default_initializer=Constant(1.0),
                is_bias=False,
            )
            self.bias = self.create_parameter(
                attr=self._bias_attr,
                shape=[num_features],
                default_initializer=Constant(0.0),
                is_bias=True,
            )
        else:
            self.scale = None
            self.bias = None

    def _check_input_dim(self, input):
        raise NotImplementedError("InstanceNorm Base error")

    def forward(self, input):
        self._check_input_dim(input)

        return instance_norm(
            input, weight=self.scale, bias=self.bias, eps=self._epsilon
        )

    def extra_repr(self):
        return 'num_features={}, epsilon={}'.format(
            self._num_features, self._epsilon
        )


class InstanceNorm1D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm1D`. Applies Instance Normalization over a 3D input (a mini-batch of 1D inputs with an additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.

    DataLayout: NCL `[batch, in_channels, length]`

    :math:`input` is the input features over a mini-batch.

    ..  math::
        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means the height of the feature map and `W` means the width of the feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            with one. If it is set to False, the weight parameter will not be created. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero.
            If it is set to False, the bias parameter will not be created. Default: None.
        data_format(str, optional): Specify the input data format, may be "NC" or "NCL". Default "NCL".
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.


    Shape:
        - x: 2-D or 3-D tensor with shape: (batch, num_features) or (batch, num_features, length).
        - output: 2-D or 3-D tensor with the same shape as the input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

          import paddle

          x = paddle.rand((2, 2, 3))
          instance_norm = paddle.nn.InstanceNorm1D(2)
          instance_norm_out = instance_norm(x)

          print(instance_norm_out)
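
          # A hedged variant: passing weight_attr=False and bias_attr=False
          # disables the learnable scale and bias; per the base class they can
          # only be disabled together.
          instance_norm_plain = paddle.nn.InstanceNorm1D(
              2, weight_attr=False, bias_attr=False
          )
          instance_norm_plain_out = instance_norm_plain(x)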

    """

    def _check_input_dim(self, input):
        if len(input.shape) != 2 and len(input.shape) != 3:
            raise ValueError(
                'expected 2D or 3D input (got {}D input)'.format(
                    len(input.shape)
                )
            )


class InstanceNorm2D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm2D`. Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs with an additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.

    DataLayout: NCHW `[batch, in_channels, in_height, in_width]`


    :math:`input` is the input features over a mini-batch.

    ..  math::
        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means the height of the feature map and `W` means the width of the feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            with one. If it is set to False, the weight parameter will not be created. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero.
            If it is set to False, the bias parameter will not be created. Default: None.
        data_format(str, optional): Specify the input data format, could be "NCHW". Default: NCHW.
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 4-D tensor with shape: (batch, num_features, height, width).
        - output: 4-D tensor with the same shape as the input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand((2, 2, 2, 3))
            instance_norm = paddle.nn.InstanceNorm2D(2)
            instance_norm_out = instance_norm(x)
            print(instance_norm_out)
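
            # A hedged sanity check: each (sample, channel) feature map is
            # normalized independently, so its mean should be close to zero.
            print(instance_norm_out.mean(axis=[2, 3]))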
    """

    def _check_input_dim(self, input):
        if len(input.shape) != 4:
            raise ValueError(
                'expected 4D input (got {}D input)'.format(len(input.shape))
            )


class InstanceNorm3D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm3D`. Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs with an additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.

    DataLayout: NCDHW `[batch, in_channels, D, in_height, in_width]`


    :math:`input` is the input features over a mini-batch.

    ..  math::
        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means the height of the feature map and `W` means the width of the feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            with one. If it is set to False, the weight parameter will not be created. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero.
            If it is set to False, the bias parameter will not be created. Default: None.
        data_format(str, optional): Specify the input data format, could be "NCDHW". Default: NCDHW.
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 5-D tensor with shape: (batch, num_features, dims, height, width).
        - output: 5-D tensor with the same shape as the input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand((2, 2, 2, 2, 3))
            instance_norm = paddle.nn.InstanceNorm3D(2)
            instance_norm_out = instance_norm(x)

            print(instance_norm_out)
    """

    def _check_input_dim(self, input):
        if len(input.shape) != 5:
            raise ValueError(
                'expected 5D input (got {}D input)'.format(len(input.shape))
            )


class GroupNorm(Layer):
    """

    This interface is used to construct a callable object of the ``GroupNorm`` class.
    For more details, refer to code examples.
    It implements the function of the Group Normalization Layer.
    Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .

    Parameters:
        num_groups(int): The number of groups into which the channels are divided.
        num_channels(int): The number of channels of input.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            scale :math:`g`. If it is set to False, no scale will be added to the output units.
            If it is set to None, the scale is initialized with one. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            bias :math:`b`. If it is set to False, no bias will be added to the output units.
            If it is set to None, the bias is initialized with zero. Default: None.
        data_format(str, optional): Specify the input data format, could be "NCHW" or "NHWC". Default: NCHW.
        name(str, optional): Name for the GroupNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: Tensor with shape: (batch, num_features, *).
        - output: The same shape as the input x.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.arange(48, dtype="float32").reshape((2, 6, 2, 2))
            group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6)
            group_norm_out = group_norm(x)
            print(group_norm_out)
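
            # A hedged variant: channels are split into num_groups groups, so
            # num_channels must be divisible by num_groups; here the 6 channels
            # form 3 groups of 2.
            group_norm_grouped = paddle.nn.GroupNorm(num_channels=6, num_groups=3)
            group_norm_grouped_out = group_norm_grouped(x)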
    """

    def __init__(
        self,
        num_groups,
        num_channels,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCHW',
        name=None,
    ):
        super().__init__()
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._epsilon = epsilon
        self._num_channels = num_channels
        self._num_groups = num_groups
        if data_format not in ['NCHW', 'NHWC']:
            raise ValueError("unsupported data layout:" + data_format)
        self._data_format = data_format

        param_shape = [self._num_channels]

        if weight_attr is False:
            self.weight = self.create_parameter(
                attr=None, shape=param_shape, default_initializer=Constant(1.0)
            )
            self.weight.stop_gradient = True
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                default_initializer=Constant(1.0),
            )
            self.weight.stop_gradient = (
                self._weight_attr is not None
                and self._weight_attr.learning_rate == 0.0
            )

        if bias_attr is False:
            self.bias = self.create_parameter(
                attr=None,
                shape=param_shape,
                default_initializer=Constant(0.0),
                is_bias=True,
            )
            self.bias.stop_gradient = True
        else:
            self.bias = self.create_parameter(
                attr=self._bias_attr, shape=param_shape, is_bias=True
            )
            self.bias.stop_gradient = (
                self._bias_attr is not None
                and self._bias_attr.learning_rate == 0.0
            )

    def forward(self, input):
        if in_dygraph_mode():
            return _C_ops.group_norm(
                input,
                self.weight,
                self.bias,
                self._epsilon,
                self._num_groups,
                self._data_format,
            )

        mean_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype, stop_gradient=True
        )
        variance_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype, stop_gradient=True
        )

        if _in_legacy_dygraph():
            pre_act, _, _ = _legacy_C_ops.group_norm(
                input,
                self.weight,
                self.bias,
                mean_out,
                variance_out,
                'epsilon',
                self._epsilon,
                'groups',
                self._num_groups,
            )
            return pre_act

        inputs = {'X': input}
        if self.bias is not None:
            inputs['Bias'] = self.bias
        if self.weight is not None:
            inputs['Scale'] = self.weight

        # create output
        group_norm_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype
        )

        self._helper.append_op(
            type="group_norm",
            inputs=inputs,
            outputs={
                "Y": group_norm_out,
                "Mean": mean_out,
                "Variance": variance_out,
            },
            attrs={"epsilon": self._epsilon, "groups": self._num_groups},
        )

        return self._helper.append_activation(group_norm_out, None)

    def extra_repr(self):
        return 'num_groups={}, num_channels={}, epsilon={}'.format(
            self._num_groups, self._num_channels, self._epsilon
        )


class LayerNorm(Layer):
    r"""
    Construct a callable object of the ``LayerNorm`` class.
    For more details, refer to code examples.
    It implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
    Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_

    The formula is as follows:

    ..  math::

        \mu & = \frac{1}{H}\sum_{i=1}^{H} x_i

        \sigma & = \sqrt{\frac{1}{H}\sum_{i=1}^{H}{(x_i - \mu)^2} + \epsilon}

        y & = f(\frac{g}{\sigma}(x - \mu) + b)

    - :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layer
    - :math:`\epsilon`: the small value added to the variance to prevent division by zero.
    - :math:`g`: the trainable scale parameter.
    - :math:`b`: the trainable bias parameter.

    Parameters:
        normalized_shape(int|list|tuple): Input shape from an expected input of
            size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
            If it is a single integer, this module will normalize over the last dimension
            which is expected to be of that specific size.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            gain :math:`g`. If it is False, weight is None. If it is None, a default :code:`ParamAttr` would be added as scale. The
            :attr:`weight_attr` is initialized as 1 if it is added. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            bias :math:`b`. If it is False, bias is None. If it is None, a default :code:`ParamAttr` would be added as bias. The
            :attr:`bias_attr` is initialized as 0 if it is added. Default: None.
        name(str, optional): Name for the LayerNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 2-D, 3-D, 4-D or 5-D tensor.
        - output: same shape as input x.

    Returns:
        None

    Examples:

        .. code-block:: python

          import paddle

          x = paddle.rand((2, 2, 2, 3))
          layer_norm = paddle.nn.LayerNorm(x.shape[1:])
          layer_norm_out = layer_norm(x)

          print(layer_norm_out)
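
          # A minimal sketch: a single int normalized_shape normalizes over
          # the last dimension only, per the parameter description above.
          layer_norm_last = paddle.nn.LayerNorm(x.shape[-1])
          layer_norm_last_out = layer_norm_last(x)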
    """

    def __init__(
        self,
        normalized_shape,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        name=None,
    ):
        super().__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = [normalized_shape]

        self._normalized_shape = list(normalized_shape)
        self._epsilon = epsilon
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        param_shape = [np.prod(self._normalized_shape)]

        if weight_attr is False:
            self.weight = None
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                default_initializer=Constant(1.0),
            )

        if bias_attr is False:
            self.bias = None
        else:
            self.bias = self.create_parameter(
                attr=self._bias_attr, shape=param_shape, is_bias=True
            )

    def forward(self, input):
        return layer_norm(
            input,
            normalized_shape=self._normalized_shape,
            weight=self.weight,
            bias=self.bias,
            epsilon=self._epsilon,
        )

    def extra_repr(self):
        return 'normalized_shape={}, epsilon={}'.format(
            self._normalized_shape, self._epsilon
        )


class _BatchNormBase(Layer):
    """
    BatchNorm base class.
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCHW',
        use_global_stats=None,
        name=None,
    ):
        super().__init__()
        self._num_features = num_features
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._use_global_stats = use_global_stats

        if get_default_dtype() == 'float16':
            self._dtype = 'float32'
        else:
            self._dtype = get_default_dtype()

        param_shape = [num_features]

        # create parameter
        if weight_attr is False:
            self.weight = self.create_parameter(
                attr=None,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(1.0),
            )
            self.weight.stop_gradient = True
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(1.0),
            )
            self.weight.stop_gradient = (
                self._weight_attr is not None
                and self._weight_attr.learning_rate == 0.0
            )

        if bias_attr is False:
            self.bias = self.create_parameter(
                attr=None,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(0.0),
                is_bias=True,
            )
            self.bias.stop_gradient = True
        else:
            self.bias = self.create_parameter(
                attr=self._bias_attr,
                shape=param_shape,
                dtype=self._dtype,
                is_bias=True,
            )
            self.bias.stop_gradient = (
                self._bias_attr is not None
                and self._bias_attr.learning_rate == 0.0
            )

        moving_mean_name = None
        moving_variance_name = None

        if name is not None:
            moving_mean_name = name + "_mean"
            moving_variance_name = name + "_variance"
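
        # The running statistics (_mean and _variance) are stored as
        # non-trainable parameters: they are updated by the batch_norm kernel
        # during training, not by the optimizer.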

        self._mean = self.create_parameter(
            dtype=self._dtype,
            attr=ParamAttr(
                name=moving_mean_name,
                initializer=Constant(0.0),
                trainable=False,
                do_model_average=True,
            ),
            shape=param_shape,
        )
        self._mean.stop_gradient = True

        self._variance = self.create_parameter(
            dtype=self._dtype,
            attr=ParamAttr(
                name=moving_variance_name,
                initializer=Constant(1.0),
                trainable=False,
                do_model_average=True,
            ),
            shape=param_shape,
        )
        self._variance.stop_gradient = True

        # TODO(qili93): temporary for Ascend NPU performance; to be removed along with the npu_identity op
        if 'npu' in get_all_custom_device_type():
            with no_grad():
                weight_trans = _C_ops.npu_identity(
                    self.weight, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                bias_trans = _C_ops.npu_identity(
                    self.bias, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                mean_trans = _C_ops.npu_identity(
                    self._mean, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                var_trans = _C_ops.npu_identity(
                    self._variance, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                weight_trans._share_underline_tensor_to(self.weight)
                bias_trans._share_underline_tensor_to(self.bias)
                mean_trans._share_underline_tensor_to(self._mean)
                var_trans._share_underline_tensor_to(self._variance)

        self._data_format = data_format
        self._in_place = False
        self._momentum = momentum
        self._epsilon = epsilon
        self._fuse_with_relu = False
        self._name = name

    def _check_input_dim(self, input):
        raise NotImplementedError("BatchNorm Base error")

    def _check_data_format(self, input):
        raise NotImplementedError("BatchNorm Base data format error")

    def forward(self, input):

        self._check_data_format(self._data_format)

        self._check_input_dim(input)

        if self.training:
            warnings.warn(
                "When training, we now always track global mean and variance."
            )

        return batch_norm(
            input,
            self._mean,
            self._variance,
            weight=self.weight,
            bias=self.bias,
            training=self.training,
            momentum=self._momentum,
            epsilon=self._epsilon,
            data_format=self._data_format,
            use_global_stats=self._use_global_stats,
        )

    def extra_repr(self):
        main_str = 'num_features={}, momentum={}, epsilon={}'.format(
            self._num_features, self._momentum, self._epsilon
        )
        if self._data_format != 'NCHW':
            main_str += ', data_format={}'.format(self._data_format)
        if self._name is not None:
            main_str += ', name={}'.format(self._name)
        return main_str


class BatchNorm1D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with an additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance), usually
    obtained from a pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : add a small value to the variance to prevent division by zero
    - :math:`\gamma` : trainable proportional parameter
    - :math:`\beta` : trainable deviation parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 2-D or 3-D tensor with shape: (batch, num_features) or (batch, num_features, length) when data_format is "NC" or "NCL",
            (batch, length, num_features) when data_format is "NLC".
        - output: 2-D or 3-D tensor with the same shape as the input x.

    Returns:
        None.

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 3))
          batch_norm = paddle.nn.BatchNorm1D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
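
          # A hedged sketch: after switching to eval mode the layer normalizes
          # with the tracked running statistics instead of per-batch statistics.
          batch_norm.eval()
          eval_out = batch_norm(x)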
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCL',
        use_global_stats=None,
        name=None,
    ):
        super().__init__(
            num_features,
            momentum,
            epsilon,
            weight_attr,
            bias_attr,
            data_format,
            use_global_stats,
            name,
        )
    def _check_data_format(self, input):
        if input == 'NCHW' or input == 'NC' or input == 'NCL':
            self._data_format = 'NCHW'
        elif input == "NHWC" or input == 'NLC':
            self._data_format = "NHWC"
        else:
            raise ValueError(
                'expected NC, NCL, NLC or None for data_format input'
            )

    def _check_input_dim(self, input):
        if len(input.shape) != 2 and len(input.shape) != 3:
            raise ValueError(
                'expected 2D or 3D input (got {}D input)'.format(
                    len(input.shape)
                )
            )


class BatchNorm2D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with an additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance), usually
    obtained from a pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : add a small value to the variance to prevent division by zero
    - :math:`\gamma` : trainable proportional parameter
    - :math:`\beta` : trainable deviation parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
        data_format(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 4-D tensor with shape: (batch, num_features, height, width) when data_format is "NCHW",
            or (batch, height, width, num_features) when data_format is "NHWC".
        - output: 4-D tensor with same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 2, 3))
          batch_norm = paddle.nn.BatchNorm2D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
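
          # A hedged channels-last variant (data_format "NHWC", per the
          # parameter above); the channel axis moves to the last position.
          x_nhwc = paddle.rand((2, 2, 3, 1))
          batch_norm_nhwc = paddle.nn.BatchNorm2D(1, data_format='NHWC')
          batch_norm_nhwc_out = batch_norm_nhwc(x_nhwc)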
    """

    def _check_data_format(self, input):
        if input == 'NCHW':
            self._data_format = input
        elif input == "NHWC":
            self._data_format = input
        else:
            raise ValueError('expected NCHW or NHWC for data_format input')

    def _check_input_dim(self, input):
        if len(input.shape) != 4:
            raise ValueError(
                'expected 4D input (got {}D input)'.format(len(input.shape))
            )


class BatchNorm3D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs with an additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance), usually
    obtained from a pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : add a small value to the variance to prevent division by zero
    - :math:`\gamma` : trainable proportional parameter
    - :math:`\beta` : trainable deviation parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
        data_format(str, optional): Specify the input data format, the data format can be "NCDHW" or "NDHWC". Default: NCDHW.
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 5-D tensor with shape: (batch, num_features, dims, height, width) when data_format is "NCDHW",
            or (batch, dims, height, width, num_features) when data_format is "NDHWC".
        - output: 5-D tensor with same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 2, 2, 3))
          batch_norm = paddle.nn.BatchNorm3D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
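
          # A hedged variant: use_global_stats=True makes training normalize
          # with the global (moving) statistics as well, as described above.
          batch_norm_global = paddle.nn.BatchNorm3D(1, use_global_stats=True)
          batch_norm_global_out = batch_norm_global(x)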
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCDHW',
        use_global_stats=None,
        name=None,
    ):
        super().__init__(
            num_features,
            momentum,
            epsilon,
            weight_attr,
            bias_attr,
            data_format,
            use_global_stats,
            name,
        )

    def _check_data_format(self, input):
        if input == 'NCHW' or input == 'NCDHW':
            self._data_format = 'NCHW'
        elif input == "NHWC" or input == "NDHWC":
            self._data_format = 'NHWC'
        else:
            raise ValueError(
                'expected NCDHW, NDHWC or None for data_format input'
            )

    def _check_input_dim(self, input):
        if len(input.shape) != 5:
            raise ValueError(
                'expected 5D input (got {}D input)'.format(len(input.shape))
            )


class SyncBatchNorm(_BatchNormBase):
    r"""

    This interface is used to construct a callable object of the ``SyncBatchNorm`` class.
    It implements the function of the Cross-GPU Synchronized Batch Normalization Layer, and can
    be used as a normalizer function for other operations, such as conv2d and fully connected
    operations.
    The data is normalized by the mean and variance of the channel computed over the whole
    mini-batch, which includes data on all GPUs.
    Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
    for more details.

    When the model is in training mode, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of the whole mini-batch data on all GPUs.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    - :math:`x` : whole mini-batch data on all GPUs
    - :math:`m` : the size of the whole mini-batch data

    When the model is in evaluation mode, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are global statistics (moving_mean and moving_variance,
    which are usually obtained from a pre-trained model). Global statistics are calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The formula of normalization is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : add a small value to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter vector
    - :math:`\beta` : trainable shift parameter vector

    Note:
        If you want to use a container to pack your model and the model contains
        :ref:`api_paddle_nn_SyncBatchNorm` used in the evaluation phase, please use
        :ref:`api_paddle_nn_LayerList` or :ref:`api_paddle_nn_Sequential` instead of
        :ref:`api_paddle_hub_list` to pack the model.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of this layer. If it is set to None or one attribute of ParamAttr, this layer
             will create ParamAttr as param_attr. If the Initializer of the param_attr
             is not set, the parameter is initialized with ones. If it is set to False,
             this layer will not have trainable scale parameter. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of this layer.
             If it is set to None or one attribute of ParamAttr, this layer
             will create ParamAttr as bias_attr. If the Initializer of the bias_attr
             is not set, the bias is initialized zero. If it is set to False, this layer will not
             have trainable bias parameter. Default: None.

    Shapes:
        - input: Tensor whose dimension is from 2 to 5.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            # required: gpu

            import paddle
            import paddle.nn as nn

            x = paddle.to_tensor([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
            if paddle.is_compiled_with_cuda():
                sync_batch_norm = nn.SyncBatchNorm(2)
                hidden1 = sync_batch_norm(x)
                print(hidden1)
                # Tensor(shape=[1, 2, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
                #        [[[[ 0.26824948,  1.09363246],
                #           [ 0.26824948, -1.63013160]],

                #          [[ 0.80956620, -0.66528702],
                #           [-1.27446556,  1.13018656]]]])
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCHW',
        name=None,
    ):
        super().__init__(
            num_features,
            momentum,
            epsilon,
            weight_attr,
            bias_attr,
            data_format,
            None,
            name,
        )
    def _check_data_format(self):
        if self._data_format in ['NCHW', 'NCDHW', 'NC', 'NCL']:
            self._data_format = 'NCHW'
        elif self._data_format in ["NHWC", "NDHWC", 'NLC']:
            self._data_format = 'NHWC'
        else:
            raise ValueError(
                'expected \'NCDHW\', \'NDHWC\', \'NCL\', \'NLC\', \'NC\', \'NCHW\', \'NHWC\' for data_format'
            )

    def forward(self, x):
        self._check_data_format()
        # create output
        # mean and mean_out share the same memory
        mean_out = self._mean
        # variance and variance out share the same memory
        variance_out = self._variance

        # train mode: use mini-batch stats; eval mode: use global stats
        # use_global_stats only supports False in sync_batch_norm
        if in_dygraph_mode():
            sync_batch_norm_out, _, _, _, _, _ = _C_ops.sync_batch_norm_(
                x,
                self._mean,
                self._variance,
                self.weight,
                self.bias,
                not self.training,
                self._momentum,
                self._epsilon,
                self._data_format,
                False,
                False,
            )
            return sync_batch_norm_out

        elif in_dynamic_mode():
            attrs = (
                "momentum",
                self._momentum,
                "epsilon",
                self._epsilon,
                "is_test",
                not self.training,
                "data_layout",
                self._data_format,
                "use_mkldnn",
                False,
                "fuse_with_relu",
                False,
                "use_global_stats",
                False,
                'trainable_statistics',
                False,
            )
            sync_batch_norm_out, _, _, _, _, _ = _legacy_C_ops.sync_batch_norm(
                x,
                self.weight,
                self.bias,
                self._mean,
                self._variance,
                mean_out,
                variance_out,
                *attrs
            )
            return sync_batch_norm_out

        check_variable_and_dtype(
            x, 'input', ['float16', 'float32', 'float64'], 'SyncBatchNorm'
        )

        attrs = {
            "momentum": self._momentum,
            "epsilon": self._epsilon,
            "is_test": not self.training,
            "data_layout": self._data_format,
            "use_mkldnn": False,
            "fuse_with_relu": False,
            "use_global_stats": False,
            "trainable_statistics": False,
        }

        inputs = {
            "X": [x],
            "Scale": [self.weight],
            "Bias": [self.bias],
            "Mean": [self._mean],
            "Variance": [self._variance],
        }

        saved_mean = self._helper.create_variable_for_type_inference(
            dtype=self._dtype, stop_gradient=True
        )
        saved_variance = self._helper.create_variable_for_type_inference(
            dtype=self._dtype, stop_gradient=True
        )
        sync_batch_norm_out = self._helper.create_variable_for_type_inference(
            self._dtype
        )

        outputs = {
            "Y": [sync_batch_norm_out],
            "MeanOut": [mean_out],
            "VarianceOut": [variance_out],
            "SavedMean": [saved_mean],
            "SavedVariance": [saved_variance],
        }

        self._helper.append_op(
            type="sync_batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
        )
        return sync_batch_norm_out

    @classmethod
    def convert_sync_batchnorm(cls, layer):
        """
        Helper function to convert :class:`paddle.nn.BatchNorm*d` layers in the model to :class:`paddle.nn.SyncBatchNorm` layers.

        Parameters:
            layer(paddle.nn.Layer): model containing one or more `BatchNorm*d` layers.

        Returns:
            The original model with its BatchNorm*d layers converted to SyncBatchNorm layers.

        Examples:
            .. code-block:: python
                import paddle
                import paddle.nn as nn

                model = nn.Sequential(nn.Conv2D(3, 5, 3), nn.BatchNorm2D(5))
                sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
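
                # The conversion is recursive (see the named_children loop
                # below), so BatchNorm*D layers nested in sublayers are
                # replaced too; printing the model should show SyncBatchNorm.
                print(sync_model)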

        """
        layer_output = layer
        if isinstance(layer, _BatchNormBase):
            if (
                layer._weight_attr is not None
                and not isinstance(layer._weight_attr, bool)
                and layer._weight_attr.name is not None
            ):
                layer._weight_attr.name = layer._weight_attr.name + '_sync'
            if (
                layer._bias_attr is not None
                and not isinstance(layer._bias_attr, bool)
                and layer._bias_attr.name is not None
            ):
                layer._bias_attr.name = layer._bias_attr.name + '_sync'

            layer_output = SyncBatchNorm(
                layer._num_features,
                layer._momentum,
                layer._epsilon,
                layer._weight_attr,
                layer._bias_attr,
                layer._data_format,
                layer._name,
            )
            if (
                layer._weight_attr is not False
                and layer._bias_attr is not False
            ):
                with no_grad():
                    layer_output.weight = layer.weight
                    layer_output.bias = layer.bias
            layer_output._mean = layer._mean
            layer_output._variance = layer._variance

        for name, sublayer in layer.named_children():
            layer_output.add_sublayer(
                name, cls.convert_sync_batchnorm(sublayer)
            )
        del layer
        return layer_output


class LocalResponseNorm(Layer):
    """
    Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions.
    For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_

    See more details in :ref:`api_paddle_nn_functional_local_response_norm`.

    Parameters:
        size (int): The number of channels to sum over.
        alpha (float, optional): The scaling parameter, positive. Default: 1e-4
        beta (float, optional): The exponent, positive. Default: 0.75
        k (float, optional): An offset, positive. Default: 1.0
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:
            If input is 3-D Tensor, the string could be `"NCL"` or `"NLC"` . When it is `"NCL"`,
            the data is stored in the order of: `[batch_size, input_channels, feature_length]`.
            If input is 4-D Tensor, the string could be  `"NCHW"`, `"NHWC"`. When it is `"NCHW"`,
            the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`.
            If input is 5-D Tensor, the string could be  `"NCDHW"`, `"NDHWC"` . When it is `"NCDHW"`,
            the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name (str, optional): Name for the operation (optional, default is None). For more information,
            please refer to :ref:`api_guide_Name`.
    Shape:
        - input: 3-D/4-D/5-D tensor.
        - output: 3-D/4-D/5-D tensor, the same shape as input.

    Examples:

    .. code-block:: python

        import paddle

        x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32")
        m = paddle.nn.LocalResponseNorm(size=5)
        y = m(x)
        print(y.shape)  # [3, 3, 112, 112]
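
        # A hedged 3-D variant (data_format "NCL", per the parameter above):
        x_ncl = paddle.rand(shape=(3, 3, 112), dtype="float32")
        m_ncl = paddle.nn.LocalResponseNorm(size=5, data_format="NCL")
        y_ncl = m_ncl(x_ncl)
        print(y_ncl.shape)  # [3, 3, 112]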
    """
    def __init__(
        self,
        size,
        alpha=0.0001,
        beta=0.75,
        k=1.0,
        data_format="NCHW",
        name=None,
    ):
        super().__init__()
        self.size = size
        self.alpha = alpha
        self.beta = beta
        self.k = k
        self.data_format = data_format
        self.name = name

    def forward(self, input):
        out = F.local_response_norm(
            input,
            self.size,
            self.alpha,
            self.beta,
            self.k,
            self.data_format,
            self.name,
        )
        return out

    def extra_repr(self):
        main_str = 'size={}, alpha={}, beta={}, k={}'.format(
            self.size, self.alpha, self.beta, self.k
        )
        if self.data_format != 'NCHW':
            main_str += ', data_format={}'.format(self.data_format)
        if self.name is not None:
            main_str += ', name={}'.format(self.name)
        return main_str