# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define normalization api

from ...fluid.dygraph import BatchNorm  # noqa: F401
from ...fluid.dygraph import SpectralNorm  # noqa: F401

from ...framework import get_default_dtype

from ..initializer import Constant
from ...framework import ParamAttr
from ...fluid.data_feeder import check_variable_and_dtype

from ..functional import batch_norm, layer_norm, instance_norm

import numpy as np
import numbers
import warnings
from ...framework import no_grad
from .. import functional as F
from paddle import _C_ops, _legacy_C_ops
from .. import Layer
from paddle import in_dynamic_mode
from paddle.device import get_all_custom_device_type
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph

__all__ = []


class _InstanceNormBase(Layer):
    """
    This class is the base class for InstanceNorm1D, InstanceNorm2D and InstanceNorm3D.

    See InstanceNorm1D, InstanceNorm2D or InstanceNorm3D for more details.
    """

    def __init__(
        self,
        num_features,
        epsilon=1e-5,
        momentum=0.9,
        weight_attr=None,
        bias_attr=None,
        data_format="NCHW",
        name=None,
    ):
        super().__init__()

        if weight_attr is False or bias_attr is False:
            assert (
                weight_attr == bias_attr
            ), "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
        self._epsilon = epsilon
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._num_features = num_features

        if weight_attr is not False and bias_attr is not False:
            self.scale = self.create_parameter(
                attr=self._weight_attr,
                shape=[num_features],
                default_initializer=Constant(1.0),
                is_bias=False,
            )
            self.bias = self.create_parameter(
                attr=self._bias_attr,
                shape=[num_features],
                default_initializer=Constant(0.0),
                is_bias=True,
            )
        else:
            self.scale = None
            self.bias = None

    def _check_input_dim(self, input):
        raise NotImplementedError("InstanceNorm Base error")

    def forward(self, input):
        self._check_input_dim(input)

        return instance_norm(
            input, weight=self.scale, bias=self.bias, eps=self._epsilon
        )

    def extra_repr(self):
        return 'num_features={}, epsilon={}'.format(
            self._num_features, self._epsilon
        )


class InstanceNorm1D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm1D`. Applies Instance Normalization over a 3D input (a mini-batch of 1D inputs with an additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.

    DataLayout: NCL `[batch, in_channels, length]`

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means height of feature map, `W` means width of feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            with one. If it is set to False, weight_attr will not be created. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized with zero.
            If it is set to False, bias_attr will not be created. Default: None.
        data_format(str, optional): Specify the input data format, may be "NC" or "NCL". Default "NCL".
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.


    Shape:
        - x: 2-D or 3-D tensor with shape: (batch, num_features) or (batch, num_features, length).
        - output: tensor with the same shape as input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

          import paddle

          x = paddle.rand((2, 2, 3))
          instance_norm = paddle.nn.InstanceNorm1D(2)
          instance_norm_out = instance_norm(x)

          print(instance_norm_out)
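
          # weight_attr=False and bias_attr=False together disable the
          # learnable affine parameters (a sketch; the base class asserts
          # that the two flags may only be False together)
          no_affine = paddle.nn.InstanceNorm1D(2, weight_attr=False, bias_attr=False)
          print(no_affine(x))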

    """

    def _check_input_dim(self, input):
        if len(input.shape) != 2 and len(input.shape) != 3:
            raise ValueError(
                'expected 2D or 3D input (got {}D input)'.format(
                    len(input.shape)
                )
            )


class InstanceNorm2D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm2D`. Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs with an additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.

    DataLayout: NCHW `[batch, in_channels, in_height, in_width]`

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means height of feature map, `W` means width of feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            with one. If it is set to False, weight_attr will not be created. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized with zero.
            If it is set to False, bias_attr will not be created. Default: None.
        data_format(str, optional): Specify the input data format, could be "NCHW". Default: NCHW.
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 4-D tensor with shape: (batch, num_features, height, width).
        - output: 4-D tensor with the same shape as input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand((2, 2, 2, 3))
            instance_norm = paddle.nn.InstanceNorm2D(2)
            instance_norm_out = instance_norm(x)

            print(instance_norm_out)
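
            # the learnable scale and bias each have shape [num_features]
            # (a sketch; see the create_parameter calls in _InstanceNormBase)
            print(instance_norm.scale.shape)  # [2]
            print(instance_norm.bias.shape)   # [2]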
    """

    def _check_input_dim(self, input):
        if len(input.shape) != 4:
            raise ValueError(
                'expected 4D input (got {}D input)'.format(len(input.shape))
            )


class InstanceNorm3D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm3D`. Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs with an additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.

    DataLayout: NCDHW `[batch, in_channels, D, in_height, in_width]`

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means height of feature map, `W` means width of feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            with one. If it is set to False, weight_attr will not be created. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized with zero.
            If it is set to False, bias_attr will not be created. Default: None.
        data_format(str, optional): Specify the input data format, could be "NCDHW". Default: NCDHW.
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 5-D tensor with shape: (batch, num_features, dims, height, width).
        - output: 5-D tensor with the same shape as input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand((2, 2, 2, 2, 3))
            instance_norm = paddle.nn.InstanceNorm3D(2)
            instance_norm_out = instance_norm(x)

            print(instance_norm_out)
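
            # inputs must be 5-D; a 4-D tensor raises ValueError
            # (a sketch of the _check_input_dim guard below)
            try:
                instance_norm(paddle.rand((2, 2, 2, 3)))
            except ValueError as e:
                print(e)  # expected 5D input (got 4D input)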
    """

    def _check_input_dim(self, input):
        if len(input.shape) != 5:
            raise ValueError(
                'expected 5D input (got {}D input)'.format(len(input.shape))
            )


class GroupNorm(Layer):
    """

    This interface is used to construct a callable object of the ``GroupNorm`` class.
    For more details, refer to code examples.
    It implements the function of the Group Normalization Layer.
    Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .

    Parameters:
        num_groups(int): The number of groups that the channels are divided into.
        num_channels(int): The number of channels of the input.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            scale :math:`g`. If it is set to False, no scale will be added to the output units.
            If it is set to None, the scale is initialized with one. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            bias :math:`b`. If it is set to False, no bias will be added to the output units.
            If it is set to None, the bias is initialized with zero. Default: None.
        data_format(str, optional): Specify the input data format, may be "NCHW" or "NHWC". Default: NCHW.
        name(str, optional): Name for the GroupNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: Tensor with shape: (batch, num_features, *).
        - output: The same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.arange(48, dtype="float32").reshape((2, 6, 2, 2))
            group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6)
            group_norm_out = group_norm(x)

            print(group_norm_out)
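
            # with num_groups=1 all six channels are normalized jointly
            # (a sketch of the grouping behaviour)
            single_group = paddle.nn.GroupNorm(num_channels=6, num_groups=1)
            print(single_group(x))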
    """

    def __init__(
        self,
        num_groups,
        num_channels,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCHW',
        name=None,
    ):
        super().__init__()
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._epsilon = epsilon
        self._num_channels = num_channels
        self._num_groups = num_groups
        if data_format not in ['NCHW', 'NHWC']:
            raise ValueError("unsupported data layout:" + data_format)
        self._data_format = data_format

        param_shape = [self._num_channels]

        if weight_attr is False:
            self.weight = self.create_parameter(
                attr=None, shape=param_shape, default_initializer=Constant(1.0)
            )
            self.weight.stop_gradient = True
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                default_initializer=Constant(1.0),
            )
            self.weight.stop_gradient = (
                self._weight_attr is not None
                and self._weight_attr.learning_rate == 0.0
            )

        if bias_attr is False:
            self.bias = self.create_parameter(
                attr=None,
                shape=param_shape,
                default_initializer=Constant(0.0),
                is_bias=True,
            )
            self.bias.stop_gradient = True
        else:
            self.bias = self.create_parameter(
                attr=self._bias_attr, shape=param_shape, is_bias=True
            )
            self.bias.stop_gradient = (
                self._bias_attr is not None
                and self._bias_attr.learning_rate == 0.0
            )

    def forward(self, input):
        if in_dygraph_mode():
            return _C_ops.group_norm(
                input,
                self.weight,
                self.bias,
                self._epsilon,
                self._num_groups,
                self._data_format,
            )

        mean_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype, stop_gradient=True
        )
        variance_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype, stop_gradient=True
        )

        if _in_legacy_dygraph():
            pre_act, _, _ = _legacy_C_ops.group_norm(
                input,
                self.weight,
                self.bias,
                mean_out,
                variance_out,
                'epsilon',
                self._epsilon,
                'groups',
                self._num_groups,
            )
            return pre_act

        inputs = {'X': input}
        if self.bias is not None:
            inputs['Bias'] = self.bias
        if self.weight is not None:
            inputs['Scale'] = self.weight

        # create output
        group_norm_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype
        )

        self._helper.append_op(
            type="group_norm",
            inputs=inputs,
            outputs={
                "Y": group_norm_out,
                "Mean": mean_out,
                "Variance": variance_out,
            },
            attrs={"epsilon": self._epsilon, "groups": self._num_groups},
        )

        return self._helper.append_activation(group_norm_out, None)

    def extra_repr(self):
        return 'num_groups={}, num_channels={}, epsilon={}'.format(
            self._num_groups, self._num_channels, self._epsilon
        )


class LayerNorm(Layer):
    r"""
    Construct a callable object of the ``LayerNorm`` class.
    For more details, refer to code examples.
    It implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
    Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_

    The formula is as follows:

    ..  math::

        \mu & = \frac{1}{H}\sum_{i=1}^{H} x_i

        \sigma & = \sqrt{\frac{1}{H}\sum_{i=1}^{H}{(x_i - \mu)^2} + \epsilon}

        y & = f(\frac{g}{\sigma}(x - \mu) + b)

    - :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layer
    - :math:`\epsilon`: the small value added to the variance to prevent division by zero.
    - :math:`g`: the trainable scale parameter.
    - :math:`b`: the trainable bias parameter.

    Parameters:
        normalized_shape(int|list|tuple): Input shape from an expected input of
            size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
            If it is a single integer, this module will normalize over the last dimension
            which is expected to be of that specific size.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            gain :math:`g`. If it is False, weight is None. If it is None, a default :code:`ParamAttr` is added as scale. The
            :attr:`param_attr` is initialized as 1 if it is added. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            bias :math:`b`. If it is False, bias is None. If it is None, a default :code:`ParamAttr` is added as bias. The
            :attr:`bias_attr` is initialized as 0 if it is added. Default: None.
        name(str, optional): Name for the LayerNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 2-D, 3-D, 4-D or 5-D tensor.
        - output: same shape as input x.

    Returns:
        None

    Examples:

        .. code-block:: python

          import paddle

          x = paddle.rand((2, 2, 2, 3))
          layer_norm = paddle.nn.LayerNorm(x.shape[1:])
          layer_norm_out = layer_norm(x)

          print(layer_norm_out)
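
          # an int normalized_shape normalizes over the last dimension only
          # (a sketch; 3 must match x.shape[-1])
          last_dim_norm = paddle.nn.LayerNorm(3)
          print(last_dim_norm(x))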
    """

    def __init__(
        self,
        normalized_shape,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        name=None,
    ):
        super().__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = [normalized_shape]

        self._normalized_shape = list(normalized_shape)
        self._epsilon = epsilon
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        param_shape = [np.prod(self._normalized_shape)]

        if weight_attr is False:
            self.weight = None
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                default_initializer=Constant(1.0),
            )

        if bias_attr is False:
            self.bias = None
        else:
            self.bias = self.create_parameter(
                attr=self._bias_attr, shape=param_shape, is_bias=True
            )

    def forward(self, input):
        return layer_norm(
            input,
            normalized_shape=self._normalized_shape,
            weight=self.weight,
            bias=self.bias,
            epsilon=self._epsilon,
        )

    def extra_repr(self):
        return 'normalized_shape={}, epsilon={}'.format(
            self._normalized_shape, self._epsilon
        )


class _BatchNormBase(Layer):
    """
    BatchNorm base class.
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCHW',
        use_global_stats=None,
        name=None,
    ):
        super().__init__()
        self._num_features = num_features
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._use_global_stats = use_global_stats

        if get_default_dtype() == 'float16':
            self._dtype = 'float32'
        else:
            self._dtype = get_default_dtype()

        param_shape = [num_features]

        # create parameter
        if weight_attr is False:
            self.weight = self.create_parameter(
                attr=None,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(1.0),
            )
            self.weight.stop_gradient = True
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(1.0),
            )
            self.weight.stop_gradient = (
                self._weight_attr is not None
                and self._weight_attr.learning_rate == 0.0
            )

        if bias_attr is False:
            self.bias = self.create_parameter(
                attr=None,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(0.0),
                is_bias=True,
            )
            self.bias.stop_gradient = True
        else:
            self.bias = self.create_parameter(
                attr=self._bias_attr,
                shape=param_shape,
                dtype=self._dtype,
                is_bias=True,
            )
            self.bias.stop_gradient = (
                self._bias_attr is not None
                and self._bias_attr.learning_rate == 0.0
            )

        moving_mean_name = None
        moving_variance_name = None

        if name is not None:
            moving_mean_name = name + "_mean"
            moving_variance_name = name + "_variance"

        self._mean = self.create_parameter(
            dtype=self._dtype,
            attr=ParamAttr(
                name=moving_mean_name,
                initializer=Constant(0.0),
                trainable=False,
                do_model_average=True,
            ),
            shape=param_shape,
        )
        self._mean.stop_gradient = True

        self._variance = self.create_parameter(
            dtype=self._dtype,
            attr=ParamAttr(
                name=moving_variance_name,
                initializer=Constant(1.0),
                trainable=False,
                do_model_average=True,
            ),
            shape=param_shape,
        )
        self._variance.stop_gradient = True

        # TODO(qili93): temporary for Ascend NPU performance, to be removed along with the npu_identity op
        if 'npu' in get_all_custom_device_type():
            with no_grad():
                weight_trans = _C_ops.npu_identity(
                    self.weight, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                bias_trans = _C_ops.npu_identity(
                    self.bias, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                mean_trans = _C_ops.npu_identity(
                    self._mean, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                var_trans = _C_ops.npu_identity(
                    self._variance, 3
                )  # ACL_FORMAT_NC1HWC0 = 3
                weight_trans._share_underline_tensor_to(self.weight)
                bias_trans._share_underline_tensor_to(self.bias)
                mean_trans._share_underline_tensor_to(self._mean)
                var_trans._share_underline_tensor_to(self._variance)

        self._data_format = data_format
        self._in_place = False
        self._momentum = momentum
        self._epsilon = epsilon
        self._fuse_with_relu = False
        self._name = name

    def _check_input_dim(self, input):
        raise NotImplementedError("BatchNorm Base error")

    def _check_data_format(self, input):
        raise NotImplementedError("BatchNorm Base data format error")

    def forward(self, input):

        self._check_data_format(self._data_format)

        self._check_input_dim(input)

        if self.training:
            warnings.warn(
                "When training, we now always track global mean and variance."
            )

        return batch_norm(
            input,
            self._mean,
            self._variance,
            weight=self.weight,
            bias=self.bias,
            training=self.training,
            momentum=self._momentum,
            epsilon=self._epsilon,
            data_format=self._data_format,
            use_global_stats=self._use_global_stats,
        )

    def extra_repr(self):
        main_str = 'num_features={}, momentum={}, epsilon={}'.format(
            self._num_features, self._momentum, self._epsilon
        )
        if self._data_format != 'NCHW':
            main_str += ', data_format={}'.format(self._data_format)
        if self._name is not None:
            main_str += ', name={}'.format(self._name)
        return main_str


class BatchNorm1D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with an additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance),
    usually obtained from a pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable proportional parameter
    - :math:`\beta` : trainable deviation parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized with zero. Default: None.
        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch; if set to True, use the global statistics; if set to None, use global statistics in the test phase and the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 2-D or 3-D tensor with shape: (batch, num_features) or (batch, num_features, length) when data_format is "NC" or "NCL",
            (batch, length, num_features) when data_format is "NLC".
        - output: tensor with the same shape as input x.

    Returns:
        None.


    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 3))
          batch_norm = paddle.nn.BatchNorm1D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
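
          # after switching to evaluation mode, the layer normalizes with the
          # tracked running statistics instead of batch statistics (a sketch)
          batch_norm.eval()
          eval_out = batch_norm(x)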
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCL',
        use_global_stats=None,
        name=None,
    ):
        super().__init__(
            num_features,
            momentum,
            epsilon,
            weight_attr,
            bias_attr,
            data_format,
            use_global_stats,
            name,
        )

    def _check_data_format(self, input):
        if input == 'NCHW' or input == 'NC' or input == 'NCL':
            self._data_format = 'NCHW'
        elif input == "NHWC" or input == 'NLC':
            self._data_format = "NHWC"
        else:
            raise ValueError(
                'expected NC, NCL, NLC or None for data_format input'
            )

    def _check_input_dim(self, input):
        if len(input.shape) != 2 and len(input.shape) != 3:
            raise ValueError(
                'expected 2D or 3D input (got {}D input)'.format(
                    len(input.shape)
                )
            )


class BatchNorm2D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with an additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance),
    usually obtained from a pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable proportional parameter
    - :math:`\beta` : trainable deviation parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized with zero. Default: None.
        data_format(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch; if set to True, use the global statistics; if set to None, use global statistics in the test phase and the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 4-D tensor with shape: (batch, num_features, height, width) when data_format is "NCHW",
            or (batch, height, width, num_features) when data_format is "NHWC".
        - output: 4-D tensor with the same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 2, 3))
          batch_norm = paddle.nn.BatchNorm2D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
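
          # data_format="NHWC" expects channels last; a sketch with shape
          # (batch, height, width, num_features)
          x_nhwc = paddle.rand((2, 2, 3, 1))
          batch_norm_nhwc = paddle.nn.BatchNorm2D(1, data_format="NHWC")
          batch_norm_nhwc_out = batch_norm_nhwc(x_nhwc)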
    """

    def _check_data_format(self, input):
        if input == 'NCHW':
            self._data_format = input
        elif input == "NHWC":
            self._data_format = input
        else:
            raise ValueError('expected NCHW or NHWC for data_format input')

    def _check_input_dim(self, input):
        if len(input.shape) != 4:
            raise ValueError(
                'expected 4D input (got {}D input)'.format(len(input.shape))
            )


class BatchNorm3D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs with an additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance),
    usually obtained from a pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable proportional parameter
    - :math:`\beta` : trainable deviation parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized with zero. Default: None.
        data_format(str, optional): Specify the input data format, the data format can be "NCDHW" or "NDHWC". Default: NCDHW.
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch; if set to True, use the global statistics; if set to None, use global statistics in the test phase and the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 5-D tensor with shape: (batch, num_features, dims, height, width) when data_format is "NCDHW",
            or (batch, dims, height, width, num_features) when data_format is "NDHWC".
        - output: 5-D tensor with the same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 2, 2, 3))
          batch_norm = paddle.nn.BatchNorm3D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
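
          # use_global_stats=True makes the layer normalize with the running
          # statistics even in training mode (a sketch)
          batch_norm_global = paddle.nn.BatchNorm3D(1, use_global_stats=True)
          global_out = batch_norm_global(x)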
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCDHW',
        use_global_stats=None,
        name=None,
    ):
        super().__init__(
            num_features,
            momentum,
            epsilon,
            weight_attr,
            bias_attr,
            data_format,
            use_global_stats,
            name,
        )

    def _check_data_format(self, input):
        if input == 'NCHW' or input == 'NCDHW':
            self._data_format = 'NCHW'
        elif input == "NHWC" or input == "NDHWC":
            self._data_format = 'NHWC'
        else:
            raise ValueError(
                'expected NCDHW, NDHWC or None for data_format input'
            )

    def _check_input_dim(self, input):
        if len(input.shape) != 5:
            raise ValueError(
                'expected 5D input (got {}D input)'.format(len(input.shape))
            )


class SyncBatchNorm(_BatchNormBase):
    r"""

    This interface is used to construct a callable object of the ``SyncBatchNorm`` class.
    It implements the function of the Cross-GPU Synchronized Batch Normalization Layer, and can
    be used as a normalizer function for other operations, such as conv2d and fully connected
    operations.
    The data is normalized by the mean and variance of the channel based on the whole mini-batch,
    which includes the data on all GPUs.
    Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
    for more details.

    When the model is in training mode, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of the whole mini-batch data on all gpus.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    - :math:`x` : whole mini-batch data in all gpus
    - :math:`m` : the size of the whole mini-batch data

    When the model is in evaluation mode, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are global statistics (moving_mean and moving_variance,
    usually obtained from a pre-trained model). Global statistics are calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The formula of normalization is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter vector
    - :math:`\beta` : trainable shift parameter vector

    Note:
        If you want to use a container to pack your model and have :ref:`api_paddle_nn_SyncBatchNorm` layers in the
        evaluation phase, please use :ref:`api_paddle_nn_LayerList` or :ref:`api_paddle_nn_Sequential` instead of
        :ref:`api_paddle_hub_list` to pack the model.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of this layer. If it is set to None or one attribute of ParamAttr, this layer
             will create ParamAttr as param_attr. If the Initializer of the param_attr
             is not set, the parameter is initialized with ones. If it is set to False,
             this layer will not have a trainable scale parameter. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of this layer.
             If it is set to None or one attribute of ParamAttr, this layer
             will create ParamAttr as bias_attr. If the Initializer of the bias_attr
             is not set, the bias is initialized with zero. If it is set to False, this layer will not
             have a trainable bias parameter. Default: None.

    Shapes:
        - input: Tensor with dimension from 2 to 5.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            # required: gpu

            import paddle
            import paddle.nn as nn

            x = paddle.to_tensor([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')

            if paddle.is_compiled_with_cuda():
                sync_batch_norm = nn.SyncBatchNorm(2)
                hidden1 = sync_batch_norm(x)
                print(hidden1)
                # Tensor(shape=[1, 2, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
                #        [[[[ 0.26824948,  1.09363246],
                #           [ 0.26824948, -1.63013160]],

                #          [[ 0.80956620, -0.66528702],
                #           [-1.27446556,  1.13018656]]]])
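
            # to convert the BatchNorm layers of an existing model instead,
            # see convert_sync_batchnorm below (a sketch):
            #   sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)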
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCHW',
        name=None,
    ):
        super().__init__(
            num_features,
            momentum,
            epsilon,
            weight_attr,
            bias_attr,
            data_format,
            None,
            name,
        )

    def _check_data_format(self):
        if self._data_format in ['NCHW', 'NCDHW', 'NC', 'NCL']:
            self._data_format = 'NCHW'
        elif self._data_format in ["NHWC", "NDHWC", 'NLC']:
            self._data_format = 'NHWC'
        else:
            raise ValueError(
                'expected \'NCDHW\', \'NDHWC\', \'NCL\', \'NLC\', \'NC\', \'NCHW\', \'NHWC\' for data_format'
            )

    def forward(self, x):
        self._check_data_format()
        # create output
        # mean and mean_out share the same memory
        mean_out = self._mean
        # variance and variance_out share the same memory
        variance_out = self._variance

        # train mode: use mini-batch stats, eval mode: use global stats
        # use_global_stats only support False in sync_batch_norm
        if in_dygraph_mode():
            sync_batch_norm_out, _, _, _, _, _ = _C_ops.sync_batch_norm_(
                x,
                self._mean,
                self._variance,
                self.weight,
                self.bias,
                not self.training,
                self._momentum,
                self._epsilon,
                self._data_format,
                False,
                False,
            )
            return sync_batch_norm_out

        elif in_dynamic_mode():
            attrs = (
                "momentum",
                self._momentum,
                "epsilon",
                self._epsilon,
                "is_test",
                not self.training,
                "data_layout",
                self._data_format,
                "use_mkldnn",
                False,
                "fuse_with_relu",
                False,
                "use_global_stats",
                False,
                'trainable_statistics',
                False,
            )
            sync_batch_norm_out, _, _, _, _, _ = _legacy_C_ops.sync_batch_norm(
                x,
                self.weight,
                self.bias,
                self._mean,
                self._variance,
                mean_out,
                variance_out,
                *attrs
            )
            return sync_batch_norm_out

        check_variable_and_dtype(
            x, 'input', ['float16', 'float32', 'float64'], 'SyncBatchNorm'
        )

        attrs = {
            "momentum": self._momentum,
            "epsilon": self._epsilon,
            "is_test": not self.training,
            "data_layout": self._data_format,
            "use_mkldnn": False,
            "fuse_with_relu": False,
            "use_global_stats": False,
            "trainable_statistics": False,
        }

        inputs = {
            "X": [x],
            "Scale": [self.weight],
            "Bias": [self.bias],
            "Mean": [self._mean],
            "Variance": [self._variance],
        }

        saved_mean = self._helper.create_variable_for_type_inference(
            dtype=self._dtype, stop_gradient=True
        )
        saved_variance = self._helper.create_variable_for_type_inference(
            dtype=self._dtype, stop_gradient=True
        )
        sync_batch_norm_out = self._helper.create_variable_for_type_inference(
            self._dtype
        )

        outputs = {
            "Y": [sync_batch_norm_out],
            "MeanOut": [mean_out],
            "VarianceOut": [variance_out],
            "SavedMean": [saved_mean],
            "SavedVariance": [saved_variance],
        }

        self._helper.append_op(
            type="sync_batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
        )
        return sync_batch_norm_out

    @classmethod
    def convert_sync_batchnorm(cls, layer):
        """
        Helper function to convert :class:`paddle.nn.BatchNorm*d` layers in the model to :class:`paddle.nn.SyncBatchNorm` layers.

        Parameters:
            layer(paddle.nn.Layer): model containing one or more `BatchNorm*d` layers.

        Returns:
            The original model with its `BatchNorm*d` layers converted to `SyncBatchNorm` layers.

        Examples:
            .. code-block:: python

                import paddle
                import paddle.nn as nn

                model = nn.Sequential(nn.Conv2D(3, 5, 3), nn.BatchNorm2D(5))
                sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
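
                # conversion recurses through named_children(), so nested
                # containers are handled too (a sketch; order follows the
                # Sequential definition)
                print([type(l).__name__ for l in sync_model.sublayers()])
                # ['Conv2D', 'SyncBatchNorm']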

        """
        layer_output = layer
        if isinstance(layer, _BatchNormBase):
1315
            if (
1316
                layer._weight_attr is not None
1317
                and not isinstance(layer._weight_attr, bool)
1318
                and layer._weight_attr.name is not None
1319
            ):
C
ceci3 已提交
1320
                layer._weight_attr.name = layer._weight_attr.name + '_sync'
1321
            if (
1322
                layer._bias_attr is not None
1323
                and not isinstance(layer._bias_attr, bool)
1324
                and layer._bias_attr.name is not None
1325
            ):
C
ceci3 已提交
1326 1327
                layer._bias_attr.name = layer._bias_attr.name + '_sync'

1328 1329 1330 1331 1332 1333 1334 1335 1336
            layer_output = SyncBatchNorm(
                layer._num_features,
                layer._momentum,
                layer._epsilon,
                layer._weight_attr,
                layer._bias_attr,
                layer._data_format,
                layer._name,
            )
1337

1338 1339 1340 1341
            if (
                layer._weight_attr is not False
                and layer._bias_attr is not False
            ):
1342 1343 1344 1345 1346 1347
                with no_grad():
                    layer_output.weight = layer.weight
                    layer_output.bias = layer.bias
            layer_output._mean = layer._mean
            layer_output._variance = layer._variance

C
ceci3 已提交
1348
        for name, sublayer in layer.named_children():
1349 1350 1351
            layer_output.add_sublayer(
                name, cls.convert_sync_batchnorm(sublayer)
            )
1352 1353
        del layer
        return layer_output


class LocalResponseNorm(Layer):
    """
    Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions.
    For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_

    See more details in :ref:`api_paddle_nn_functional_local_response_norm` .

    Parameters:
        size (int): The number of channels to sum over.
        alpha (float, optional): The scaling parameter, positive. Default: 1e-4
        beta (float, optional): The exponent, positive. Default: 0.75
        k (float, optional): An offset, positive. Default: 1.0
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:
            If input is a 3-D Tensor, the string could be `"NCL"` or `"NLC"`. When it is `"NCL"`,
            the data is stored in the order of: `[batch_size, input_channels, feature_length]`.
            If input is a 4-D Tensor, the string could be `"NCHW"` or `"NHWC"`. When it is `"NCHW"`,
            the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`.
            If input is a 5-D Tensor, the string could be `"NCDHW"` or `"NDHWC"`. When it is `"NCDHW"`,
            the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name (str, optional): Name for the operation (optional, default is None). For more information,
            please refer to :ref:`api_guide_Name`.

    Shape:
        - input: 3-D/4-D/5-D tensor.
        - output: 3-D/4-D/5-D tensor, the same shape as input.

    Examples:

    .. code-block:: python

        import paddle

        x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32")
        m = paddle.nn.LocalResponseNorm(size=5)
        y = m(x)
        print(y.shape)  # [3, 3, 112, 112]
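
        # a 3-D sketch with data_format="NCL"
        x_ncl = paddle.rand(shape=(3, 3, 112), dtype="float32")
        m_ncl = paddle.nn.LocalResponseNorm(size=5, data_format="NCL")
        print(m_ncl(x_ncl).shape)  # [3, 3, 112]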
    """

    def __init__(
        self,
        size,
        alpha=0.0001,
        beta=0.75,
        k=1.0,
        data_format="NCHW",
        name=None,
    ):
        super().__init__()
        self.size = size
        self.alpha = alpha
        self.beta = beta
        self.k = k
        self.data_format = data_format
        self.name = name

    def forward(self, input):
        out = F.local_response_norm(
            input,
            self.size,
            self.alpha,
            self.beta,
            self.k,
            self.data_format,
            self.name,
        )
        return out

    def extra_repr(self):
        main_str = 'size={}, alpha={}, beta={}, k={}'.format(
            self.size, self.alpha, self.beta, self.k
        )
        if self.data_format != 'NCHW':
            main_str += ', data_format={}'.format(self.data_format)
        if self.name is not None:
            main_str += ', name={}'.format(self.name)
        return main_str