# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define normalization api

from ...fluid.dygraph import BatchNorm  # noqa: F401
from ...fluid.dygraph import SpectralNorm  # noqa: F401

from ...framework import get_default_dtype

from ..initializer import Constant
from ...framework import ParamAttr
from ...fluid.data_feeder import check_variable_and_dtype
from ...fluid import dygraph_utils

from ..functional import batch_norm, layer_norm, instance_norm

import numpy as np
import numbers
import warnings
from ...framework import no_grad
from .. import functional as F
from paddle import _C_ops, _legacy_C_ops
from .. import Layer
from paddle import in_dynamic_mode
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph

__all__ = []


class _InstanceNormBase(Layer):
    """
    This is the base class for InstanceNorm1D, InstanceNorm2D and InstanceNorm3D.

    See InstanceNorm1D, InstanceNorm2D or InstanceNorm3D for more details.
    """

    def __init__(self,
                 num_features,
                 epsilon=1e-5,
                 momentum=0.9,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW",
                 name=None):
        super(_InstanceNormBase, self).__init__()

        if weight_attr == False or bias_attr == False:
            assert weight_attr == bias_attr, "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
        self._epsilon = epsilon
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._num_features = num_features

        if weight_attr != False and bias_attr != False:
            self.scale = self.create_parameter(
                attr=self._weight_attr,
                shape=[num_features],
                default_initializer=Constant(1.0),
                is_bias=False)
            self.bias = self.create_parameter(attr=self._bias_attr,
                                              shape=[num_features],
                                              default_initializer=Constant(0.0),
                                              is_bias=True)
        else:
            self.scale = None
            self.bias = None

    def _check_input_dim(self, input):
        raise NotImplementedError("InstanceNorm Base error")

    def forward(self, input):
        self._check_input_dim(input)

        return instance_norm(input,
                             weight=self.scale,
                             bias=self.bias,
                             eps=self._epsilon)

    def extra_repr(self):
        return 'num_features={}, epsilon={}'.format(self._num_features,
                                                    self._epsilon)


class InstanceNorm1D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm1D`. Applies Instance Normalization over a 3D input (a mini-batch of 1D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization .

    DataLayout: NCL `[batch, in_channels, length]`

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means height of feature map, `W` means width of feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            to one. If it is set to False, will not create weight_attr. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero.
            If it is set to False, will not create bias_attr. Default: None.
        data_format(str, optional): Specify the input data format, may be "NC", "NCL". Default "NCL".
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.


    Shape:
        - x: 2-D or 3-D tensor with shape: (batch, num_features) or (batch, num_features, length).
        - output: Tensor with the same shape as input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

          import paddle

          x = paddle.rand((2, 2, 3))
          instance_norm = paddle.nn.InstanceNorm1D(2)
          instance_norm_out = instance_norm(x)

          print(instance_norm_out)

    """

    def _check_input_dim(self, input):
        if len(input.shape) != 2 and len(input.shape) != 3:
            raise ValueError('expected 2D or 3D input (got {}D input)'.format(
                len(input.shape)))


class InstanceNorm2D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm2D`. Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization .

    DataLayout: NCHW `[batch, in_channels, in_height, in_width]`


    :math:`input` is the input features over a mini-batch.

    ..  math::

        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means height of feature map, `W` means width of feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            to one. If it is set to False, will not create weight_attr. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero.
            If it is set to False, will not create bias_attr. Default: None.
        data_format(str, optional): Specify the input data format, could be "NCHW". Default: NCHW.
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 4-D tensor with shape: (batch, num_features, height, width).
        - output: 4-D tensor with same shape as input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand((2, 2, 2, 3))
            instance_norm = paddle.nn.InstanceNorm2D(2)
            instance_norm_out = instance_norm(x)

            print(instance_norm_out)
    """

    def _check_input_dim(self, input):
        if len(input.shape) != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(
                len(input.shape)))


class InstanceNorm3D(_InstanceNormBase):
    r"""
    Create a callable object of `InstanceNorm3D`. Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization .

    DataLayout: NCDHW `[batch, in_channels, D, in_height, in_width]`


    :math:`input` is the input features over a mini-batch.

    ..  math::

        \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//\
        \ mean\ of\ one\  feature\ map\ in\ mini-batch \\
        \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i - \
        \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    Where `H` means height of feature map, `W` means width of feature map.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): A value added to the denominator for
            numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the weight_attr is not set, the parameter is initialized
            to one. If it is set to False, will not create weight_attr. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
            If it is set to None or one attribute of ParamAttr, instance_norm
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero.
            If it is set to False, will not create bias_attr. Default: None.
        data_format(str, optional): Specify the input data format, could be "NCDHW". Default: NCDHW.
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 5-D tensor with shape: (batch, num_features, dims, height, width).
        - output: 5-D tensor with same shape as input x.

    Returns:
        None.


    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand((2, 2, 2, 2, 3))
            instance_norm = paddle.nn.InstanceNorm3D(2)
            instance_norm_out = instance_norm(x)

            print(instance_norm_out)
    """

    def _check_input_dim(self, input):
        if len(input.shape) != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(
                len(input.shape)))


class GroupNorm(Layer):
    """
    This interface is used to construct a callable object of the ``GroupNorm`` class.
    For more details, refer to code examples.
    It implements the function of the Group Normalization Layer.
    Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .

    Parameters:
        num_groups(int): The number of groups that divided from channels.
        num_channels(int): The number of channels of input.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            scale :math:`g`. If it is set to False, no scale will be added to the output units.
            If it is set to None, the scale is initialized to one. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            bias :math:`b`. If it is set to False, no bias will be added to the output units.
            If it is set to None, the bias is initialized to zero. Default: None.
        data_format(str, optional): Specify the input data format. Only NCHW is supported. Default: NCHW.
        name(str, optional): Name for the GroupNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: Tensor with shape: (batch, num_features, *).
        - output: The same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()
            np.random.seed(123)
            x_data = np.random.random(size=(2, 6, 2, 2)).astype('float32')
            x = paddle.to_tensor(x_data)
            group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6)
            group_norm_out = group_norm(x)

            print(group_norm_out.numpy())
    """

    def __init__(self,
                 num_groups,
                 num_channels,
                 epsilon=1e-05,
                 weight_attr=None,
                 bias_attr=None,
                 data_format='NCHW',
                 name=None):
        super(GroupNorm, self).__init__()
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._epsilon = epsilon
        self._num_channels = num_channels
        self._num_groups = num_groups
        if data_format != 'NCHW':
            raise ValueError("unsupported data layout:" + data_format)

        param_shape = [self._num_channels]

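        # When weight_attr/bias_attr is False, a constant, frozen parameter
        # (stop_gradient=True) is still created so the group_norm op always
        # receives Scale/Bias inputs; a ParamAttr with learning_rate=0 likewise
        # freezes the parameter below.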
        if weight_attr == False:
            self.weight = self.create_parameter(
                attr=None, shape=param_shape, default_initializer=Constant(1.0))
            self.weight.stop_gradient = True
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                default_initializer=Constant(1.0))
            self.weight.stop_gradient = self._weight_attr != None and self._weight_attr.learning_rate == 0.

        if bias_attr == False:
            self.bias = self.create_parameter(attr=None,
                                              shape=param_shape,
                                              default_initializer=Constant(0.0),
                                              is_bias=True)
            self.bias.stop_gradient = True
        else:
            self.bias = self.create_parameter(attr=self._bias_attr,
                                              shape=param_shape,
                                              is_bias=True)
            self.bias.stop_gradient = self._bias_attr != None and self._bias_attr.learning_rate == 0.

    def forward(self, input):
        mean_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype, stop_gradient=True)
        variance_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype, stop_gradient=True)

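        # Three execution paths: the new dygraph final-state op, the legacy
        # dygraph op, and the static-graph program built via append_op below.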
        if in_dygraph_mode():
            pre_act = _C_ops.group_norm(input, self.weight, self.bias,
                                        self._epsilon, self._num_groups, "NCHW")

            return dygraph_utils._append_activation_in_dygraph(pre_act,
                                                               act=None)

        elif _in_legacy_dygraph():
            pre_act, _, _ = _legacy_C_ops.group_norm(
                input,
                self.weight,
                self.bias,
                mean_out,
                variance_out,
                'epsilon',
                self._epsilon,
                'groups',
                self._num_groups,
            )
            return dygraph_utils._append_activation_in_dygraph(pre_act,
                                                               act=None)

        inputs = {'X': input}
        if self.bias is not None:
            inputs['Bias'] = self.bias
        if self.weight is not None:
            inputs['Scale'] = self.weight

        # create output
        group_norm_out = self._helper.create_variable_for_type_inference(
            dtype=input.dtype)

        self._helper.append_op(type="group_norm",
                               inputs=inputs,
                               outputs={
                                   "Y": group_norm_out,
                                   "Mean": mean_out,
                                   "Variance": variance_out,
                               },
                               attrs={
                                   "epsilon": self._epsilon,
                                   "groups": self._num_groups
                               })

        return self._helper.append_activation(group_norm_out, None)

    def extra_repr(self):
        return 'num_groups={}, num_channels={}, epsilon={}'.format(
            self._num_groups, self._num_channels, self._epsilon)


class LayerNorm(Layer):
    r"""
    Construct a callable object of the ``LayerNorm`` class.
    For more details, refer to code examples.
    It implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
    Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_

    The formula is as follows:

    ..  math::

        \mu & = \frac{1}{H}\sum_{i=1}^{H} x_i

        \sigma & = \sqrt{\frac{1}{H}\sum_{i=1}^{H}{(x_i - \mu)^2} + \epsilon}

        y & = f(\frac{g}{\sigma}(x - \mu) + b)

    - :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layer
    - :math:`\epsilon`: the small value added to the variance to prevent division by zero.
    - :math:`g`: the trainable scale parameter.
    - :math:`b`: the trainable bias parameter.

    Parameters:
        normalized_shape(int|list|tuple): Input shape from an expected input of
            size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
            If it is a single integer, this module will normalize over the last dimension
            which is expected to be of that specific size.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            gain :math:`g`. If it is False, weight is None. If it is None, a default :code:`ParamAttr` would be added as scale. The
            :attr:`param_attr` is initialized as 1 if it is added. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the learnable
            bias :math:`b`. If it is False, bias is None. If it is None, a default :code:`ParamAttr` would be added as bias. The
            :attr:`bias_attr` is initialized as 0 if it is added. Default: None.
        name(str, optional): Name for the LayerNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 2-D, 3-D, 4-D or 5-D tensor.
        - output: same shape as input x.

    Returns:
        None

    Examples:

        .. code-block:: python

          import paddle

          x = paddle.rand((2, 2, 2, 3))
          layer_norm = paddle.nn.LayerNorm(x.shape[1:])
          layer_norm_out = layer_norm(x)

          print(layer_norm_out)
    """

    def __init__(self,
                 normalized_shape,
                 epsilon=1e-05,
                 weight_attr=None,
                 bias_attr=None,
                 name=None):
        super(LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = [normalized_shape]

        self._normalized_shape = list(normalized_shape)
        self._epsilon = epsilon
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
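        # the learnable weight/bias are stored as flat vectors whose length is
        # the product of all normalized dimensions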
        param_shape = [np.prod(self._normalized_shape)]

        if weight_attr is False:
            self.weight = None
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                default_initializer=Constant(1.0))

        if bias_attr is False:
            self.bias = None
        else:
            self.bias = self.create_parameter(attr=self._bias_attr,
                                              shape=param_shape,
                                              is_bias=True)

    def forward(self, input):
        return layer_norm(input,
                          normalized_shape=self._normalized_shape,
                          weight=self.weight,
                          bias=self.bias,
                          epsilon=self._epsilon)

    def extra_repr(self):
        return 'normalized_shape={}, epsilon={}'.format(self._normalized_shape,
                                                        self._epsilon)


class _BatchNormBase(Layer):
    """
    Base class shared by BatchNorm1D, BatchNorm2D, BatchNorm3D and SyncBatchNorm.
    """

    def __init__(self,
                 num_features,
                 momentum=0.9,
                 epsilon=1e-05,
                 weight_attr=None,
                 bias_attr=None,
                 data_format='NCHW',
                 use_global_stats=None,
                 name=None):
        super(_BatchNormBase, self).__init__()
        self._num_features = num_features
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._use_global_stats = use_global_stats

        if get_default_dtype() == 'float16':
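            # parameters and running statistics are kept in float32 even when
            # the global default dtype is float16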
            self._dtype = 'float32'
        else:
            self._dtype = get_default_dtype()

        param_shape = [num_features]

        # create parameter
        if weight_attr == False:
            self.weight = self.create_parameter(
                attr=None,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(1.0))
            self.weight.stop_gradient = True
        else:
            self.weight = self.create_parameter(
                attr=self._weight_attr,
                shape=param_shape,
                dtype=self._dtype,
                default_initializer=Constant(1.0))
            self.weight.stop_gradient = self._weight_attr != None and self._weight_attr.learning_rate == 0.

        if bias_attr == False:
            self.bias = self.create_parameter(attr=None,
                                              shape=param_shape,
                                              dtype=self._dtype,
                                              default_initializer=Constant(0.0),
                                              is_bias=True)
            self.bias.stop_gradient = True
        else:
            self.bias = self.create_parameter(attr=self._bias_attr,
                                              shape=param_shape,
                                              dtype=self._dtype,
                                              is_bias=True)
            self.bias.stop_gradient = self._bias_attr != None and self._bias_attr.learning_rate == 0.

        moving_mean_name = None
        moving_variance_name = None

        if name is not None:
            moving_mean_name = name + "_mean"
            moving_variance_name = name + "_variance"

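        # running mean/variance are created as frozen parameters
        # (trainable=False, stop_gradient=True); the batch_norm op updates them
        # in place during training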
        self._mean = self.create_parameter(dtype=self._dtype,
                                           attr=ParamAttr(
                                               name=moving_mean_name,
                                               initializer=Constant(0.0),
                                               trainable=False,
                                               do_model_average=True),
                                           shape=param_shape)
        self._mean.stop_gradient = True

        self._variance = self.create_parameter(dtype=self._dtype,
                                               attr=ParamAttr(
                                                   name=moving_variance_name,
                                                   initializer=Constant(1.0),
                                                   trainable=False,
                                                   do_model_average=True),
                                               shape=param_shape)
        self._variance.stop_gradient = True

        self._data_format = data_format
        self._in_place = False
        self._momentum = momentum
        self._epsilon = epsilon
        self._fuse_with_relu = False
        self._name = name

    def _check_input_dim(self, input):
        raise NotImplementedError("BatchNorm Base error")

    def _check_data_format(self, input):
        raise NotImplementedError("BatchNorm Base data format error")

    def forward(self, input):

        self._check_data_format(self._data_format)

        self._check_input_dim(input)

        if self.training:
            warnings.warn(
                "When training, we now always track global mean and variance.")

        return batch_norm(input,
                          self._mean,
                          self._variance,
                          weight=self.weight,
                          bias=self.bias,
                          training=self.training,
                          momentum=self._momentum,
                          epsilon=self._epsilon,
                          data_format=self._data_format,
                          use_global_stats=self._use_global_stats)

    def extra_repr(self):
        main_str = 'num_features={}, momentum={}, epsilon={}'.format(
            self._num_features, self._momentum, self._epsilon)
        if self._data_format != 'NCHW':
            main_str += ', data_format={}'.format(self._data_format)
        if self._name is not None:
            main_str += ', name={}'.format(self._name)
        return main_str


class BatchNorm1D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance), usually obtained from a
    pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter
    - :math:`\beta` : trainable shift parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero. Default: None.
        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 2-D or 3-D tensor with shape: (batch, num_features) or (batch, num_features, length) when data_format is "NC" or "NCL",
            (batch, length, num_features) when data_format is "NLC".
        - output: Tensor with the same shape as input x.

    Returns:
        None.


    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 3))
          batch_norm = paddle.nn.BatchNorm1D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
    """

    def __init__(self,
                 num_features,
                 momentum=0.9,
                 epsilon=1e-05,
                 weight_attr=None,
                 bias_attr=None,
                 data_format='NCL',
                 use_global_stats=None,
                 name=None):
        super(BatchNorm1D,
              self).__init__(num_features, momentum, epsilon, weight_attr,
                             bias_attr, data_format, use_global_stats, name)

    def _check_data_format(self, input):
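        # the underlying batch_norm kernels only understand NCHW/NHWC, so the
        # 1-D layouts (NC, NCL, NLC) are mapped onto them here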
        if input == 'NCHW' or input == 'NC' or input == 'NCL':
            self._data_format = 'NCHW'
        elif input == "NHWC" or input == 'NLC':
            self._data_format = "NHWC"
        else:
            raise ValueError(
                'expected NC , NCL, NLC or None for data_format input')

    def _check_input_dim(self, input):
        if len(input.shape) != 2 and len(input.shape) != 3:
            raise ValueError('expected 2D or 3D input (got {}D input)'.format(
                len(input.shape)))


class BatchNorm2D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i -
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance), usually obtained from a
    pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter
    - :math:`\beta` : trainable shift parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero. Default: None.
        data_format(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 4-D tensor with shape: (batch, num_features, height, width) when data_format is "NCHW",
            or (batch, height, width, num_features) when data_format is "NHWC".
        - output: 4-D tensor with same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 2, 3))
          batch_norm = paddle.nn.BatchNorm2D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
    """

    def _check_data_format(self, input):
        if input == 'NCHW':
            self._data_format = input
        elif input == "NHWC":
            self._data_format = input
        else:
            raise ValueError('expected NCHW or NHWC for data_format input')

    def _check_input_dim(self, input):
        if len(input.shape) != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(
                len(input.shape)))


class BatchNorm3D(_BatchNormBase):
    r"""
    Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance), usually obtained from a
    pre-trained model. Calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter
    - :math:`\beta` : trainable shift parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized to zero. Default: None.
        data_format(str, optional): Specify the input data format, the data format can be "NCDHW" or "NDHWC". Default: NCDHW.
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: 5-D tensor with shape: (batch, num_features, dims, height, width) when data_format is "NCDHW",
            or (batch, dims, height, width, num_features) when data_format is "NDHWC".
        - output: 5-D tensor with same shape as input x.

    Returns:
        None

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.rand((2, 1, 2, 2, 3))
          batch_norm = paddle.nn.BatchNorm3D(1)
          batch_norm_out = batch_norm(x)

          print(batch_norm_out)
    """

    def __init__(self,
                 num_features,
                 momentum=0.9,
                 epsilon=1e-05,
                 weight_attr=None,
                 bias_attr=None,
                 data_format='NCDHW',
                 use_global_stats=None,
                 name=None):
        super(BatchNorm3D,
              self).__init__(num_features, momentum, epsilon, weight_attr,
                             bias_attr, data_format, use_global_stats, name)

    def _check_data_format(self, input):
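        # map the 3-D layouts (NCDHW, NDHWC) onto the NCHW/NHWC layouts
        # understood by the underlying kernels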
        if input == 'NCHW' or input == 'NCDHW':
            self._data_format = 'NCHW'
        elif input == "NHWC" or input == "NDHWC":
            self._data_format = 'NHWC'
        else:
            raise ValueError(
                'expected NCDHW, NDHWC or None for data_format input')

    def _check_input_dim(self, input):
        if len(input.shape) != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(
                len(input.shape)))


class SyncBatchNorm(_BatchNormBase):
    r"""
    This interface is used to construct a callable object of the ``SyncBatchNorm`` class.
    It implements the function of the Cross-GPU Synchronized Batch Normalization Layer, and can
    be used as a normalizer function for other operations, such as conv2d and fully connected
    operations.
    The data is normalized by the mean and variance of the channel computed over the whole
    mini-batch, which includes the data on all GPUs.
    Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
    for more details.

    When the model is in training mode, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of the whole mini-batch data on all GPUs.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    - :math:`x` : whole mini-batch data in all gpus
    - :math:`m` : the size of the whole mini-batch data

    When the model is in evaluation mode, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are global statistics (moving_mean and moving_variance,
    which are usually obtained from a pre-trained model). Global statistics are calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The formula of normalization is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter vector
    - :math:`\beta` : trainable shift parameter vector

    Note:
        If you want to use a container to pack your model and the model has ``SyncBatchNorm`` in the
        evaluation phase, please use ``nn.LayerList`` or ``nn.Sequential`` instead of
        ``list`` to pack the model.

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of this layer. If it is set to None or one attribute of ParamAttr, this layer
             will create ParamAttr as param_attr. If the Initializer of the param_attr
             is not set, the parameter is initialized with ones. If it is set to False,
             this layer will not have trainable scale parameter. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of this layer.
             If it is set to None or one attribute of ParamAttr, this layer
             will create ParamAttr as bias_attr. If the Initializer of the bias_attr
             is not set, the bias is initialized zero. If it is set to False, this layer will not
             have trainable bias parameter. Default: None.

    Shapes:
        input: Tensor whose dimension is from 2 to 5.
        output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

          import paddle
          import paddle.nn as nn
          import numpy as np

          x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
          x = paddle.to_tensor(x)

          if paddle.is_compiled_with_cuda():
              sync_batch_norm = nn.SyncBatchNorm(2)
              hidden1 = sync_batch_norm(x)
              print(hidden1)
              # [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]]
    """

    def __init__(self,
                 num_features,
                 momentum=0.9,
                 epsilon=1e-05,
                 weight_attr=None,
                 bias_attr=None,
                 data_format='NCHW',
                 name=None):
        super(SyncBatchNorm,
              self).__init__(num_features, momentum, epsilon, weight_attr,
                             bias_attr, data_format, None, name)

    def _check_data_format(self):
        if self._data_format in ['NCHW', 'NCDHW', 'NC', 'NCL']:
            self._data_format = 'NCHW'
        elif self._data_format in ["NHWC", "NDHWC", 'NLC']:
            self._data_format = 'NHWC'
        else:
            raise ValueError(
                'expected \'NCDHW\', \'NDHWC\', \'NCL\', \'NLC\', \'NC\', \'NCHW\', \'NHWC\' for data_format'
            )

    def forward(self, x):
        self._check_data_format()
        # create output
        # mean and mean_out share the same memory
        mean_out = self._mean
        # variance and variance out share the same memory
        variance_out = self._variance

        ### train mode: use mini-batch stats, eval mode: use global stats
        ### use_global_stats only support False in sync_batch_norm
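        # three execution paths, mirroring GroupNorm: the new dygraph
        # final-state op, the legacy dygraph op, and the static-graph branch
        # that appends a sync_batch_norm op below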
        if in_dygraph_mode():
            sync_batch_norm_out, _, _, _, _, _ = _C_ops.sync_batch_norm_(
                x, self.weight, self.bias, self._mean, self._variance,
                self._momentum, self._epsilon, self._data_format,
                not self.training, False, False, False)
            return sync_batch_norm_out

        elif in_dynamic_mode():
            attrs = ("momentum", self._momentum, "epsilon", self._epsilon,
                     "is_test", not self.training, "data_layout",
                     self._data_format, "use_mkldnn", False, "fuse_with_relu",
                     False, "use_global_stats", False, 'trainable_statistics',
                     False)
            sync_batch_norm_out, _, _, _, _, _ = _legacy_C_ops.sync_batch_norm(
                x, self.weight, self.bias, self._mean, self._variance, mean_out,
                variance_out, *attrs)
            return sync_batch_norm_out

        check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                                 'SyncBatchNorm')

        attrs = {
            "momentum": self._momentum,
            "epsilon": self._epsilon,
            "is_test": not self.training,
1106
            "data_layout": self._data_format,
C
ceci3 已提交
1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135
            "use_mkldnn": False,
            "fuse_with_relu": False,
            "use_global_stats": False,
            "trainable_statistics": False,
        }

        inputs = {
            "X": [x],
            "Scale": [self.weight],
            "Bias": [self.bias],
            "Mean": [self._mean],
            "Variance": [self._variance]
        }

        saved_mean = self._helper.create_variable_for_type_inference(
            dtype=self._dtype, stop_gradient=True)
        saved_variance = self._helper.create_variable_for_type_inference(
            dtype=self._dtype, stop_gradient=True)
        sync_batch_norm_out = self._helper.create_variable_for_type_inference(
            self._dtype)

        outputs = {
            "Y": [sync_batch_norm_out],
            "MeanOut": [mean_out],
            "VarianceOut": [variance_out],
            "SavedMean": [saved_mean],
            "SavedVariance": [saved_variance]
        }

        self._helper.append_op(type="sync_batch_norm",
                               inputs=inputs,
                               outputs=outputs,
                               attrs=attrs)
        return sync_batch_norm_out

    @classmethod
    def convert_sync_batchnorm(cls, layer):
        """
        Helper function to convert :class: `paddle.nn.BatchNorm*d` layers in the model to :class: `paddle.nn.SyncBatchNorm` layers.

        Parameters:
            layer(paddle.nn.Layer): model containing one or more `BatchNorm*d` layers.

        Returns:
            The original model with its `BatchNorm*d` layers converted to `SyncBatchNorm` layers.

        Examples:

            .. code-block:: python

                import paddle
                import paddle.nn as nn

                model = nn.Sequential(nn.Conv2D(3, 5, 3), nn.BatchNorm2D(5))
                sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)

        """
        layer_output = layer
        if isinstance(layer, _BatchNormBase):
            if layer._weight_attr != None and not isinstance(
                    layer._weight_attr,
                    bool) and layer._weight_attr.name != None:
                layer._weight_attr.name = layer._weight_attr.name + '_sync'
            if layer._bias_attr != None and not isinstance(
                    layer._bias_attr, bool) and layer._bias_attr.name != None:
                layer._bias_attr.name = layer._bias_attr.name + '_sync'

            layer_output = SyncBatchNorm(layer._num_features, layer._momentum,
                                         layer._epsilon, layer._weight_attr,
                                         layer._bias_attr, layer._data_format,
                                         layer._name)

            if layer._weight_attr != False and layer._bias_attr != False:
                with no_grad():
                    layer_output.weight = layer.weight
                    layer_output.bias = layer.bias
            layer_output._mean = layer._mean
            layer_output._variance = layer._variance

        for name, sublayer in layer.named_children():
            layer_output.add_sublayer(name,
                                      cls.convert_sync_batchnorm(sublayer))
        del layer
        return layer_output


class LocalResponseNorm(Layer):
    """
        Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions.
        For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_

        See more details in :ref:`api_paddle_nn_functional_local_response_norm` .

        Parameters:
            size (int): The number of channels to sum over.
            alpha (float, optional): The scaling parameter, positive. Default:1e-4
            beta (float, optional): The exponent, positive. Default:0.75
            k (float, optional): An offset, positive. Default: 1.0
            data_format (str, optional): Specify the data format of the input, and the data format of the output
                will be consistent with that of the input. An optional string from:
                If input is 3-D Tensor, the string could be `"NCL"` or `"NLC"` . When it is `"NCL"`,
                the data is stored in the order of: `[batch_size, input_channels, feature_length]`.
                If input is 4-D Tensor, the string could be  `"NCHW"`, `"NHWC"`. When it is `"NCHW"`,
                the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`.
                If input is 5-D Tensor, the string could be  `"NCDHW"`, `"NDHWC"` . When it is `"NCDHW"`,
                the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
            name (str, optional): Name for the operation (optional, default is None). For more information,
                please refer to :ref:`api_guide_Name`.

        Shape:
            - input: 3-D/4-D/5-D tensor.
            - output: 3-D/4-D/5-D tensor, the same shape as input.

        Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32")
            m = paddle.nn.LocalResponseNorm(size=5)
            y = m(x)
            print(y.shape)  # [3, 3, 112, 112]
        """

    def __init__(self,
                 size,
                 alpha=0.0001,
                 beta=0.75,
                 k=1.0,
                 data_format="NCHW",
                 name=None):
        super(LocalResponseNorm, self).__init__()
        self.size = size
        self.alpha = alpha
        self.beta = beta
        self.k = k
        self.data_format = data_format
        self.name = name

    def forward(self, input):
        out = F.local_response_norm(input, self.size, self.alpha, self.beta,
                                    self.k, self.data_format, self.name)
        return out

    def extra_repr(self):
        main_str = 'size={}, alpha={}, beta={}, k={}'.format(
            self.size, self.alpha, self.beta, self.k)
        if self.data_format != 'NCHW':
            main_str += ', data_format={}'.format(self.data_format)
        if self.name is not None:
            main_str += ', name={}'.format(self.name)
        return main_str