#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define normalization api
import paddle
import paddle.fluid as fluid
from ...fluid.data_feeder import check_variable_and_dtype, check_type
from ...fluid.layer_helper import LayerHelper
from ...fluid import dygraph_utils
import numbers
from paddle import _C_ops, _legacy_C_ops
from paddle import in_dynamic_mode
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode

__all__ = []


def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
    r"""
    Normalize ``x`` along dimension ``axis`` using :math:`L_p` norm. This layer computes

    .. math::

        y = \frac{x}{ \max\left( \lvert \lvert x \rvert \rvert_p, epsilon\right) }

    .. math::

        \lvert \lvert x \rvert \rvert_p = \left( \sum_i {\lvert x_i \rvert^p}  \right)^{1/p}

    where :math:`\sum_i{\lvert x_i \rvert^p}` is calculated along the ``axis`` dimension.

    Parameters:
        x (Tensor): The input tensor, which can be an N-D tensor. Its data type should be float32 or float64.
        p (float|int, optional): The exponent value in the norm formulation. Default: 2.
        axis (int, optional): The axis along which to apply normalization. If `axis < 0`, the dimension to normalize is `x.ndim + axis`. -1 is the last dimension. Default: 1.
        epsilon (float, optional): Small float added to denominator to avoid dividing by zero. Default is 1e-12.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the output has the same shape and data type as ``x``.

    Examples:

        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.arange(6, dtype="float32").reshape([2, 3])
            y = F.normalize(x)
            print(y.numpy())
            # [[0.         0.4472136  0.8944272 ]
            # [0.42426404 0.5656854  0.7071067 ]]

            y = F.normalize(x, p=1.5)
            print(y.numpy())
            # [[0.         0.40862012 0.81724024]
            # [0.35684016 0.4757869  0.5947336 ]]

            y = F.normalize(x, axis=0)
            print(y.numpy())
            # [[0.         0.24253564 0.37139067]
            # [1.         0.97014254 0.9284767 ]]
    """
    if in_dygraph_mode():
        eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
        out = _C_ops.p_norm(x, float(p), axis, epsilon, True, False)
        return x / _C_ops.maximum(out, eps)

    if _in_legacy_dygraph():
        eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
        out = _legacy_C_ops.p_norm(
            x,
            'axis',
            axis,
            'porder',
            float(p),
            'keepdim',
            True,
            'epsilon',
            epsilon,
        )
        return x / _legacy_C_ops.elementwise_max(out, eps)

    check_type(p, 'p', (float, int), 'normalize')
    check_type(axis, 'axis', int, 'normalize')
    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64'], 'normalize'
    )
    if len(x.shape) == 1 and axis != 0 and axis != -1:
        raise ValueError(
            "Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}".format(
                axis
            )
        )

    attrs = {
        'axis': axis,
        'porder': float(p),
        'keepdim': True,
        'epsilon': epsilon,
    }
    helper = LayerHelper('p_norm', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
    )
    eps = paddle.full(shape=[1], fill_value=epsilon, dtype=out.dtype)
    return paddle.divide(x, paddle.maximum(out, eps), name=name)
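

# A minimal sketch of what ``normalize`` computes, for reference. The snippet
# below is illustrative only (the variable names are hypothetical), not part
# of the public API:
#
#     import paddle
#     import paddle.nn.functional as F
#
#     x = paddle.to_tensor([[3.0, 4.0]])
#     y = F.normalize(x, p=2, axis=1)
#     # equivalent manual computation: divide by the clipped L2 norm
#     norm = paddle.norm(x, p=2, axis=1, keepdim=True).clip(min=1e-12)
#     y_manual = x / norm  # -> [[0.6, 0.8]], same as y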


def batch_norm(
    x,
    running_mean,
    running_var,
    weight,
    bias,
    training=False,
    momentum=0.9,
    epsilon=1e-05,
    data_format="NCHW",
    use_global_stats=None,
    name=None,
):
    """
    Applies Batch Normalization as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    nn.functional.batch_norm is used by nn.BatchNorm1D, nn.BatchNorm2D and nn.BatchNorm3D. Please use those APIs for BatchNorm.

    Parameters:
        x(Tensor): input value. Its data type should be float32 or float64.
        running_mean(Tensor): running mean.
        running_var(Tensor): running variance.
        weight(Tensor): The weight tensor of batch_norm, cannot be None.
        bias(Tensor): The bias tensor of batch_norm, cannot be None.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        training(bool, optional): True means train mode, which computes statistics from the current mini-batch and tracks the global mean and variance during training. False means inference mode, which uses the global mean and variance computed during training. Default: False.
        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default "NCHW".
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch; if set to True, use the global statistics; if set to None, use the global statistics in the test phase and the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor with the same shape and data type as ``x``.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np

          np.random.seed(123)
          x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
          running_mean = np.random.random(size=1).astype('float32')
          running_variance = np.random.random(size=1).astype('float32')
          weight_data = np.random.random(size=1).astype('float32')
          bias_data = np.random.random(size=1).astype('float32')
          x = paddle.to_tensor(x)
          rm = paddle.to_tensor(running_mean)
          rv = paddle.to_tensor(running_variance)
          w = paddle.to_tensor(weight_data)
          b = paddle.to_tensor(bias_data)
          batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
          print(batch_norm_out)
    """
    assert len(x.shape) >= 2, "input dim must be larger than 1"

    # input and output must share the same memory
    mean_out = running_mean
    variance_out = running_var

    true_data_format = ['NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC']
    if data_format not in true_data_format:
        raise ValueError(
            "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', "
            "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format)
        )

    data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC'

    if use_global_stats is None:
        use_global_stats = not training
        trainable_statistics = False
    else:
        trainable_statistics = not use_global_stats

    if in_dygraph_mode():
        batch_norm_out, _, _, _, _, _ = _C_ops.batch_norm(
            x,
            weight,
            bias,
            running_mean,
            running_var,
            momentum,
            epsilon,
            data_format,
            not training,
            use_global_stats,
            trainable_statistics,
            False,
        )

        return dygraph_utils._append_activation_in_dygraph(
            batch_norm_out, act=None
        )

    elif _in_legacy_dygraph():
        # the legacy dygraph op expects attributes as a flat tuple
        attrs = (
            "momentum",
            momentum,
            "epsilon",
            epsilon,
            "is_test",
            not training,
            "data_layout",
            data_format,
            "use_mkldnn",
            False,
            "fuse_with_relu",
            False,
            "use_global_stats",
            use_global_stats,
            "trainable_statistics",
            trainable_statistics,
        )

        batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm(
            x,
            weight,
            bias,
            running_mean,
            running_var,
            None,
            mean_out,
            variance_out,
            *attrs
        )

        return dygraph_utils._append_activation_in_dygraph(
            batch_norm_out, act=None
        )

    check_variable_and_dtype(
        x, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
    )

    # the static graph op expects attributes as a dict
    attrs = {
        "momentum": momentum,
        "epsilon": epsilon,
        "is_test": not training,
        "data_layout": data_format,
        "use_mkldnn": False,
        "fuse_with_relu": False,
        "use_global_stats": use_global_stats,
        "trainable_statistics": trainable_statistics,
    }

    inputs = {
        "X": [x],
        "Scale": [weight],
        "Bias": [bias],
        "Mean": [running_mean],
        "Variance": [running_var],
    }

    helper = LayerHelper('batch_norm', **locals())

    param_dtype = x.dtype if x.dtype != 'float16' else 'float32'
    saved_mean = helper.create_variable_for_type_inference(
        dtype=param_dtype, stop_gradient=True
    )
    saved_variance = helper.create_variable_for_type_inference(
        dtype=param_dtype, stop_gradient=True
    )
    batch_norm_out = helper.create_variable_for_type_inference(x.dtype)

    outputs = {
        "Y": [batch_norm_out],
        "MeanOut": [running_mean],
        "VarianceOut": [running_var],
        "SavedMean": [saved_mean],
        "SavedVariance": [saved_variance],
    }

    if training or trainable_statistics:
        # reserve_space is only used for training.
        reserve_space = helper.create_variable_for_type_inference(
            dtype=x.dtype, stop_gradient=True
        )
        outputs["ReserveSpace"] = [reserve_space]

    helper.append_op(
        type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
    )

    return helper.append_activation(batch_norm_out)
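

# A minimal sketch of the inference-mode arithmetic that batch_norm performs
# per channel, assuming NCHW layout. Illustrative only; the real op also
# handles other layouts, momentum updates and the backward pass:
#
#     import paddle
#
#     x = paddle.rand([4, 3, 8, 8])  # NCHW
#     mean, var = paddle.zeros([3]), paddle.ones([3])
#     w, b = paddle.ones([3]), paddle.zeros([3])
#     eps = 1e-5
#     # broadcast the per-channel statistics over N, H and W
#     x_hat = (x - mean.reshape([1, 3, 1, 1])) / paddle.sqrt(
#         var.reshape([1, 3, 1, 1]) + eps
#     )
#     y_manual = w.reshape([1, 3, 1, 1]) * x_hat + b.reshape([1, 3, 1, 1])
#     y = paddle.nn.functional.batch_norm(x, mean, var, w, b)  # ~= y_manual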


def layer_norm(
    x, normalized_shape, weight=None, bias=None, epsilon=1e-05, name=None
):
    """
    see more detail in paddle.nn.LayerNorm
321

322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342
    Parameters:
        x(Tensor): Input Tensor. Its data type should be float32 or float64.
        normalized_shape(int|list|tuple): Input shape from an expected input of
            size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
            If it is a single integer, this module will normalize over the last dimension
            which is expected to be of that specific size.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        weight(Tensor, optional): The weight tensor of layer_norm. Default: None.
        bias(Tensor, optional): The bias tensor of layer_norm. Default: None.
        name(str, optional): Name for the LayerNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor with the same shape and data type as ``x``.

    Examples:

        .. code-block:: python

          import paddle

          x = paddle.rand((2, 2, 2, 3))
          layer_norm_out = paddle.nn.functional.layer_norm(x, x.shape[1:])
          print(layer_norm_out)
    """
    input_shape = list(x.shape)
    input_ndim = len(input_shape)
    if isinstance(normalized_shape, numbers.Integral):
        normalized_shape = [normalized_shape]
    elif isinstance(normalized_shape, tuple):
        normalized_shape = list(normalized_shape)
    elif not isinstance(normalized_shape, list):
        raise ValueError(
            "`normalized_shape` should be int, list of ints or tuple of ints."
        )

    normalized_ndim = len(normalized_shape)
    begin_norm_axis = input_ndim - normalized_ndim
    if (
        input_ndim < normalized_ndim
        or input_shape[begin_norm_axis:] != normalized_shape
    ):
        str_normalized_shape = str(normalized_shape)
        raise ValueError(
            'Given normalized_shape is '
            + str_normalized_shape
            + ', expected input with shape [*, '
            + str_normalized_shape[1:]
            + ', but got input shape '
            + str(input_shape)
        )

    if in_dygraph_mode():
        (
            pre_act,
            _,
            _,
        ) = _C_ops.layer_norm(x, weight, bias, epsilon, begin_norm_axis, False)

        return dygraph_utils._append_activation_in_dygraph(pre_act, act=None)

    if _in_legacy_dygraph():
        pre_act, _, _ = _legacy_C_ops.layer_norm(
            x,
            weight,
            bias,
            'epsilon',
            epsilon,
            'begin_norm_axis',
            begin_norm_axis,
        )
        return dygraph_utils._append_activation_in_dygraph(pre_act, act=None)

    check_variable_and_dtype(
        x, 'input', ['float16', 'float32', 'float64'], 'LayerNorm'
    )

    inputs = dict()
    inputs['X'] = [x]
    if weight:
        inputs['Scale'] = [weight]
    if bias:
        inputs['Bias'] = [bias]
    attrs = {"epsilon": epsilon, "begin_norm_axis": begin_norm_axis}

    # create output
    helper = LayerHelper('layer_norm', **locals())

    dtype = x.dtype
    mean_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True
    )
    variance_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True
    )
    layer_norm_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type="layer_norm",
        inputs=inputs,
        outputs={
            "Y": layer_norm_out,
            "Mean": mean_out,
            "Variance": variance_out,
        },
        attrs={"epsilon": epsilon, "begin_norm_axis": begin_norm_axis},
    )

    return helper.append_activation(layer_norm_out)
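

# A minimal sketch of the layer_norm arithmetic. Illustrative only; the
# statistics are computed per sample over the trailing ``normalized_shape``
# dimensions (weight and bias omitted here):
#
#     import paddle
#
#     x = paddle.rand([2, 2, 2, 3])
#     axes = [1, 2, 3]  # the dims covered by normalized_shape
#     mean = x.mean(axis=axes, keepdim=True)
#     var = ((x - mean) ** 2).mean(axis=axes, keepdim=True)
#     y_manual = (x - mean) / paddle.sqrt(var + 1e-5)
#     y = paddle.nn.functional.layer_norm(x, x.shape[1:])  # ~= y_manual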


def instance_norm(
    x,
    running_mean=None,
    running_var=None,
    weight=None,
    bias=None,
    use_input_stats=True,
    momentum=0.9,
    eps=1e-05,
    data_format="NCHW",
    name=None,
):
    """
    See more details in nn.layer.InstanceNorm2D.

    Parameters:
        x(Tensor): Input Tensor. Its data type should be float32 or float64.
        running_mean(Tensor, optional): running mean. Default: None. Obsolete (no longer used).
        running_var(Tensor, optional): running variance. Default: None. Obsolete (no longer used).
        weight(Tensor, optional): The weight tensor of instance_norm. Default: None.
            If its value is None, this parameter will be initialized to one.
        bias(Tensor, optional): The bias tensor of instance_norm. Default: None.
            If its value is None, this parameter will be initialized to zero.
        eps(float, optional): A value added to the denominator for numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        use_input_stats(bool, optional): Default True. Obsolete (no longer used).
        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Default "NCHW".
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor with the same shape and data type as ``x``.

    Examples:

        .. code-block:: python

          import paddle

          x = paddle.rand((2, 2, 2, 3))
          instance_norm_out = paddle.nn.functional.instance_norm(x)

          print(instance_norm_out)

    """
    if in_dygraph_mode():
        out = _C_ops.instance_norm(x, weight, bias, eps)
        return out
    if _in_legacy_dygraph():
        out, _, _ = _legacy_C_ops.instance_norm(
            x,
            weight,
            bias,
            "epsilon",
            eps,
            "momentum",
            momentum,
            "data_format",
            data_format,
        )
        return out

    check_variable_and_dtype(x, 'input', ['float32', 'float64'], "InstanceNorm")

    attrs = {"epsilon": eps, "momentum": momentum, "data_format": data_format}

    if weight and bias:
        inputs = {"X": [x], "Scale": [weight], "Bias": [bias]}
    else:
        inputs = {"X": [x]}

    helper = LayerHelper('instance_norm', **locals())
    saved_mean = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=True
    )
    saved_variance = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=True
    )
    instance_norm_out = helper.create_variable_for_type_inference(x.dtype)

    outputs = {
        "Y": [instance_norm_out],
        "SavedMean": [saved_mean],
        "SavedVariance": [saved_variance],
    }

    helper.append_op(
        type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs
    )
    return instance_norm_out
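

# A minimal sketch of the instance_norm arithmetic. Illustrative only; the
# statistics are computed per sample and per channel over the spatial
# dimensions (weight and bias omitted here):
#
#     import paddle
#
#     x = paddle.rand([2, 2, 2, 3])  # NCHW
#     mean = x.mean(axis=[2, 3], keepdim=True)
#     var = ((x - mean) ** 2).mean(axis=[2, 3], keepdim=True)
#     y_manual = (x - mean) / paddle.sqrt(var + 1e-5)
#     y = paddle.nn.functional.instance_norm(x)  # ~= y_manual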


def local_response_norm(
    x, size, alpha=1e-4, beta=0.75, k=1.0, data_format="NCHW", name=None
):
    r"""
    Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions.
    For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_

    The formula is as follows:

    .. math::

        Output(i, x, y) = Input(i, x, y) / \left(k + \alpha \sum\limits^{\min(C-1, i + size/2)}_{j = \max(0, i - size/2)}(Input(j, x, y))^2\right)^{\beta}

    In the above equation:

    - :math:`size` : The number of channels to sum over.
    - :math:`k` : The offset (avoid being divided by 0).
    - :math:`\alpha` : The scaling parameter.
    - :math:`\beta` : The exponent parameter.


    Args:
        x (Tensor): The input 3-D/4-D/5-D tensor. The data type is float32.
        size (int): The number of channels to sum over.
        alpha (float, optional): The scaling parameter, positive. Default: 1e-4.
        beta (float, optional): The exponent, positive. Default: 0.75.
        k (float, optional): An offset, positive. Default: 1.0.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:
            If x is 3-D Tensor, the string could be `"NCL"` or `"NLC"` . When it is `"NCL"`,
            the data is stored in the order of: `[batch_size, input_channels, feature_length]`.
            If x is 4-D Tensor, the string could be  `"NCHW"`, `"NHWC"`. When it is `"NCHW"`,
            the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`.
            If x is 5-D Tensor, the string could be  `"NCDHW"`, `"NDHWC"` . When it is `"NCDHW"`,
            the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name (str, optional): Name for the operation (optional, default is None). For more information,
            please refer to :ref:`api_guide_Name`.

    Returns:
        A tensor storing the transformation result with the same shape and data type as input.


    Examples:

    .. code-block:: python

        import paddle

        x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32")
        y = paddle.nn.functional.local_response_norm(x, size=5)
        print(y.shape)  # [3, 3, 112, 112]
    """
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32'], 'local_response_norm')
    if data_format not in ['NCL', 'NLC', 'NCHW', 'NHWC', 'NCDHW', 'NDHWC']:
        raise ValueError(
            "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], "
            "but got {}".format(data_format)
        )

    sizes = x.shape
    dim = len(sizes)
    if dim < 3:
        raise ValueError(
            'Expected 3D or higher dimensionality input, but got {} dimensions'.format(
                dim
            )
        )

    for i, sz in enumerate(sizes):
        if sz <= 0 and i > 0:
            raise ValueError(
                "Expected every dim's size to be larger than 0, "
                "but the size of the {}-th dim is {}".format(i, sz)
            )

    channel_last = data_format[-1] == "C"

    from functools import reduce

    sum_sizes = reduce(lambda x, y: x * y, sizes[1:])

    div = paddle.unsqueeze(paddle.multiply(x, x), axis=1)
    if not channel_last:
        pad4d_shape = [0, 0, size // 2, (size - 1) // 2]
        pool2d_shape = (size, 1)
        reshape_shape = [
            sizes[0],
            1,
            sizes[1],
            sizes[2],
            int(sum_sizes / (sizes[1] * sizes[2])),
        ]
        pad5d_shape = [0, 0, 0, 0, size // 2, (size - 1) // 2]
        pool3d_shape = (size, 1, 1)
    else:
        pad4d_shape = [size // 2, (size - 1) // 2, 0, 0]
        pool2d_shape = (1, size)
        reshape_shape = [
            sizes[0],
            1,
            sizes[1],
            int(sum_sizes / (sizes[1] * sizes[-1])),
            sizes[-1],
        ]
        pad5d_shape = [size // 2, (size - 1) // 2, 0, 0, 0, 0]
        pool3d_shape = (1, 1, size)

    if dim == 3:
        div = paddle.nn.functional.pad(div, pad=pad4d_shape)
        div = paddle.nn.functional.avg_pool2d(
            div, kernel_size=pool2d_shape, stride=1
        )
        div = paddle.squeeze(div, axis=1)
    else:
        div = paddle.reshape(div, shape=reshape_shape)
        div = paddle.nn.functional.pad(
            div, pad=pad5d_shape, data_format='NCDHW'
        )
        div = paddle.nn.functional.avg_pool3d(
            div, kernel_size=pool3d_shape, stride=1
        )
        div = paddle.reshape(paddle.squeeze(div, axis=1), sizes)

    div = paddle.scale(div, scale=alpha, bias=k)
    div = paddle.pow(div, beta)
    res = paddle.divide(x, div, name=name)
    return res
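

# A minimal usage sketch for local_response_norm. Illustrative only; each
# activation is divided by (k + alpha * sum of squares over a `size`-wide
# channel window) raised to the power beta:
#
#     import paddle
#     import paddle.nn.functional as F
#
#     x = paddle.rand([1, 8, 16, 16])  # NCHW
#     y = F.local_response_norm(x, size=5, alpha=1e-4, beta=0.75, k=1.0)
#     print(y.shape)  # [1, 8, 16, 16]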