#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define normalization api
import paddle
import paddle.fluid as fluid
from ...fluid.data_feeder import check_variable_and_dtype, check_type
from ...fluid.layer_helper import LayerHelper
from ...framework import create_parameter
from ..initializer import Constant
from ...framework import ParamAttr
from ...fluid import dygraph_utils
import numbers
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph

__all__ = []


def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
    r"""
    This op normalizes ``x`` along dimension ``axis`` using :math:`L_p` norm. This layer computes

    .. math::

        y = \frac{x}{ \max\left( \lvert \lvert x \rvert \rvert_p, epsilon\right) }

    .. math::
        \lvert \lvert x \rvert \rvert_p = \left( \sum_i {\lvert x_i \rvert^p}  \right)^{1/p}

    where :math:`\sum_i{\lvert x_i \rvert^p}` is calculated along the ``axis`` dimension.


    Parameters:
        x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
        p (float|int, optional): The exponent value in the norm formulation. Default: 2
        axis (int, optional): The axis on which to apply normalization. If `axis < 0`, the dimension to normalize is `x.ndim + axis`. -1 is the last dimension. Default is 1.
        epsilon (float, optional): Small float added to denominator to avoid dividing by zero. Default is 1e-12.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the output has the same shape and data type as ``x``.

    Examples:

        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn.functional as F

            paddle.disable_static()
            x = np.arange(6, dtype=np.float32).reshape(2,3)
            x = paddle.to_tensor(x)
            y = F.normalize(x)
            print(y.numpy())
            # [[0.         0.4472136  0.8944272 ]
            # [0.42426404 0.5656854  0.7071067 ]]

            y = F.normalize(x, p=1.5)
            print(y.numpy())
            # [[0.         0.40862012 0.81724024]
            # [0.35684016 0.4757869  0.5947336 ]]

            y = F.normalize(x, axis=0)
            print(y.numpy())
            # [[0.         0.24253564 0.37139067]
            # [1.         0.97014254 0.9284767 ]]
    """
    if in_dygraph_mode():
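        # compute the p-norm of x along `axis` with keepdim=True, then divide
        # x by max(norm, epsilon) to avoid division by zero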
        eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
        out = _C_ops.final_state_p_norm(x, float(p), axis, epsilon, True, False)
        return x / _C_ops.elementwise_max(out, eps)

    if _in_legacy_dygraph():
        eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
        out = _C_ops.p_norm(x, 'axis', axis, 'porder',
                            float(p), 'keepdim', True, 'epsilon', epsilon)
        return x / _C_ops.elementwise_max(out, eps)

    check_type(p, 'p', (float, int), 'normalize')
    check_type(axis, 'axis', (int), 'normalize')
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'normalize')
    if len(x.shape) == 1 and axis != 0 and axis != -1:
        raise ValueError(
            "Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}".
            format(axis))

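    # static graph: append a p_norm op, then divide x by max(norm, epsilon)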
    attrs = {
        'axis': axis,
        'porder': float(p),
        'keepdim': True,
        'epsilon': epsilon,
    }
    helper = LayerHelper('p_norm', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
    eps = out.block.create_var(dtype=out.dtype)
    eps = paddle.full(shape=[1], fill_value=epsilon, dtype=out.dtype)
    return paddle.divide(x, paddle.maximum(out, eps), name=name)


def batch_norm(x,
               running_mean,
               running_var,
               weight,
               bias,
               training=False,
               momentum=0.9,
               epsilon=1e-05,
               data_format="NCHW",
               use_global_stats=None,
               name=None):
    """
    Applies Batch Normalization as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .

    nn.functional.batch_norm is used by nn.BatchNorm1D, nn.BatchNorm2D and nn.BatchNorm3D. Please use those layer APIs for BatchNorm.

    Parameters:
        x(Tensor): input value. Its data type should be float32 or float64.
        running_mean(Tensor): running mean.
        running_var(Tensor): running variance.
        weight(Tensor): The weight tensor of batch_norm, can not be None.
        bias(Tensor): The bias tensor of batch_norm, can not be None.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        training(bool, optional): True means training mode, in which statistics are computed from the mini-batch data and the global mean and var are tracked. False means inference mode, in which the global mean and var computed during training are used. Default: False.
        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default "NCHW".
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch; if set to True, use the global statistics; if set to None, use the global statistics in the test phase and the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the normalized output with the same shape and data type as ``x``.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np

          np.random.seed(123)
          x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
          running_mean = np.random.random(size=1).astype('float32')
          running_variance = np.random.random(size=1).astype('float32')
          weight_data = np.random.random(size=1).astype('float32')
          bias_data = np.random.random(size=1).astype('float32')
          x = paddle.to_tensor(x)
          rm = paddle.to_tensor(running_mean)
          rv = paddle.to_tensor(running_variance)
          w = paddle.to_tensor(weight_data)
          b = paddle.to_tensor(bias_data)
          batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
          print(batch_norm_out)
    """
    assert len(x.shape) >= 2, "input dim must be larger than 1"

    # input and out must share the memory
    mean_out = running_mean
    variance_out = running_var

    true_data_format = ['NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC']
    if data_format not in true_data_format:
        raise ValueError(
            "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', "
            "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format))

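    # the underlying op only distinguishes channel-first from channel-last
    # layouts, so map every accepted format to either 'NCHW' or 'NHWC'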
    data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC'

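    # when use_global_stats is not specified, use mini-batch statistics during
    # training and global statistics during inference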
    if use_global_stats is None:
        use_global_stats = not training
        trainable_statistics = False
    else:
        trainable_statistics = not use_global_stats

    if in_dygraph_mode():
        batch_norm_out, _, _, _, _, _ = _C_ops.final_state_batch_norm(
            x, weight, bias, running_mean, running_var, momentum, epsilon,
            data_format, not training, use_global_stats, trainable_statistics,
            False)
        return batch_norm_out

    if _in_legacy_dygraph():
        # for dygraph need tuple
        attrs = ("momentum", momentum, "epsilon", epsilon, "is_test",
                 not training, "data_layout", data_format, "use_mkldnn", False,
                 "fuse_with_relu", False, "use_global_stats", use_global_stats,
                 "trainable_statistics", trainable_statistics)

        batch_norm_out, _, _, _, _, _ = _C_ops.batch_norm(
            x, weight, bias, running_mean, running_var, mean_out, variance_out,
            *attrs)

        return dygraph_utils._append_activation_in_dygraph(
            batch_norm_out, act=None)

    check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                             'BatchNorm')

    # for static need dict
    attrs = {
        "momentum": momentum,
        "epsilon": epsilon,
        "is_test": not training,
        "data_layout": data_format,
        "use_mkldnn": False,
        "fuse_with_relu": False,
        "use_global_stats": use_global_stats,
        "trainable_statistics": trainable_statistics,
    }

    inputs = {
        "X": [x],
        "Scale": [weight],
        "Bias": [bias],
        "Mean": [running_mean],
        "Variance": [running_var]
    }

    helper = LayerHelper('batch_norm', **locals())

    param_dtype = x.dtype if x.dtype != 'float16' else 'float32'
    saved_mean = helper.create_variable_for_type_inference(
        dtype=param_dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=param_dtype, stop_gradient=True)
    batch_norm_out = helper.create_variable_for_type_inference(x.dtype)

    outputs = {
        "Y": [batch_norm_out],
        "MeanOut": [running_mean],
        "VarianceOut": [running_var],
        "SavedMean": [saved_mean],
        "SavedVariance": [saved_variance]
    }

    if training or trainable_statistics:
        # reserve_space is only used for training.
        reserve_space = helper.create_variable_for_type_inference(
            dtype=x.dtype, stop_gradient=True)
        outputs["ReserveSpace"] = [reserve_space]

    helper.append_op(
        type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)

    return helper.append_activation(batch_norm_out)


def layer_norm(x,
               normalized_shape,
               weight=None,
               bias=None,
               epsilon=1e-05,
               name=None):
    """
    See more details in paddle.nn.LayerNorm.

    Parameters:
        x(Tensor): Input Tensor. Its data type should be float32 or float64.
        normalized_shape(int|list|tuple): Input shape from an expected input of
            size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
            If it is a single integer, this module will normalize over the last dimension
            which is expected to be of that specific size.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        weight(Tensor, optional): The weight tensor of layer_norm. Default: None.
        bias(Tensor, optional): The bias tensor of layer_norm. Default: None.
        name(str, optional): Name for the LayerNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the normalized output with the same shape and data type as ``x``.

    Examples:

        .. code-block:: python

          import paddle
          import numpy as np

          np.random.seed(123)
          x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
          x = paddle.to_tensor(x_data)
          layer_norm_out = paddle.nn.functional.layer_norm(x, x.shape[1:])
          print(layer_norm_out)
    """
    input_shape = list(x.shape)
    input_ndim = len(input_shape)
    if isinstance(normalized_shape, numbers.Integral):
        normalized_shape = [normalized_shape]
    elif isinstance(normalized_shape, tuple):
        normalized_shape = list(normalized_shape)
    elif not isinstance(normalized_shape, list):
        raise ValueError(
            "`normalized_shape` should be int, list of ints or tuple of ints.")

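    # normalization is applied over the trailing `normalized_ndim` dimensions;
    # begin_norm_axis marks where those dimensions start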
    normalized_ndim = len(normalized_shape)
    begin_norm_axis = input_ndim - normalized_ndim
    if input_ndim < normalized_ndim or input_shape[
            begin_norm_axis:] != normalized_shape:
        str_normalized_shape = str(normalized_shape)
        raise ValueError('Given normalized_shape is ' + str_normalized_shape +
                         ', expected input with shape [*, ' +
                         str_normalized_shape[
                             1:] + ', but got input shape ' + str(input_shape))

    if in_dynamic_mode():
        pre_act, _, _ = _C_ops.layer_norm(x, weight, bias, 'epsilon', epsilon,
                                          'begin_norm_axis', begin_norm_axis)
        return dygraph_utils._append_activation_in_dygraph(pre_act, act=None)

    check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                             'LayerNorm')

    inputs = dict()
    inputs['X'] = [x]
    if weight:
        inputs['Scale'] = [weight]
    if bias:
        inputs['Bias'] = [bias]
    attrs = {"epsilon": epsilon, "begin_norm_axis": begin_norm_axis}

    # create output
    helper = LayerHelper('layer_norm', **locals())

    dtype = x.dtype
    mean_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    variance_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    layer_norm_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type="layer_norm",
        inputs=inputs,
        outputs={
            "Y": layer_norm_out,
            "Mean": mean_out,
            "Variance": variance_out,
        },
        attrs={"epsilon": epsilon,
               "begin_norm_axis": begin_norm_axis})

    return helper.append_activation(layer_norm_out)


def instance_norm(x,
                  running_mean=None,
                  running_var=None,
                  weight=None,
                  bias=None,
                  use_input_stats=True,
                  momentum=0.9,
                  eps=1e-05,
                  data_format="NCHW",
                  name=None):
    """
    See more detail in nn.layer.InstanceNorm2D.

    Parameters:
        x(Tensor): Input Tensor. Its data type should be float32 or float64.
        running_mean(Tensor): running mean. Default None.
        running_var(Tensor): running variance. Default None.
        weight(Tensor, optional): The weight tensor of instance_norm. Default: None.
        bias(Tensor, optional): The bias tensor of instance_norm. Default: None.
        eps(float, optional): A value added to the denominator for numerical stability. Default is 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        use_input_stats(bool, optional): Whether to use the statistics of the input tensor. Default True.
        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Default "NCHW".
        name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the normalized output with the same shape and data type as ``x``.

    Examples:

        .. code-block:: python

          import paddle
          import numpy as np

          np.random.seed(123)
          x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
          x = paddle.to_tensor(x_data)
          instance_norm_out = paddle.nn.functional.instance_norm(x)

          print(instance_norm_out)

    """

    if in_dynamic_mode():
        out, _, _ = _C_ops.instance_norm(x, weight, bias, "epsilon", eps,
                                         "momentum", momentum, "data_format",
                                         data_format)
        return out

    check_variable_and_dtype(x, 'input', ['float32', 'float64'], "InstanceNorm")

    attrs = {"epsilon": eps, "momentum": momentum, "data_format": data_format}

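    # Scale and Bias are optional op inputs; feed them only when both weight
    # and bias are given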
    if weight and bias:
        inputs = {"X": [x], "Scale": [weight], "Bias": [bias]}
    else:
        inputs = {"X": [x]}

    helper = LayerHelper('instance_norm', **locals())
    saved_mean = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=True)
    instance_norm_out = helper.create_variable_for_type_inference(x.dtype)

    outputs = {
        "Y": [instance_norm_out],
        "SavedMean": [saved_mean],
        "SavedVariance": [saved_variance]
    }

    helper.append_op(
        type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs)
    return instance_norm_out


def local_response_norm(x,
                        size,
                        alpha=1e-4,
                        beta=0.75,
                        k=1.,
                        data_format="NCHW",
                        name=None):
    r"""
        Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions.
        For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_

        The formula is as follows:

        .. math::

            Output(i, x, y) = Input(i, x, y) / \left(k + \alpha \sum\limits^{\min(C-1, i + size/2)}_{j = \max(0, i - size/2)}(Input(j, x, y))^2\right)^{\beta}

        In the above equation:

        - :math:`size` : The number of channels to sum over.
        - :math:`k` : The offset (to avoid division by 0).
        - :math:`\alpha` : The scaling parameter.
        - :math:`\beta` : The exponent parameter.


        Args:
            x (Tensor): The input 3-D/4-D/5-D tensor. The data type is float32.
            size (int): The number of channels to sum over.
            alpha (float, optional): The scaling parameter, positive. Default:1e-4
            beta (float, optional): The exponent, positive. Default:0.75
            k (float, optional): An offset, positive. Default: 1.0
            data_format (str, optional): Specify the data format of the input, and the data format of the output
                will be consistent with that of the input. An optional string from:
                If x is 3-D Tensor, the string could be `"NCL"` or `"NLC"` . When it is `"NCL"`,
                the data is stored in the order of: `[batch_size, input_channels, feature_length]`.
                If x is 4-D Tensor, the string could be  `"NCHW"`, `"NHWC"`. When it is `"NCHW"`,
                the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`.
                If x is 5-D Tensor, the string could be  `"NCDHW"`, `"NDHWC"` . When it is `"NCDHW"`,
                the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
            name (str, optional): Name for the operation (optional, default is None). For more information,
                please refer to :ref:`api_guide_Name`.

        Returns:
            A tensor storing the transformation result with the same shape and data type as input.


        Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32")
            y = paddle.nn.functional.local_response_norm(x, size=5)
            print(y.shape)  # [3, 3, 112, 112]
        """
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32'], 'local_response_norm')
    if data_format not in ['NCL', 'NLC', 'NCHW', 'NHWC', 'NCDHW', 'NDHWC']:
        raise ValueError(
            "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " \
            "but got {}".format(data_format))

    sizes = x.shape
    dim = len(sizes)
    if dim < 3:
        raise ValueError(
            'Expected 3D or higher dimensionality input, but got {} dimensions'.
            format(dim))

    for i, sz in enumerate(sizes):
        if not sz > 0 and i > 0:
            raise ValueError("Expected every dim's size to be larger than 0, "
                             "but the size of the {}-th dim is {}".format(i,
                                                                          sz))

    channel_last = True if data_format[-1] == "C" else False

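    # total number of elements per sample, used below to size the collapsed
    # trailing dimensions after reshaping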
    from functools import reduce
    sum_sizes = reduce(lambda x, y: x * y, sizes[1:])

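    # LRN is implemented by averaging the squared input over a window of
    # `size` channels with a stride-1 avg_pool, scaling by alpha, shifting by
    # k and raising to the power beta before dividing x by the result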
    div = paddle.unsqueeze(paddle.multiply(x, x), axis=1)
    if not channel_last:
        pad4d_shape = [0, 0, size // 2, (size - 1) // 2]
        pool2d_shape = (size, 1)
        reshape_shape = [
            sizes[0], 1, sizes[1], sizes[2],
            int(sum_sizes / (sizes[1] * sizes[2]))
        ]
        pad5d_shape = [0, 0, 0, 0, size // 2, (size - 1) // 2]
        pool3d_shape = (size, 1, 1)
    else:
        pad4d_shape = [size // 2, (size - 1) // 2, 0, 0]
        pool2d_shape = (1, size)
        reshape_shape = [
            sizes[0], 1, sizes[1], int(sum_sizes / (sizes[1] * sizes[-1])),
            sizes[-1]
        ]
        pad5d_shape = [size // 2, (size - 1) // 2, 0, 0, 0, 0]
        pool3d_shape = (1, 1, size)

    if dim == 3:
        div = paddle.nn.functional.pad(div, pad=pad4d_shape)
        div = paddle.nn.functional.avg_pool2d(
            div, kernel_size=pool2d_shape, stride=1)
        div = paddle.squeeze(div, axis=1)
    else:
        div = paddle.reshape(div, shape=reshape_shape)
        div = paddle.nn.functional.pad(div,
                                       pad=pad5d_shape,
                                       data_format='NCDHW')
        div = paddle.nn.functional.avg_pool3d(
            div, kernel_size=pool3d_shape, stride=1)
        div = paddle.reshape(paddle.squeeze(div, axis=1), sizes)

    div = paddle.scale(div, scale=alpha, bias=k)
    div = paddle.pow(div, beta)
    res = paddle.divide(x, div, name=name)
    return res