#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define pooling functions
from ...fluid.layers import utils, LayerHelper
from ...tensor.manipulation import unsqueeze, squeeze
from ...fluid.data_feeder import check_type, check_variable_and_dtype
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.fluid.framework import _in_legacy_dygraph
from paddle.fluid.framework import in_dygraph_mode

__all__ = []


def _is_list_or_tuple(input):
    return isinstance(input, (list, tuple))


def _check_input(x, dimension):
    if len(x.shape) != dimension:
        raise ValueError(
            "Expected input X to be a {}-D tensor, but received a {}-D {}".format(
                dimension, len(x.shape), type(x)))


def _check_instance(x, x_name, types=(int, float)):

    if not isinstance(x, types):
        raise ValueError(
            "Expected {} type for {} but received type: {}. ".format(
                types, x_name, type(x)))


def _check_value_limitation(x, x_name, min_limit=1e-3):

    def _check_value(x, x_name, min_limit=1e-3):
        if isinstance(x, int) and min_limit is not None and x < min_limit:
            raise ValueError(
                "Expected the input {} to be greater than {} but received x: {}. "
                .format(x_name, min_limit, x))

    for ele in x:
        _check_value(ele, x_name)


def _zero_padding_in_batch_and_channel(padding, channel_last):
    if channel_last:
        return list(padding[0]) == [0, 0] and list(padding[-1]) == [0, 0]
    else:
        return list(padding[0]) == [0, 0] and list(padding[1]) == [0, 0]


def _exclude_padding_in_batch_and_channel(padding, channel_last):
    padding_ = padding[1:-1] if channel_last else padding[2:]
    padding_ = [elem for pad_a_dim in padding_ for elem in pad_a_dim]
    return padding_
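
# Illustrative example (comment only): with pair-form padding
# [[0, 0], [1, 2], [3, 4], [0, 0]] and channel_last=True, the batch and
# channel pairs are dropped and the rest is flattened to [1, 2, 3, 4].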


def _channel_last(data_format, num_dims):
    if num_dims == 1:
        if data_format not in ['NCL', 'NLC']:
            raise ValueError(
                "Attr(data_format) should be 'NCL' or 'NLC'. Received "
                "Attr(data_format): %s" % str(data_format))
        else:
            return data_format == "NLC"
    if num_dims == 2:
        if data_format not in ['NCHW', 'NHWC']:
            raise ValueError(
                "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
                "Attr(data_format): %s" % str(data_format))
        else:
            return data_format == "NHWC"
    if num_dims == 3:
        if data_format not in ['NCDHW', 'NDHWC']:
            raise ValueError(
                "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
                "Attr(data_format): %s" % str(data_format))
        else:
            return data_format == "NDHWC"
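
# E.g. (comment only): _channel_last("NLC", 1) -> True; _channel_last("NCHW", 2) -> False.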


def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False):
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '{}'. It can only be 'SAME' or 'VALID'.".
                format(padding))
        if padding == "VALID":
            if ceil_mode:
                raise ValueError(
                    "When Attr(padding) is \"VALID\", Attr(ceil_mode) must be False. "
                    "Received ceil_mode: True.")

            padding_algorithm = "VALID"
            padding = [0] * num_dims
        else:
            padding_algorithm = "SAME"
            padding = [0] * num_dims
    elif _is_list_or_tuple(padding):
        # for padding like
        # [(pad_before, pad_after), (pad_before, pad_after), ...]
        # padding for batch_dim and channel_dim included
        if len(padding) == 2 + num_dims and _is_list_or_tuple(padding[0]):
            if not _zero_padding_in_batch_and_channel(padding, channel_last):
                raise ValueError(
                    "Non-zero padding({}) in the batch or channel dimensions "
                    "is not supported.".format(padding))
            padding_algorithm = "EXPLICIT"
            padding = _exclude_padding_in_batch_and_channel(
                padding, channel_last)
            if utils._is_symmetric_padding(padding, num_dims):
                padding = padding[0::2]
        # for padding like [pad_before, pad_after, pad_before, pad_after, ...]
        elif len(padding) == 2 * num_dims and isinstance(padding[0], int):
            padding_algorithm = "EXPLICIT"
            padding = utils.convert_to_list(padding, 2 * num_dims, 'padding')
            if utils._is_symmetric_padding(padding, num_dims):
                padding = padding[0::2]
        # for padding like [pad_d1, pad_d2, ...]
        elif len(padding) == num_dims and isinstance(padding[0], int):
            padding_algorithm = "EXPLICIT"
            padding = utils.convert_to_list(padding, num_dims, 'padding')
        else:
            raise ValueError("Invalid padding: {}".format(padding))
    # for integer padding
    else:
        padding_algorithm = "EXPLICIT"
        padding = utils.convert_to_list(padding, num_dims, 'padding')
    return padding, padding_algorithm
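
# Illustrative examples (comment only, for num_dims=2):
#   _update_padding_nd("same", 2)       -> ([0, 0], "SAME")
#   _update_padding_nd(1, 2)            -> ([1, 1], "EXPLICIT")
#   _update_padding_nd([1, 2], 2)       -> ([1, 2], "EXPLICIT")
#   _update_padding_nd([1, 1, 2, 2], 2) -> ([1, 2], "EXPLICIT")  # symmetric pairs collapse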


def _expand_low_nd_padding(padding):
    # 1d to 2d fake input
    if len(padding) == 2:
        padding = [0] * 2 + padding
    elif len(padding) == 1:
        padding = [0] + padding
    else:
        raise ValueError(
            "The size of padding's dimension should be 1 or 2. But got padding={}"
            .format(padding))
    return padding
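
# E.g. (comment only): _expand_low_nd_padding([2]) -> [0, 2];
# _expand_low_nd_padding([1, 2]) -> [0, 0, 1, 2].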


def avg_pool1d(x,
               kernel_size,
               stride=None,
               padding=0,
               exclusive=True,
               ceil_mode=False,
               name=None):
    """
    This API implements average pooling 1d operation.
    See more details in :ref:`api_nn_pooling_AvgPool1d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 3-D tensor with
                          shape [N, C, L], where `N` is batch size, `C` is the number of channels,
                          `L` is the length of the feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain an integer.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on both sides.
            3. A list[int] or tuple(int) whose length is 1, which means the feature map is zero padded by the size of `padding[0]` on both sides.
            4. A list[int] or tuple(int) whose length is 2. It has the form [pad_before, pad_after].
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is `True`.
        ceil_mode (bool): Whether to use the ceil function to calculate output height and width.
            If it is set to False, the floor function will be used. The default value is False.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            data = paddle.uniform([1, 3, 32], paddle.float32)
            AvgPool1D = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
            pool_out = AvgPool1D(data)
            # pool_out shape: [1, 3, 16]
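
            # padding can also be a string (see Args above); an illustrative
            # sketch with 'same' padding, which keeps ceil(L / stride) = 16 steps:
            pool_out = nn.functional.avg_pool1d(data, kernel_size=2, stride=2, padding='same')
            # pool_out shape: [1, 3, 16]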
    """
    # NCL to NCHW
    data_format = "NCHW"
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool1d')
    _check_input(x, 3)
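    # [N, C, L] -> [N, C, 1, L], so the 2-D pooling op can implement the 1-D case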
    x = unsqueeze(x, [2])
    kernel_size = utils.convert_to_list(kernel_size, 1, 'kernel_size')
    kernel_size = [1] + kernel_size
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 1, 'pool_stride')
        stride = [1] + stride

    _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
    _check_value_limitation(stride, "stride", min_limit=1e-3)

    channel_last = _channel_last("NCL", 1)
    padding, padding_algorithm = _update_padding_nd(padding,
                                                    1,
                                                    channel_last=channel_last,
                                                    ceil_mode=ceil_mode)

    # use 2d to implement 1d; padding must be expanded in advance.
    padding = _expand_low_nd_padding(padding)

    if in_dynamic_mode():
        output = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', kernel_size,
                               'global_pooling', False, 'strides', stride,
                               'paddings', padding, 'padding_algorithm',
                               padding_algorithm, 'use_cudnn', True,
                               'ceil_mode', ceil_mode, 'use_mkldnn', False,
                               'exclusive', exclusive, 'data_format',
                               data_format)
        return squeeze(output, [2])

    op_type = 'pool2d'
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(type=op_type,
                     inputs={"X": x},
                     outputs={"Out": pool_out},
                     attrs={
                         "pooling_type": 'avg',
                         "ksize": kernel_size,
                         "global_pooling": False,
                         "strides": stride,
                         "paddings": padding,
                         "padding_algorithm": padding_algorithm,
                         "use_cudnn": True,
                         "ceil_mode": ceil_mode,
                         "use_mkldnn": False,
                         "exclusive": exclusive,
                         "data_format": data_format,
                     })

    return squeeze(pool_out, [2])


def avg_pool2d(x,
               kernel_size,
               stride=None,
               padding=0,
               ceil_mode=False,
               exclusive=True,
               divisor_override=None,
               data_format="NCHW",
               name=None):
    """
    This API implements average pooling 2d operation.
    See more details in :ref:`api_nn_pooling_AvgPool2d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
                          `"NHWC"`, where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If it is a tuple or list,
            it must contain two integers, (kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be a square of an int.
        stride (int|list|tuple): The stride size. If it is a tuple or list,
            it must contain two integers, (stride_Height, stride_Width).
            Otherwise, the stride size will be a square of an int.

        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on every sides.
            3. A list[int] or tuple(int) whose length is 2, [pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 4. [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): when True, will use `ceil` instead of `floor` to compute the output shape
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is `True`.
        divisor_override (float): If specified, it will be used as divisor, otherwise kernel_size will be used. Default None.
        data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
                        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.

    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            # avg pool2d
            x = paddle.uniform([1, 3, 32, 32], paddle.float32)
            out = F.avg_pool2d(x,
                            kernel_size=2,
                            stride=2, padding=0)
            # out.shape [1, 3, 16, 16]
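
            # illustrative sketch: divisor_override replaces the averaging
            # divisor, so divisor_override=1 turns the mean into a window sum
            out = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0, divisor_override=1)
            # out.shape [1, 3, 16, 16]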
    """
    kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 2, 'pool_stride')

    _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
    _check_value_limitation(stride, "stride", min_limit=1e-3)

    channel_last = _channel_last(data_format, 2)
    padding, padding_algorithm = _update_padding_nd(padding,
                                                    2,
                                                    channel_last,
                                                    ceil_mode=ceil_mode)

    if in_dygraph_mode() or _in_legacy_dygraph():
        if in_dygraph_mode():
            output = _C_ops.final_state_pool2d(x, kernel_size, stride, padding,
                                               ceil_mode, exclusive,
                                               data_format, 'avg', False, False,
                                               padding_algorithm)
        else:
            output = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize',
                                   kernel_size, 'global_pooling', False,
                                   'padding_algorithm', padding_algorithm,
                                   'strides', stride, 'paddings', padding,
                                   'use_cudnn', True, 'ceil_mode', ceil_mode,
                                   'use_mkldnn', False, 'exclusive', exclusive,
                                   'data_format', data_format)
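        # the pooling op divides by the window size; rescale to apply the override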
        if divisor_override is None:
            return output
        else:
            _check_instance(divisor_override, "divisor_override")
            return output * (kernel_size[0] * kernel_size[1]) / divisor_override

    op_type = 'pool2d'
    helper = LayerHelper(op_type, **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool2d')
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(type=op_type,
                     inputs={"X": x},
                     outputs={"Out": pool_out},
                     attrs={
                         "pooling_type": "avg",
                         "ksize": kernel_size,
                         "global_pooling": False,
                         "strides": stride,
                         "paddings": padding,
                         "padding_algorithm": padding_algorithm,
                         "use_cudnn": True,
                         "ceil_mode": ceil_mode,
                         "use_mkldnn": False,
                         "exclusive": exclusive,
                         "data_format": data_format,
                     })

    if divisor_override is None:
        return pool_out
    else:
        _check_instance(divisor_override, "divisor_override")
        return pool_out * (kernel_size[0] * kernel_size[1]) / divisor_override


def avg_pool3d(x,
               kernel_size,
               stride=None,
               padding=0,
               ceil_mode=False,
               exclusive=True,
               divisor_override=None,
               data_format="NCDHW",
               name=None):
    """
    This API implements average pooling 3d operation.
    See more details in :ref:`api_nn_pooling_AvgPool3d` .

    Args:
        x (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
                          shape [N, C, D, H, W], where `N` represents the batch size, `C` represents
                          the number of channels, `D`, `H` and `W` represent the depth, height and width of the feature respectively.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size
            is a tuple or list, it must contain three integers,
            (kernel_size_Depth, kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be the cube of an int.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, (stride_Depth, stride_Height, stride_Width).
            Otherwise, the pool stride size will be a cube of an int.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on both sides.
            3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): Whether to use the ceil function to calculate output depth, height and width. If it is set to False, the floor function will be used. The default value is False.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is True.
        divisor_override (int|float): If specified, it will be used as divisor, otherwise kernel_size will be used. Default None.
        data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
                        The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.

    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.uniform([1, 3, 32, 32, 32], paddle.float32)
          # avg pool3d
          out = paddle.nn.functional.avg_pool3d(
                                            x,
                                            kernel_size = 2,
                                            stride = 2,
                                            padding=0)
          # out.shape: [1, 3, 16, 16, 16]
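
          # illustrative: ceil_mode=True rounds the output size up instead of down
          out = paddle.nn.functional.avg_pool3d(x, kernel_size=3, stride=2,
                                                padding=0, ceil_mode=True)
          # out.shape: [1, 3, 16, 16, 16]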
    """
    kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 3, 'pool_stride')

    channel_last = _channel_last(data_format, 3)
    padding, padding_algorithm = _update_padding_nd(padding,
                                                    3,
                                                    channel_last=channel_last,
                                                    ceil_mode=ceil_mode)

    _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
    _check_value_limitation(stride, "stride", min_limit=1e-3)

    if in_dygraph_mode() or _in_legacy_dygraph():
        if in_dygraph_mode():
            output = _C_ops.final_state_pool3d(x, kernel_size, stride, padding,
                                               ceil_mode, exclusive,
                                               data_format, 'avg', False, False,
                                               padding_algorithm)
        if _in_legacy_dygraph():
            output = _C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize',
                                   kernel_size, 'strides', stride, 'paddings',
                                   padding, 'global_pooling', False,
                                   'padding_algorithm', padding_algorithm,
                                   'use_cudnn', True, 'ceil_mode', ceil_mode,
                                   'use_mkldnn', False, 'exclusive', exclusive,
                                   'data_format', data_format)
        if divisor_override is None:
            return output
        else:
            _check_instance(divisor_override, "divisor_override")
            return output * (kernel_size[0] * kernel_size[1] *
                             kernel_size[2]) / divisor_override

    op_type = "pool3d"
    helper = LayerHelper(op_type, **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool3d')
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)
    outputs = {"Out": pool_out}

    helper.append_op(type=op_type,
                     inputs={"X": x},
                     outputs=outputs,
                     attrs={
                         "pooling_type": 'avg',
                         "ksize": kernel_size,
                         "global_pooling": False,
                         "strides": stride,
                         "paddings": padding,
                         "padding_algorithm": padding_algorithm,
                         "use_cudnn": True,
                         "ceil_mode": ceil_mode,
                         "use_mkldnn": False,
                         "exclusive": exclusive,
                         "data_format": data_format,
                     })

    if divisor_override is None:
        return pool_out
    else:
        _check_instance(divisor_override, "divisor_override")
        return pool_out * (kernel_size[0] * kernel_size[1] *
                           kernel_size[2]) / divisor_override


def max_pool1d(x,
               kernel_size,
               stride=None,
               padding=0,
               return_mask=False,
               ceil_mode=False,
               name=None):
    """
    This API implements max pooling 1d operation.
    See more details in :ref:`api_nn_pooling_MaxPool1d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 3-D tensor with
                          shape [N, C, L], where `N` is batch size, `C` is the number of channels,
                          `L` is the length of the feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain an integer.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An integer, which means the feature map is zero padded by size of `padding` on both sides.
            3. A list[int] or tuple(int) whose length is 1, which means the feature map is zero padded by the size of `padding[0]` on both sides.
            4. A list[int] or tuple(int) whose length is 2. It has the form [pad_before, pad_after].
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        return_mask (bool): Whether return the max indices along with the outputs. default is `False`.
        ceil_mode (bool): Whether to use the ceil function to calculate output height and width.
            If it is set to False, the floor function will be used. Default False.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Raises:
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is "VALID", but `ceil_mode` is True.
        ShapeError: If the input is not a 3-D tensor.
        ShapeError: If the computed output shape is not greater than 0.

    Examples:
        .. code-block:: python

          import paddle
          import paddle.nn.functional as F

          data = paddle.uniform([1, 3, 32], paddle.float32)
          pool_out = F.max_pool1d(data, kernel_size=2, stride=2, padding=0)
          # pool_out shape: [1, 3, 16]
          pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
          # pool_out shape: [1, 3, 16],  indices shape: [1, 3, 16]
    """
    # NCL to NCHW
    data_format = "NCHW"
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool1d')
    _check_input(x, 3)
    x = unsqueeze(x, [2])
    kernel_size = [1] + utils.convert_to_list(kernel_size, 1, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = [1] + utils.convert_to_list(stride, 1, 'pool_stride')

    padding, padding_algorithm = _update_padding_nd(padding,
                                                    1,
                                                    ceil_mode=ceil_mode)

    # use 2d to implement 1d; padding must be expanded in advance.
    padding = _expand_low_nd_padding(padding)

    if in_dygraph_mode():
        if return_mask:
            pool_out = _C_ops.final_state_max_pool2d_with_index(
                x, kernel_size, stride, padding, False, False)
            return (squeeze(pool_out[0], [2]),
                    squeeze(pool_out[1], [2])) if return_mask else squeeze(
                        pool_out[0], [2])
        else:
            pool_out = _C_ops.final_state_pool2d(x, kernel_size, stride,
                                                 padding, ceil_mode, True,
                                                 data_format, 'max', False,
                                                 False, padding_algorithm)
            return squeeze(pool_out, [2])

    if _in_legacy_dygraph():
        if return_mask:
            pool_out = _C_ops.max_pool2d_with_index(
                x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
                stride, 'paddings', padding, 'padding_algorithm',
                padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
                'use_mkldnn', False, 'exclusive', True, 'data_format',
                data_format)
            return (squeeze(pool_out[0], [2]),
                    squeeze(pool_out[1], [2])) if return_mask else squeeze(
                        pool_out[0], [2])
        else:
            pool_out = _C_ops.pool2d(x, 'pooling_type', 'max', 'ksize',
                                     kernel_size, 'global_pooling', False,
                                     'padding_algorithm', padding_algorithm,
                                     'strides', stride, 'paddings', padding,
                                     'use_cudnn', True, 'ceil_mode', ceil_mode,
                                     'use_mkldnn', False, 'exclusive', True,
                                     'data_format', data_format)
            return squeeze(pool_out, [2])

    op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference('int32')
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(type=op_type,
                     inputs={"X": x},
                     outputs=outputs,
                     attrs={
                         "pooling_type": 'max',
                         "ksize": kernel_size,
                         "global_pooling": False,
                         "strides": stride,
                         "paddings": padding,
                         "padding_algorithm": padding_algorithm,
                         "use_cudnn": True,
                         "ceil_mode": ceil_mode,
                         "use_mkldnn": False,
                         "exclusive": True,
                         "data_format": data_format,
                     })

    return (squeeze(pool_out, [2]),
            squeeze(mask, [2])) if return_mask else squeeze(pool_out, [2])


def _unpool_output_size(x, kernel_size, stride, padding, output_size):
    input_size = x.shape
    default_size = []
    for d in range(len(kernel_size)):
        default_size.append((input_size[-len(kernel_size) + d] - 1) *
                            stride[d] + kernel_size[d] - 2 * padding[d])
    if output_size is None:
        ret = default_size
    else:
        if len(output_size) == len(kernel_size) + 2:
            output_size = output_size[2:]
        if len(output_size) != len(kernel_size):
            raise ValueError(
                "output_size should be a sequence containing "
                "{} or {} elements, but it has a length of '{}'".format(
                    len(kernel_size),
                    len(kernel_size) + 2, len(output_size)))
        for d in range(len(kernel_size)):
            min_size = default_size[d] - stride[d]
            max_size = default_size[d] + stride[d]
            if not (min_size < output_size[d] < max_size):
                raise ValueError(
                    'invalid output_size "{}" (dim {} must be between {} and {})'
                    .format(output_size, d, min_size, max_size))

        ret = output_size
    return ret
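
# Illustrative example (comment only): for a 4-D input of shape [1, 1, 3, 3]
# with kernel_size=[2, 2], stride=[2, 2], padding=[0, 0] and output_size=None,
# each spatial dim defaults to (3 - 1) * 2 + 2 - 2 * 0 = 6, i.e. [6, 6].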


def max_unpool1d(x,
                 indices,
                 kernel_size,
                 stride=None,
                 padding=0,
                 data_format="NCL",
                 output_size=None,
                 name=None):
    r"""
    This API implements max unpooling 1d operation.
    `max_unpool1d` accepts the output of `max_pool1d` as input,
    including the indices of the maximum value, and computes the partial inverse.
    All non-maximum values are set to zero.

    - Input: :math:`(N, C, L_{in})`
    - Output: :math:`(N, C, L_{out})`, where
    
    .. math::
        L_{out} = (L_{in} - 1) * stride - 2 * padding + kernel\_size

    or as given by :attr:`output_size` in the call operator.


    Args:
        x (Tensor): The input tensor of unpooling operator which is a 3-D tensor with
                          shape [N, C, L]. The format of input tensor is `"NCL"`, 
                          where `N` is batch size, `C` is the number of channels, `L` is
                          the length of the feature. The data type is float32 or float64.
        indices (Tensor): The indices given out by max_pool1d which is a 3-D tensor with
                          shape [N, C, L]. The format of input tensor is `"NCL"`,
                          where `N` is batch size, `C` is the number of channels, `L` is
                          the length of the feature. The data type is int32.
        kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list,
            it must contain an integer.
        padding (int | tuple): Padding that was added to the input.
        output_size(list|tuple, optional): The target output size. If output_size is not specified, 
                           the actual output shape will be automatically calculated by (input_shape,
                           kernel_size, stride, padding).
        data_format (string): The data format of the input and output data.
                        The default is `"NCL"`. When it is `"NCL"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_length]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.

    Returns:
        Tensor: The output tensor of unpooling result. 

    Examples:
        .. code-block:: python
        
            import paddle
            import paddle.nn.functional as F

            data = paddle.rand(shape=[1, 3, 16])
            pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
            # pool_out shape: [1, 3, 8],  indices shape: [1, 3, 8]
            unpool_out = F.max_unpool1d(pool_out, indices, kernel_size=2, padding=0)
            # unpool_out shape: [1, 3, 16]

    """
    # NCL to NCHW
    if data_format not in ["NCL"]:
        raise ValueError("Attr(data_format) should be 'NCL'. Received "
                         "Attr(data_format): %s." % str(data_format))
    data_format = "NCHW"
    x = unsqueeze(x, [2])
    indices = unsqueeze(indices, [2])
    kernel_size = [1] + utils.convert_to_list(kernel_size, 1, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = [1] + utils.convert_to_list(stride, 1, 'pool_stride')
    padding, padding_algorithm = _update_padding_nd(padding, 1)
    # use 2d to implement 1d; padding must be expanded in advance.
    padding = _expand_low_nd_padding(padding)

    output_size = _unpool_output_size(x, kernel_size, stride, padding,
                                      output_size)

    if in_dygraph_mode():
        output = _C_ops.final_state_unpool(x, indices, kernel_size, stride,
                                           padding, output_size, data_format)
        return squeeze(output, [2])
    elif in_dynamic_mode():
        output = _C_ops.unpool(x, indices, 'unpooling_type', 'max', 'ksize',
                               kernel_size, 'strides', stride, 'paddings',
                               padding, "output_size", output_size,
                               "data_format", data_format)
        return squeeze(output, [2])

    op_type = "unpool"
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype(input_param_name="x")
    unpool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(type=op_type,
                     inputs={
                         "X": x,
                         "Indices": indices
                     },
                     outputs={"Out": unpool_out},
                     attrs={
                         "unpooling_type": "max",
                         "ksize": kernel_size,
                         "strides": stride,
                         "paddings": padding,
                         "output_size": output_size
                     })
    return squeeze(unpool_out, [2])


def max_unpool2d(x,
                 indices,
                 kernel_size,
                 stride=None,
                 padding=0,
                 data_format="NCHW",
                 output_size=None,
                 name=None):
    r"""
    This API implements max unpooling 2d operation.
    See more details in :ref:`api_nn_pooling_MaxUnPool2D` .


    Args:
        x (Tensor): The input tensor of unpooling operator which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"`, 
                          where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is float32 or float64.
        indices (Tensor): The indices given out by max_pool2d which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"`,
                          where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is int32.
        kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list,
            it must contain an integer.
        padding (int | tuple): Padding that was added to the input.
        output_size(list|tuple, optional): The target output size. If output_size is not specified, 
                           the actual output shape will be automatically calculated by (input_shape,
                           kernel_size, stride, padding).
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.


        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})`, where

          .. math::
            H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}

          .. math::
            W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}

          or as given by :attr:`output_size` in the call operator

    Returns:
        Tensor: The output tensor of unpooling result.

    Raises:
        ValueError: If the input is not a 4-D tensor.
        ValueError: If indices shape is not equal input shape.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            data = paddle.rand(shape=[1,1,6,6])
            pool_out, indices = F.max_pool2d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
            # pool_out shape: [1, 1, 3, 3],  indices shape: [1, 1, 3, 3]
            unpool_out = F.max_unpool2d(pool_out, indices, kernel_size=2, padding=0)
            # unpool_out shape: [1, 1, 6, 6]

            # specify a different output size than input size 
            unpool_out = F.max_unpool2d(pool_out, indices, kernel_size=2, padding=0, output_size=[7,7])
            # unpool_out shape: [1, 1, 7, 7] 

    """
    kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 2, 'pool_stride')
    padding = utils.convert_to_list(padding, 2, 'padding')

    if data_format not in ["NCHW"]:
        raise ValueError("Attr(data_format) should be 'NCHW'. Received "
                         "Attr(data_format): %s." % str(data_format))

    output_size = _unpool_output_size(x, kernel_size, stride, padding,
                                      output_size)

    if in_dygraph_mode():
        output = _C_ops.final_state_unpool(x, indices, kernel_size, stride,
                                           padding, output_size, data_format)
        return output
    elif in_dynamic_mode():
        output = _C_ops.unpool(x, indices, 'unpooling_type', 'max', 'ksize',
                               kernel_size, 'strides', stride, 'paddings',
                               padding, "output_size", output_size,
                               "data_format", data_format)
        return output

    op_type = "unpool"
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype(input_param_name="x")
    unpool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(type=op_type,
                     inputs={
                         "X": x,
                         "Indices": indices
                     },
                     outputs={"Out": unpool_out},
                     attrs={
                         "unpooling_type": "max",
                         "ksize": kernel_size,
                         "strides": stride,
                         "paddings": padding,
                         "output_size": output_size
                     })
    return unpool_out


def max_unpool3d(x,
                 indices,
                 kernel_size,
                 stride=None,
                 padding=0,
                 data_format="NCDHW",
                 output_size=None,
                 name=None):
    r"""
    This API implements max unpooling 3d operation.
    `max_unpool3d` accepts the output of `max_pool3d` as input,
    including the indices of the maximum value, and computes the partial inverse.
    All non-maximum values are set to zero.

    - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
    - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
    
    .. math::
        D_{out} = (D_{in} - 1) * stride[0] - 2 * padding[0] + kernel\_size[0]

    .. math::
        H_{out} = (H_{in} - 1) * stride[1] - 2 * padding[1] + kernel\_size[1]

    .. math::
        W_{out} = (W_{in} - 1) * stride[2] - 2 * padding[2] + kernel\_size[2]

    or as given by :attr:`output_size` in the call operator


    Args:
        x (Tensor): The input tensor of unpooling operator which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"`, 
                          where `N` is batch size, `C` is the number of channels, `D` is
                          the depth of the feature, `H` is the height of the feature, 
                          and `W` is the width of the feature. The data type is float32 or float64.
        indices (Tensor): The indices given out by max_pool3d which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"`,
                          where `N` is batch size, `C` is the number of channels, `D` is
                          the depth of the feature, `H` is the height of the feature,
                          and `W` is the width of the feature. The data type is int32.
        kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list,
            it must contain an integer.
        padding (int | tuple): Padding that was added to the input.
        output_size(list|tuple, optional): The target output size. If output_size is not specified, 
                           the actual output shape will be automatically calculated by (input_shape,
                           kernel_size, stride, padding).
        data_format (string): The data format of the input and output data.
                        The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.

    Returns:
        Tensor: The output tensor of unpooling result. 

    Examples:
        .. code-block:: python
        
            import paddle
            import paddle.nn.functional as F

            data = paddle.rand(shape=[1, 1, 4, 4, 6])
            pool_out, indices = F.max_pool3d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
            # pool_out shape: [1, 1, 2, 2, 3],  indices shape: [1, 1, 2, 2, 3]
            unpool_out = F.max_unpool3d(pool_out, indices, kernel_size=2, padding=0)
            # unpool_out shape: [1, 1, 4, 4, 6]

    """
    kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 3, 'pool_stride')
    padding = utils.convert_to_list(padding, 3, 'padding')

    if data_format not in ["NCDHW"]:
        raise ValueError("Attr(data_format) should be 'NCDHW'. Received "
                         "Attr(data_format): %s." % str(data_format))

    output_size = _unpool_output_size(x, kernel_size, stride, padding,
                                      output_size)

    if in_dygraph_mode():
        output = _C_ops.final_state_unpool3d(x, indices, kernel_size, stride,
                                             padding, output_size, data_format)
        return output
    elif in_dynamic_mode():
        output = _C_ops.unpool3d(x, indices, 'unpooling_type', 'max', 'ksize',
                                 kernel_size, 'strides', stride, 'paddings',
                                 padding, "output_size", output_size,
                                 "data_format", data_format)
        return output

    op_type = "unpool3d"
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype(input_param_name="x")
    unpool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(type=op_type,
                     inputs={
                         "X": x,
                         "Indices": indices
                     },
                     outputs={"Out": unpool_out},
                     attrs={
                         "unpooling_type": "max",
                         "ksize": kernel_size,
                         "strides": stride,
                         "paddings": padding,
                         "output_size": output_size
                     })
    return unpool_out


def max_pool2d(x,
               kernel_size,
               stride=None,
               padding=0,
               return_mask=False,
               ceil_mode=False,
               data_format="NCHW",
               name=None):
    """
    This API implements max pooling 2d operation.
    See more details in :ref:`api_nn_pooling_MaxPool2d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
                          `"NHWC"`, where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain two integers, (kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be a square of an int.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain two integers, (stride_Height, stride_Width).
            Otherwise, the pool stride size will be a square of an int.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on both sides.
            3. A list[int] or tuple(int) whose length is 2, [pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 4. [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): when True, will use `ceil` instead of `floor` to compute the output shape
        return_mask (bool): Whether to return the max indices along with the outputs. Default False. Only supported when data_format is `"NCHW"`.
        data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
                        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Raises:
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is "VALID", but `ceil_mode` is True.
        ShapeError: If the computed output shape is not greater than 0.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            # max pool2d
            x = paddle.uniform([1, 3, 32, 32], paddle.float32)
            out = F.max_pool2d(x,
                                  kernel_size=2,
                                  stride=2, padding=0)
            # out.shape [1, 3, 16, 16]
            # for return_mask=True
            out, max_indices = F.max_pool2d(x,
                                               kernel_size=2,
                                               stride=2,
                                               padding=0,
                                               return_mask=True)
            # out.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16],
    """
    kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 2, 'pool_stride')

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    channel_last = True if data_format == "NHWC" else False

    padding, padding_algorithm = _update_padding_nd(padding,
                                                    num_dims=2,
                                                    channel_last=channel_last,
                                                    ceil_mode=ceil_mode)

    if data_format == "NHWC" and return_mask:
        raise ValueError(
            "When setting return_mask to true, data_format must be set to NCHW in API:max_pool2d"
        )

    if in_dygraph_mode():
        if return_mask:
            output = _C_ops.final_state_max_pool2d_with_index(
                x, kernel_size, stride, padding, False, False)
            return output
        else:
            return _C_ops.final_state_pool2d(x, kernel_size, stride, padding,
                                             ceil_mode, True, data_format,
                                             'max', False, False,
                                             padding_algorithm)

    if _in_legacy_dygraph():
        if return_mask:
            output = _C_ops.max_pool2d_with_index(
                x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
                stride, 'paddings', padding, 'padding_algorithm',
                padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
                'use_mkldnn', False, 'exclusive', True, 'data_format',
                data_format)
            return output
        else:
            output = _C_ops.pool2d(x, 'pooling_type', 'max', 'ksize',
                                   kernel_size, 'global_pooling', False,
                                   'padding_algorithm', padding_algorithm,
                                   'strides', stride, 'paddings', padding,
                                   'use_cudnn', True, 'ceil_mode', ceil_mode,
                                   'use_mkldnn', False, 'exclusive', True,
                                   'data_format', data_format)
            return output

    op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
    helper = LayerHelper(op_type, **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'max_pool2d')
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference("int32")
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(type=op_type,
                     inputs={"X": x},
                     outputs=outputs,
                     attrs={
                         "pooling_type": 'max',
                         "ksize": kernel_size,
                         "global_pooling": False,
                         "strides": stride,
                         "paddings": padding,
                         "padding_algorithm": padding_algorithm,
                         "use_cudnn": True,
                         "ceil_mode": ceil_mode,
                         "use_mkldnn": False,
                         "exclusive": True,
                         "data_format": data_format,
                     })

    return (pool_out, mask) if return_mask else pool_out


def max_pool3d(x,
               kernel_size,
               stride=None,
               padding=0,
               return_mask=False,
               ceil_mode=False,
               data_format="NCDHW",
               name=None):
    """
    This API implements max pooling 3d operation.
    See more details in :ref:`api_nn_pooling_MaxPool3d` .

    Args:
        x (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"` or `"NDHWC"`, where N represents batch size, C represents the number of channels, D, H and W represent the depth, height and width of the feature respectively.
        kernel_size (int|list|tuple): The pool kernel size. If the kernel size
            is a tuple or list, it must contain three integers,
            (kernel_size_Depth, kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be the cube of an int.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, (stride_Depth, stride_Height, stride_Width).
            Otherwise, the pool stride size will be a cube of an int.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on every side.
            3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): ${ceil_mode_comment}
        return_mask (bool): Whether to return the max indices along with the outputs. Default False. Only supported when data_format is "NCDHW".
        data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
                        The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.

    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Raises:
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is "VALID", but `ceil_mode` is True.
        ShapeError: If the output's shape calculated is not greater than 0.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            # max pool3d
            x = paddle.uniform([1, 3, 32, 32, 32])
            output = F.max_pool3d(x,
                                  kernel_size=2,
                                  stride=2, padding=0)
            # output.shape [1, 3, 16, 16, 16]

            # for return_mask=True
            x = paddle.uniform([1, 3, 32, 32, 32])
            output, max_indices = paddle.nn.functional.max_pool3d(x,
                                          kernel_size=2,
                                          stride=2,
                                          padding=0,
                                          return_mask=True)
            # output.shape [1, 3, 16, 16, 16], max_indices.shape [1, 3, 16, 16, 16]
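
            # stride defaults to kernel_size when left as None (illustrative):
            output = F.max_pool3d(x, kernel_size=2, padding=0)
            # output.shape [1, 3, 16, 16, 16]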
    """
    kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 3, 'pool_stride')

    channel_last = _channel_last(data_format, 3)

    padding, padding_algorithm = _update_padding_nd(padding,
                                                    3,
                                                    channel_last=channel_last,
                                                    ceil_mode=ceil_mode)

    if data_format == "NDHWC" and return_mask:
        raise ValueError(
            "When setting return_mask to true, data_format must be set to NCDHW in API:max_pool3d"
        )

    if in_dygraph_mode():
        if return_mask:
            output = _C_ops.final_state_max_pool3d_with_index(
                x, kernel_size, stride, padding, False, False)
            return output
        else:
            return _C_ops.final_state_pool3d(x, kernel_size, stride, padding,
                                             ceil_mode, True, data_format,
                                             'max', False, False,
                                             padding_algorithm)

    if _in_legacy_dygraph():
        if return_mask:
            output = _C_ops.max_pool3d_with_index(
                x, 'pooling_type', 'max', 'ksize', kernel_size, 'strides',
                stride, 'paddings', padding, 'global_pooling', False,
                'padding_algorithm', padding_algorithm, 'use_cudnn', True,
                'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', True,
                'data_format', data_format)
            return output
        else:
            output = _C_ops.pool3d(x, 'pooling_type', 'max', 'ksize',
                                   kernel_size, 'global_pooling', False,
                                   'padding_algorithm', padding_algorithm,
                                   'strides', stride, 'paddings', padding,
                                   'use_cudnn', True, 'ceil_mode', ceil_mode,
                                   'use_mkldnn', False, 'exclusive', True,
                                   'data_format', data_format)
            return output

    op_type = "max_pool3d_with_index" if return_mask else "pool3d"
    helper = LayerHelper(op_type, **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference('int32')
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(type=op_type,
                     inputs={"X": x},
                     outputs=outputs,
                     attrs={
                         "pooling_type": 'max',
                         "ksize": kernel_size,
                         "global_pooling": False,
                         "strides": stride,
                         "paddings": padding,
                         "padding_algorithm": padding_algorithm,
                         "use_cudnn": True,
                         "ceil_mode": ceil_mode,
                         "use_mkldnn": False,
                         "exclusive": False,
                         "data_format": data_format,
                     })

    return (pool_out, mask) if return_mask else pool_out


def adaptive_avg_pool1d(x, output_size, name=None):
    """
    Adaptive average pooling 1d operation on :attr:`x` according to :attr:`output_size`.

    Notes:
        See more details in :ref:`api_nn_pooling_AdaptiveAvgPool1d` .

    Args:
        x (Tensor): The input Tensor of pooling, which is a 3-D tensor with shape :math:`[N, C, L]`, where :math:`N` is batch size, :math:`C` is the number of channels and :math:`L` is the length of the feature. The data type is float32 or float64.
        output_size (int): The target output size. Its data type must be int.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: The result of 1D adaptive average pooling. Its data type is same as input.

    Examples:
        .. code-block:: python

            # average adaptive pool1d
            # suppose input data in shape of [N, C, L], `output_size` is m or [m],
            # output shape is [N, C, m], adaptive pool divide L dimension
            # of input data into m grids averagely and performs poolings in each
            # grid to get output.
            # adaptive avg pool performs calculations as follow:
            #
            #     for i in range(m):
            #         lstart = floor(i * L / m)
            #         lend = ceil((i + 1) * L / m)
            #         output[:, :, i] = sum(input[:, :, lstart: lend]) / (lend - lstart)
            #
            import paddle
            import paddle.nn.functional as F

            data = paddle.uniform([1, 3, 32])
            pool_out = F.adaptive_avg_pool1d(data, output_size=16)
            # pool_out shape: [1, 3, 16]
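
            # minimal illustrative check: with L = 32 and output_size = 16, each
            # grid covers exactly 2 elements, so the result equals a plain
            # reshape-and-mean over the last dimension (up to float rounding):
            ref = data.reshape([1, 3, 16, 2]).mean(axis=-1)
            # ref matches pool_out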
    """
    pool_type = 'avg'
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'adaptive_avg_pool1d')
        check_type(output_size, 'pool_size', int, 'adaptive_avg_pool1d')
    _check_input(x, 3)
    pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')

    x = unsqueeze(x, [2])
    if in_dynamic_mode():
        pool_out = _C_ops.pool2d(x, 'pooling_type', pool_type, 'ksize',
                                 pool_size, 'adaptive', True)
        return squeeze(pool_out, [2])

    l_type = "pool2d"

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    outputs = {"Out": pool_out}
    helper.append_op(type=l_type,
                     inputs={"X": x},
                     outputs=outputs,
                     attrs={
                         "pooling_type": pool_type,
                         "ksize": pool_size,
                         "adaptive": True,
                     })

    return squeeze(pool_out, [2])


def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
    """
1394 1395 1396 1397 1398 1399 1400 1401 1402 1403
    Applies 2D adaptive avg pooling on input tensor. The h and w dimensions
    of the output tensor are determined by the parameter output_size.
    
    For avg adaptive pool2d:
    ..  math::
        hstart &= floor(i * H_{in} / H_{out})
        hend &= ceil((i + 1) * H_{in} / H_{out})
        wstart &= floor(j * W_{in} / W_{out})
        wend &= ceil((j + 1) * W_{in} / W_{out})
        Output(i ,j) &= \frac{\sum Input[hstart:hend, wstart:wend]}{(hend - hstart) * (wend - wstart)}
1404 1405 1406

    Args:
        x (Tensor): The input tensor of adaptive avg pool2d operator, which is a 4-D tensor.
1407
                          The data type can be float32 or float64.
1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418
        output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain two element, (H, W). H and W can be either a int, or None which means
            the size will be the same as that of the input.
        data_format (str): The data format of the input and output data. An optional string
            from: "NCHW", "NHWC". The default is "NCHW". When it is "NCHW", the data is stored in
            the order of: [batch_size, input_channels, input_height, input_width].
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.
    Returns:
        Tensor: The output tensor of avg adaptive pool2d result. The data type is same as input tensor.
1419

1420 1421
    Examples:
        .. code-block:: python
B
Bai Yifan 已提交
1422

1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439
            # adaptive avg pool2d
            # suppose input data in shape of [N, C, H, W], `output_size` is [m, n],
            # output shape is [N, C, m, n], adaptive pool divide H and W dimensions
            # of input data into m * n grids averagely and performs poolings in each
            # grid to get output.
            # adaptive avg pool performs calculations as follow:
            #
            #     for i in range(m):
            #         for j in range(n):
            #             hstart = floor(i * H / m)
            #             hend = ceil((i + 1) * H / m)
            #             wstart = floor(i * W / n)
            #             wend = ceil((i + 1) * W / n)
            #             output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
            #
            import paddle
            import numpy as np
1440

1441 1442 1443
            input_data = np.random.rand(2, 3, 32, 32)
            x = paddle.to_tensor(input_data)
            # x.shape is [2, 3, 32, 32]
1444
            out = paddle.nn.functional.adaptive_avg_pool2d(
1445 1446
                            x = x,
                            output_size=[3, 3])
1447
            # out.shape is [2, 3, 3, 3]
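
            # entries of output_size may be None, which keeps the input size
            # on that axis (illustrative):
            out = paddle.nn.functional.adaptive_avg_pool2d(
                            x = x,
                            output_size=[3, None])
            # out.shape is [2, 3, 3, 32]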
    """
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'adaptive_avg_pool2d')
        check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d')

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    if data_format == "NCHW":
        in_h, in_w = x.shape[2:4]
    else:
        in_h, in_w = x.shape[1:3]

    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 2, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_h
        if output_size[1] is None:
            output_size[1] = in_w

    if in_dygraph_mode():
        return _C_ops.final_state_pool2d_gpudnn_unused(x, output_size, [1, 1],
                                                       [0, 0], False, True,
                                                       data_format, 'avg',
                                                       False, True, "EXPLICIT")

    if _in_legacy_dygraph():
        return _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size,
                             'global_pooling', False, 'adaptive', True,
                             'data_format', data_format)

    l_type = 'pool2d'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    outputs = {"Out": pool_out}

    helper.append_op(type=l_type,
                     inputs={"X": x},
                     outputs=outputs,
                     attrs={
                         "pooling_type": "avg",
                         "ksize": output_size,
                         "adaptive": True,
                         "data_format": data_format,
                     })

    return pool_out


def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
    """
1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519
    This operation applies 3D adaptive avg pooling on input tensor. The h and w dimensions
    of the output tensor are determined by the parameter output_size.
    
    For avg adaptive pool3d:
    ..  math::
        dstart &= floor(i * D_{in} / D_{out})
        dend &= ceil((i + 1) * D_{in} / D_{out})
        hstart &= floor(j * H_{in} / H_{out})
        hend &= ceil((j + 1) * H_{in} / H_{out})
        wstart &= floor(k * W_{in} / W_{out})
        wend &= ceil((k + 1) * W_{in} / W_{out})
        Output(i ,j, k) &= \frac{\sum Input[dstart:dend, hstart:hend, wstart:wend]}
            {(dend - dstart) * (hend - hstart) * (wend - wstart)}
1520 1521 1522

    Args:
        x (Tensor): The input tensor of adaptive avg pool3d operator, which is a 5-D tensor.
1523
                          The data type can be float32, float64.
1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534
        output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain three elements, (D, H, W). D, H and W can be either a int, or None which means
            the size will be the same as that of the input.
        data_format (str): The data format of the input and output data. An optional string
            from: "NCDHW", "NDHWC". The default is "NCDHW". When it is "NCDHW", the data is stored in
            the order of: [batch_size, input_channels, input_depth, input_height, input_width].
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.
    Returns:
        Tensor: The output tensor of avg adaptive pool3d result. The data type is same as input tensor.
1535

1536 1537
    Examples:
        .. code-block:: python
B
Bai Yifan 已提交
1538

1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557
            # adaptive avg pool3d
            # suppose input data in shape of [N, C, D, H, W], `output_size` is [l, m, n],
            # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
            # of input data into l * m * n grids averagely and performs poolings in each
            # grid to get output.
            # adaptive avg pool performs calculations as follow:
            #
            #     for i in range(l):
            #         for j in range(m):
            #             for k in range(n):
            #                 dstart = floor(i * D / l)
            #                 dend = ceil((i + 1) * D / l)
            #                 hstart = floor(j * H / m)
            #                 hend = ceil((j + 1) * H / m)
            #                 wstart = floor(k * W / n)
            #                 wend = ceil((k + 1) * W / n)
            #                 output[:, :, i, j, k] =
            #                     avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
            import paddle
1558 1559

            input_data = paddle.randn(shape=(2, 3, 8, 32, 32))
1560
            out = paddle.nn.functional.adaptive_avg_pool3d(
1561
                            x = input_data,
1562
                            output_size=[3, 3, 3])
1563
            # out.shape is [2, 3, 3, 3, 3]
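
            # None entries in output_size keep the input size on that axis
            # (illustrative):
            out = paddle.nn.functional.adaptive_avg_pool3d(
                            x = input_data,
                            output_size=[3, None, None])
            # out.shape is [2, 3, 3, 32, 32]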
    """
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'adaptive_avg_pool3d')
        check_type(data_format, 'data_format', str, 'adaptive_avg_pool3d')

    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    if data_format == "NCDHW":
        in_l, in_h, in_w = x.shape[2:5]
    else:
        in_l, in_h, in_w = x.shape[1:4]

    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 3, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_l
        if output_size[1] is None:
            output_size[1] = in_h
        if output_size[2] is None:
            output_size[2] = in_w

    if in_dynamic_mode():
        return _C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size,
                             'global_pooling', False, 'adaptive', True,
                             'data_format', data_format)

    l_type = 'pool3d'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)
    outputs = {"Out": pool_out}

    helper.append_op(type=l_type,
                     inputs={"X": x},
                     outputs=outputs,
                     attrs={
                         "pooling_type": "avg",
                         "ksize": output_size,
                         "adaptive": True,
                         "data_format": data_format,
                     })

    return pool_out


def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
    """
    This API implements adaptive max pooling 1d operation.
    See more details in :ref:`api_nn_pooling_AdaptiveMaxPool1d` .

    Args:
        x (Tensor): The input tensor of pooling operator, which is a 3-D tensor
                              with shape [N, C, L].  The format of input tensor is NCL,
                              where N is batch size, C is the number of channels, L is the
                              length of the feature. The data type is float32 or float64.
        output_size (int): The pool kernel size. The value should be an integer.
        return_mask (bool): If true, the index of max pooling point will be returned along
                with outputs. It cannot be set in average pooling type. Default False.
        name(str, optional): For detailed information, please refer
                                 to :ref:`api_guide_Name`. Usually name does not need to be set
                                 and is None by default.
    Returns:
            Tensor: The output tensor of adaptive pooling result. The data type is same
                      as input tensor.
    Raises:
            ValueError: 'output_size' should be an integer.
    Examples:
        .. code-block:: python

              # max adaptive pool1d
              # suppose input data in shape of [N, C, L], `output_size` is m or [m],
              # output shape is [N, C, m], adaptive pool divide L dimension
              # of input data into m grids averagely and performs poolings in each
              # grid to get output.
              # adaptive max pool performs calculations as follow:
              #
              #     for i in range(m):
              #         lstart = floor(i * L / m)
              #         lend = ceil((i + 1) * L / m)
              #         output[:, :, i] = max(input[:, :, lstart: lend])
              #
              import paddle
              import paddle.nn.functional as F

              data = paddle.uniform([1, 3, 32], paddle.float32)
              pool_out = F.adaptive_max_pool1d(data, output_size=16)
              # pool_out shape: [1, 3, 16]
              pool_out, indices = F.adaptive_max_pool1d(data, output_size=16, return_mask=True)
              # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16]
    """
    pool_type = 'max'
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'adaptive_max_pool1d')
        check_type(output_size, 'pool_size', int, 'adaptive_max_pool1d')
        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool1d')
    _check_input(x, 3)

    pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')

    x = unsqueeze(x, [2])
    if in_dygraph_mode():
        pool_out = _C_ops.final_state_max_pool2d_with_index(
            x, pool_size, [1, 1], [0, 0], False, True)
        return (squeeze(pool_out[0], [2]), squeeze(
            pool_out[1], [2])) if return_mask else squeeze(pool_out[0], [2])
    if _in_legacy_dygraph():
        pool_out = _C_ops.max_pool2d_with_index(x, 'pooling_type', pool_type,
                                                'ksize', pool_size, 'adaptive',
                                                True)
        return (squeeze(pool_out[0], [2]), squeeze(
            pool_out[1], [2])) if return_mask else squeeze(pool_out[0], [2])

    l_type = 'max_pool2d_with_index'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    mask = helper.create_variable_for_type_inference('int32')
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(type=l_type,
                     inputs={"X": x},
                     outputs=outputs,
                     attrs={
                         "pooling_type": pool_type,
                         "ksize": pool_size,
                         "adaptive": True,
                     })

    return (squeeze(pool_out, [2]),
            squeeze(mask, [2])) if return_mask else squeeze(pool_out, [2])


def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
    """
        This operation applies a 2D adaptive max pooling on input tensor.
        See more details in :ref:`api_nn_pooling_AdaptiveMaxPool2d` .

        Args:
            x (Tensor): The input tensor of adaptive max pool2d operator, which is a 4-D tensor. The data type can be float16, float32, float64, int32 or int64.
            output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain two elements, (H, W). H and W can be either an int, or None which means the size will be the same as that of the input.
            return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False.
            name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name does not need to be set and is None by default.

        Returns:
            Tensor: The output tensor of adaptive max pool2d result. The data type is same as input tensor.

        Examples:
            .. code-block:: python

              # max adaptive pool2d
              # suppose input data in the shape of [N, C, H, W], `output_size` is [m, n]
              # output shape is [N, C, m, n], adaptive pool divide H and W dimensions
              # of input data into m*n grids averagely and performs poolings in each
              # grid to get output.
              # adaptive max pool performs calculations as follow:
              #
              #     for i in range(m):
              #         for j in range(n):
              #             hstart = floor(i * H / m)
              #             hend = ceil((i + 1) * H / m)
              #             wstart = floor(j * W / n)
              #             wend = ceil((j + 1) * W / n)
              #             output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
              #
              import paddle

              input_data = paddle.randn(shape=(2, 3, 32, 32))
              out = paddle.nn.functional.adaptive_max_pool2d(
                            x = input_data,
                            output_size=[3, 3])
              # out.shape is [2, 3, 3, 3]
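
              # for return_mask=True (illustrative):
              out, mask = paddle.nn.functional.adaptive_max_pool2d(
                            x = input_data,
                            output_size=[3, 3],
                            return_mask=True)
              # out.shape is [2, 3, 3, 3], mask.shape is [2, 3, 3, 3]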
    """
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'adaptive_max_pool2d')
        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool2d')
        #check_type(output_size, 'pool_size', (int), 'adaptive_max_pool2d')
    _check_input(x, 4)

    in_h, in_w = x.shape[2:4]
    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 2, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_h
        if output_size[1] is None:
            output_size[1] = in_w
    if in_dygraph_mode():
        pool_out = _C_ops.final_state_max_pool2d_with_index(
            x, output_size, [1, 1], [0, 0], False, True)
        return pool_out if return_mask else pool_out[0]
    if _in_legacy_dygraph():
        pool_out = _C_ops.max_pool2d_with_index(x, 'pooling_type', 'max',
                                                'ksize', output_size,
                                                'adaptive', True)
        return pool_out if return_mask else pool_out[0]

    l_type = 'max_pool2d_with_index'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    mask = helper.create_variable_for_type_inference('int32')
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(type=l_type,
                     inputs={"X": x},
                     outputs=outputs,
                     attrs={
                         "pooling_type": 'max',
                         "ksize": output_size,
                         "adaptive": True,
                     })

    return (pool_out, mask) if return_mask else pool_out


def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
    """
        This operation applies a 3D adaptive max pooling on input tensor.
        See more details in :ref:`api_nn_pooling_AdaptiveMaxPool3d` .

        Args:
            x (Tensor): The input tensor of adaptive max pool3d operator, which is a 5-D tensor. The data type can be float32, float64.
            output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain three elements, (D, H, W). D, H and W can be either an int, or None which means the size will be the same as that of the input.
            return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False.
            name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name does not need to be set and is None by default.

        Returns:
            Tensor: The output tensor of adaptive max pool3d result. The data type is same as input tensor.

        Examples:
            .. code-block:: python

              # adaptive max pool3d
              # suppose input data in the shape of [N, C, D, H, W], `output_size` is [l, m, n]
              # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
              # of input data into l * m * n grids averagely and performs poolings in each
              # grid to get output.
              # adaptive max pool performs calculations as follow:
              #
              #     for i in range(l):
              #         for j in range(m):
              #             for k in range(n):
              #                 dstart = floor(i * D / l)
              #                 dend = ceil((i + 1) * D / l)
              #                 hstart = floor(j * H / m)
              #                 hend = ceil((j + 1) * H / m)
              #                 wstart = floor(k * W / n)
              #                 wend = ceil((k + 1) * W / n)
              #                 output[:, :, i, j, k] = max(input[:, :, dstart: dend, hstart: hend, wstart: wend])
              #
              import paddle

              input_data = paddle.randn(shape=(2, 3, 8, 32, 32))
              out = paddle.nn.functional.adaptive_max_pool3d(
                            x = input_data,
                            output_size=[3, 3, 3])
              # out.shape is [2, 3, 3, 3, 3]
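
              # for return_mask=True (illustrative):
              out, mask = paddle.nn.functional.adaptive_max_pool3d(
                            x = input_data,
                            output_size=[3, 3, 3],
                            return_mask=True)
              # out.shape is [2, 3, 3, 3, 3], mask.shape is [2, 3, 3, 3, 3]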
    """

    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'adaptive_max_pool3d')
        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool3d')
        #check_type(output_size, 'pool_size', (int), 'adaptive_max_pool3d')
    _check_input(x, 5)

    in_l, in_h, in_w = x.shape[2:5]
    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 3, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_l
        if output_size[1] is None:
            output_size[1] = in_h
        if output_size[2] is None:
            output_size[2] = in_w

    if in_dynamic_mode():
        pool_out = _C_ops.max_pool3d_with_index(x, 'pooling_type', 'max',
                                                'ksize', output_size,
                                                'adaptive', True)
        return pool_out if return_mask else pool_out[0]

    l_type = 'max_pool3d_with_index'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    mask = helper.create_variable_for_type_inference('int32')
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(type=l_type,
                     inputs={"X": x},
                     outputs=outputs,
                     attrs={
                         "pooling_type": 'max',
                         "ksize": output_size,
                         "adaptive": True,
                     })

    return (pool_out, mask) if return_mask else pool_out