#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.fluid.framework import Variable, in_dygraph_mode

from ...fluid.data_feeder import check_type, check_variable_and_dtype

# TODO: define pooling functions
from ...fluid.layers import LayerHelper, utils
from ...tensor.manipulation import squeeze, unsqueeze

__all__ = []


def _is_list_or_tuple(input):
    return isinstance(input, (list, tuple))


def _check_input(x, dimension):
    if len(x.shape) != dimension:
        raise ValueError(
            "Excepted Input X is {}-D tensor, but received {}-D {}".format(
                dimension, len(x.shape), type(x)
            )
        )


def _check_instance(x, x_name, types=(int, float)):

    if not isinstance(x, types):
        raise ValueError(
            "Excepted {} type for {} but received type: {}. ".format(
                types, x_name, type(x)
            )
        )


def _check_value_limitation(x, x_name, min_limit=1e-3):
    def _check_value(x, x_name, min_limit=1e-3):
        if isinstance(x, int) and min_limit is not None and x < min_limit:
            raise ValueError(
                "Excepted the input {} to be greater than {} but received x: {}. ".format(
                    x_name, min_limit, x
                )
            )

    for ele in x:
        _check_value(ele, x_name)


def _zero_padding_in_batch_and_channel(padding, channel_last):
    if channel_last:
        return list(padding[0]) == [0, 0] and list(padding[-1]) == [0, 0]
    else:
        return list(padding[0]) == [0, 0] and list(padding[1]) == [0, 0]


def _exclude_padding_in_batch_and_channel(padding, channel_last):
    padding_ = padding[1:-1] if channel_last else padding[2:]
    padding_ = [elem for pad_a_dim in padding_ for elem in pad_a_dim]
    return padding_
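
# Illustrative example (hypothetical values): with channel_last=True and
# padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
# _zero_padding_in_batch_and_channel(...) is True and
# _exclude_padding_in_batch_and_channel(...) flattens the spatial pairs
# to [1, 2, 3, 4].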


def _channel_last(data_format, num_dims):
    if num_dims == 1:
        if data_format not in ['NCL', 'NLC']:
            raise ValueError(
                "Attr(data_format) should be 'NCL' or 'NLC'. Received "
                "Attr(data_format): %s" % str(data_format)
            )
        else:
            return data_format == "NLC"
    if num_dims == 2:
        if data_format not in ['NCHW', 'NHWC']:
            raise ValueError(
                "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
                "Attr(data_format): %s" % str(data_format)
            )
        else:
            return data_format == "NHWC"
    if num_dims == 3:
        if data_format not in ['NCDHW', 'NDHWC']:
            raise ValueError(
                "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
                "Attr(data_format): %s" % str(data_format)
            )
        else:
            return data_format == "NDHWC"
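
# Illustrative example: _channel_last("NHWC", 2) is True, while
# _channel_last("NCL", 1) is False.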


def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False):
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '{}'. It can only be 'SAME' or 'VALID'.".format(
                    padding
                )
            )
        if padding == "VALID":
            if ceil_mode is not False:
                raise ValueError(
                    "When Attr(padding) is \"VALID\", Attr(ceil_mode) must be False. "
                    "Received ceil_mode: True."
                )

            padding_algorithm = "VALID"
            padding = [0] * num_dims
        else:
            padding_algorithm = "SAME"
            padding = [0] * num_dims
    elif _is_list_or_tuple(padding):
        # for padding like
        # [(pad_before, pad_after), (pad_before, pad_after), ...]
        # padding for batch_dim and channel_dim included
        if len(padding) == 2 + num_dims and _is_list_or_tuple(padding[0]):
            if not _zero_padding_in_batch_and_channel(padding, channel_last):
                raise ValueError(
                    "Non-zero padding({}) in the batch or channel dimensions "
                    "is not supported.".format(padding)
                )
            padding_algorithm = "EXPLICIT"
            padding = _exclude_padding_in_batch_and_channel(
                padding, channel_last
            )
            if utils._is_symmetric_padding(padding, num_dims):
                padding = padding[0::2]
        # for padding like [pad_before, pad_after, pad_before, pad_after, ...]
        elif len(padding) == 2 * num_dims and isinstance(padding[0], int):
            padding_algorithm = "EXPLICIT"
            padding = utils.convert_to_list(padding, 2 * num_dims, 'padding')
            if utils._is_symmetric_padding(padding, num_dims):
                padding = padding[0::2]
        # for padding like [pad_d1, pad_d2, ...]
        elif len(padding) == num_dims and isinstance(padding[0], int):
            padding_algorithm = "EXPLICIT"
            padding = utils.convert_to_list(padding, num_dims, 'padding')
        else:
            raise ValueError("Invalid padding: {}".format(padding))
    # for integer padding
    else:
        padding_algorithm = "EXPLICIT"
        padding = utils.convert_to_list(padding, num_dims, 'padding')
    return padding, padding_algorithm
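
# Illustrative example: _update_padding_nd("same", num_dims=2) returns
# ([0, 0], "SAME"), while _update_padding_nd(1, num_dims=2) returns
# ([1, 1], "EXPLICIT").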


def _expand_low_nd_padding(padding):
    # expand the padding to the fake 2d input's [h, w] layout used for 1d ops
    if len(padding) == 2:
        padding = [0] * 2 + padding
    elif len(padding) == 1:
        padding = [0] + padding
    else:
        raise ValueError(
            "The size of padding's dimmention should be 1 or 2. But got padding={}".format(
                padding
            )
        )
    return padding
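
# Illustrative example: _expand_low_nd_padding([3]) returns [0, 3], and
# _expand_low_nd_padding([1, 2]) returns [0, 0, 1, 2], padding the fake
# height dimension with zeros.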


def avg_pool1d(
    x,
    kernel_size,
    stride=None,
    padding=0,
    exclusive=True,
    ceil_mode=False,
    name=None,
):
    """
    This API implements average pooling 1d operation.
    See more details in :ref:`api_nn_pooling_AvgPool1d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 3-D tensor with
                          shape [N, C, L], where `N` is batch size, `C` is the number of channels,
                          `L` is the length of the feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain an integer.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on both sides.
            3. A list[int] or tuple(int) whose length is 1, which means the feature map is zero padded by the size of `padding[0]` on both sides.
            4. A list[int] or tuple(int) whose length is 2. It has the form [pad_before, pad_after].
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is `True`.
        ceil_mode (bool): Whether to use the ceil function to calculate output height and width.
            If it is set to False, the floor function will be used. The default value is False.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            data = paddle.uniform([1, 3, 32], paddle.float32)
            AvgPool1D = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
            pool_out = AvgPool1D(data)
            # pool_out shape: [1, 3, 16]
    """
    """NCL to NCHW"""
    data_format = "NCHW"
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool1d')
    _check_input(x, 3)
    x = unsqueeze(x, [2])
    kernel_size = utils.convert_to_list(kernel_size, 1, 'kernel_size')
    kernel_size = [1] + kernel_size
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 1, 'pool_stride')
        stride = [1] + stride

    _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
    _check_value_limitation(stride, "stride", min_limit=1e-3)

    channel_last = _channel_last("NCL", 1)
    padding, padding_algorithm = _update_padding_nd(
        padding, 1, channel_last=channel_last, ceil_mode=ceil_mode
    )

    # using 2d ops to implement 1d pooling requires expanding the padding in advance
    padding = _expand_low_nd_padding(padding)

    if in_dygraph_mode():
        output = _C_ops.pool2d(
            x,
            kernel_size,
            stride,
            padding,
            ceil_mode,
            exclusive,
            data_format,
            'avg',
            False,
            False,
            padding_algorithm,
        )
        return squeeze(output, [2])

    else:
        op_type = 'pool2d'
        helper = LayerHelper(op_type, **locals())
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)

        helper.append_op(
            type=op_type,
            inputs={"X": x},
            outputs={"Out": pool_out},
            attrs={
                "pooling_type": 'avg',
                "ksize": kernel_size,
                "global_pooling": False,
                "strides": stride,
                "paddings": padding,
                "padding_algorithm": padding_algorithm,
                "use_cudnn": True,
                "ceil_mode": ceil_mode,
                "use_mkldnn": False,
                "exclusive": exclusive,
                "data_format": data_format,
            },
        )

        return squeeze(pool_out, [2])


def avg_pool2d(
    x,
    kernel_size,
    stride=None,
    padding=0,
    ceil_mode=False,
    exclusive=True,
    divisor_override=None,
    data_format="NCHW",
    name=None,
):
    """
    This API implements average pooling 2d operation.
    See more details in :ref:`api_nn_pooling_AvgPool2d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
                          `"NHWC"`, where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If it is a tuple or list,
            it must contain two integers, (kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be a square of an int.
        stride (int|list|tuple): The stride size. If it is a tuple or list,
            it must contain two integers, (stride_Height, stride_Width).
            Otherwise, the stride size will be a square of an int.

        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on all sides.
            3. A list[int] or tuple(int) whose length is 2, [pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 4. [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): When True, use `ceil` instead of `floor` to compute the output shape. Default False.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is `True`.
        divisor_override (float): If specified, it will be used as the divisor; otherwise kernel_size will be used. Default None.
        data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
                        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             None by default.

    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            # avg pool2d
            x = paddle.uniform([1, 3, 32, 32], paddle.float32)
            out = F.avg_pool2d(x,
                            kernel_size=2,
                            stride=2, padding=0)
            # out.shape [1, 3, 16, 16]
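
            # a sketch of divisor_override (illustrative): with the kernel
            # area 2*2 == 4 as the override, the result matches `out` above
            out2 = F.avg_pool2d(x,
                            kernel_size=2,
                            stride=2, padding=0,
                            divisor_override=4)
            # out2.shape [1, 3, 16, 16]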
    """
    kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 2, 'pool_stride')

    _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
    _check_value_limitation(stride, "stride", min_limit=1e-3)

    channel_last = _channel_last(data_format, 2)
    padding, padding_algorithm = _update_padding_nd(
        padding, 2, channel_last, ceil_mode=ceil_mode
    )

    if in_dygraph_mode():
        output = _C_ops.pool2d(
            x,
            kernel_size,
            stride,
            padding,
            ceil_mode,
            exclusive,
            data_format,
            'avg',
            False,
            False,
            padding_algorithm,
        )
        if divisor_override is None:
            return output
        else:
            _check_instance(divisor_override, "divisor_override")
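            # pool2d divided by the kernel area; rescale so the effective
            # divisor becomes divisor_override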
            return output * (kernel_size[0] * kernel_size[1]) / divisor_override
    else:
        op_type = 'pool2d'
        helper = LayerHelper(op_type, **locals())
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool2d')
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)

        helper.append_op(
            type=op_type,
            inputs={"X": x},
            outputs={"Out": pool_out},
            attrs={
                "pooling_type": "avg",
                "ksize": kernel_size,
                "global_pooling": False,
                "strides": stride,
                "paddings": padding,
                "padding_algorithm": padding_algorithm,
                "use_cudnn": True,
                "ceil_mode": ceil_mode,
                "use_mkldnn": False,
                "exclusive": exclusive,
                "data_format": data_format,
            },
        )

        if divisor_override is None:
            return pool_out
        else:
            _check_instance(divisor_override, "divisor_override")
            return (
                pool_out * (kernel_size[0] * kernel_size[1]) / divisor_override
            )


def avg_pool3d(
    x,
    kernel_size,
    stride=None,
    padding=0,
    ceil_mode=False,
    exclusive=True,
    divisor_override=None,
    data_format="NCDHW",
    name=None,
):
    """
    This API implements average pooling 3d operation.
    See more details in :ref:`api_nn_pooling_AvgPool3d` .

    Args:
        x (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
                          shape [N, C, D, H, W], where `N` represents the batch size, `C` represents
                          the number of channels, `D`, `H` and `W` represent the depth, height and width of the feature respectively.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size
            is a tuple or list, it must contain three integers,
            (kernel_size_Depth, kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be the cube of an int.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, (stride_Depth, stride_Height, stride_Width).
            Otherwise, the pool stride size will be a cube of an int.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on all sides.
            3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): Whether to use the ceil function to calculate output depth, height and width. Default False.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is True.
        divisor_override (int|float): If specified, it will be used as the divisor; otherwise kernel_size will be used. Default None.
        data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
                        The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             None by default.

    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.uniform([1, 3, 32, 32, 32], paddle.float32)
          # avg pool3d
          out = paddle.nn.functional.avg_pool3d(
                                            x,
                                            kernel_size = 2,
                                            stride = 2,
                                            padding=0)
          # out.shape: [1, 3, 16, 16, 16]
    """
    kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 3, 'pool_stride')

    channel_last = _channel_last(data_format, 3)
    padding, padding_algorithm = _update_padding_nd(
        padding, 3, channel_last=channel_last, ceil_mode=ceil_mode
    )

    _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
    _check_value_limitation(stride, "stride", min_limit=1e-3)

    if in_dygraph_mode():
        pool_out = _C_ops.pool3d(
            x,
            kernel_size,
            stride,
            padding,
            ceil_mode,
            exclusive,
            data_format,
            'avg',
            False,
            False,
            padding_algorithm,
        )
    else:
        op_type = "pool3d"
        helper = LayerHelper(op_type, **locals())
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool3d')
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)
        outputs = {"Out": pool_out}

        helper.append_op(
            type=op_type,
            inputs={"X": x},
            outputs=outputs,
            attrs={
                "pooling_type": 'avg',
                "ksize": kernel_size,
                "global_pooling": False,
                "strides": stride,
                "paddings": padding,
                "padding_algorithm": padding_algorithm,
                "use_cudnn": True,
                "ceil_mode": ceil_mode,
                "use_mkldnn": False,
                "exclusive": exclusive,
                "data_format": data_format,
            },
        )

    if divisor_override is None:
        return pool_out
    else:
        _check_instance(divisor_override, "divisor_override")
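        # pool3d divided by the kernel volume; rescale so the effective
        # divisor becomes divisor_override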
        return (
            pool_out
            * (kernel_size[0] * kernel_size[1] * kernel_size[2])
            / divisor_override
        )


def max_pool1d(
    x,
    kernel_size,
    stride=None,
    padding=0,
    return_mask=False,
    ceil_mode=False,
    name=None,
):
    """
    This API implements max pooling 1d operation.
    See more details in :ref:`api_nn_pooling_MaxPool1d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 3-D tensor with
                          shape [N, C, L], where `N` is batch size, `C` is the number of channels,
                          `L` is the length of the feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain an integer.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An integer, which means the feature map is zero padded by size of `padding` on both sides.
            3. A list[int] or tuple(int) whose length is 1, which means the feature map is zero padded by the size of `padding[0]` on both sides.
            4. A list[int] or tuple(int) whose length is 2. It has the form [pad_before, pad_after].
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        return_mask (bool): Whether return the max indices along with the outputs. default is `False`.
        ceil_mode (bool): Whether to use the ceil function to calculate output height and width.
            If it is set to False, the floor function will be used. Default False.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Examples:
        .. code-block:: python

          import paddle
          import paddle.nn.functional as F

          data = paddle.uniform([1, 3, 32], paddle.float32)
          pool_out = F.max_pool1d(data, kernel_size=2, stride=2, padding=0)
          # pool_out shape: [1, 3, 16]
          pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
          # pool_out shape: [1, 3, 16],  indices shape: [1, 3, 16]
    """
    """NCL to NCHW"""
    data_format = "NCHW"
    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool1d')
    _check_input(x, 3)
    x = unsqueeze(x, [2])
    kernel_size = [1] + utils.convert_to_list(kernel_size, 1, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = [1] + utils.convert_to_list(stride, 1, 'pool_stride')

    padding, padding_algorithm = _update_padding_nd(
        padding, 1, ceil_mode=ceil_mode
    )

    # using 2d ops to implement 1d pooling requires expanding the padding in advance
    padding = _expand_low_nd_padding(padding)

    if in_dygraph_mode():
        if return_mask:
            pool_out = _C_ops.max_pool2d_with_index(
                x, kernel_size, stride, padding, False, False
            )
            return (
                (squeeze(pool_out[0], [2]), squeeze(pool_out[1], [2]))
                if return_mask
                else squeeze(pool_out[0], [2])
            )
        else:
            pool_out = _C_ops.pool2d(
                x,
                kernel_size,
                stride,
                padding,
                ceil_mode,
                True,
                data_format,
                'max',
                False,
                False,
                padding_algorithm,
            )
            return squeeze(pool_out, [2])

    else:
        op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
        helper = LayerHelper(op_type, **locals())
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)
        mask = helper.create_variable_for_type_inference('int32')
        outputs = {"Out": pool_out, "Mask": mask}

        helper.append_op(
            type=op_type,
            inputs={"X": x},
            outputs=outputs,
            attrs={
                "pooling_type": 'max',
                "ksize": kernel_size,
                "global_pooling": False,
                "strides": stride,
                "paddings": padding,
                "padding_algorithm": padding_algorithm,
                "use_cudnn": True,
                "ceil_mode": ceil_mode,
                "use_mkldnn": False,
                "exclusive": True,
                "data_format": data_format,
            },
        )

        return (
            (squeeze(pool_out, [2]), squeeze(mask, [2]))
            if return_mask
            else squeeze(pool_out, [2])
        )


def _unpool_output_size(x, kernel_size, stride, padding, output_size):
    assert output_size is None or isinstance(output_size, (list, tuple)), (
        "Required output_size is None|list|tuple, but received %s" % output_size
    )
    input_size = x.shape
    default_size = []
    for d in range(len(kernel_size)):
        default_size.append(
            (input_size[-len(kernel_size) + d] - 1) * stride[d]
            + kernel_size[d]
            - 2 * padding[d]
        )

    has_static_var = False
    if output_size is None:
        return default_size
    elif utils._contain_var(output_size):
        if not in_dygraph_mode():
            has_static_var = True
            output_size = utils._convert_to_tensor_list(output_size)
        else:
            for i, var in enumerate(output_size):
                if isinstance(var, Variable):
                    output_size[i] = var.numpy()[0]

    if len(output_size) == len(kernel_size) + 2:
        output_size = output_size[2:]
    if len(output_size) != len(kernel_size):
        raise ValueError(
            "output_size should be a sequence containing "
            "{} or {} elements, but it has a length of '{}'".format(
                len(kernel_size), len(kernel_size) + 2, len(output_size)
            )
        )
    if not has_static_var:
        for d in range(len(kernel_size)):
            min_size = default_size[d] - stride[d]
            max_size = default_size[d] + stride[d]
            if not (min_size < output_size[d] < max_size):
                raise ValueError(
                    'invalid output_size "{}" (dim {} must be between {} and {})'.format(
                        output_size, d, min_size, max_size
                    )
                )

    return output_size
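
# Illustrative example: for a length-8 input dimension with kernel_size=[2],
# stride=[2], padding=[0] and output_size=None, the default unpooled size is
# [(8 - 1) * 2 + 2 - 2 * 0] = [16].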


def max_unpool1d(
    x,
    indices,
    kernel_size,
    stride=None,
    padding=0,
    data_format="NCL",
    output_size=None,
    name=None,
):
    r"""
    This API implements max unpooling 1d operation.
    `max_unpool1d` accepts the output of `max_pool1d` as input,
    including the indices of the maximum values, and computes the partial inverse.
    All non-maximum values are set to zero.

    - Input: :math:`(N, C, L_{in})`
    - Output: :math:`(N, C, L_{out})`, where

    .. math::
        L_{out} = (L_{in} - 1) * stride - 2 * padding + kernel\_size

    or as given by :attr:`output_size` in the call operator.


    Args:
        x (Tensor): The input tensor of unpooling operator which is a 3-D tensor with
                          shape [N, C, L]. The format of input tensor is `"NCL"`,
                          where `N` is batch size, `C` is the number of channels, `L` is
                          the length of the feature. The data type is float32 or float64.
        indices (Tensor): The indices given out by maxpooling1d which is a 3-D tensor with
                          shape [N, C, L]. The format of input tensor is `"NCL"` ,
                          where `N` is batch size, `C` is the number of channels, `L` is
                          the length of the feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list,
            it must contain an integer.
        padding (int | tuple): Padding that was added to the input.
        output_size(list|tuple, optional): The target output size. If output_size is not specified,
                           the actual output shape will be automatically calculated by (input_shape,
                           kernel_size, stride, padding).
        data_format (string): The data format of the input and output data.
                        The default is `"NCL"`. When it is `"NCL"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_length]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             None by default.

    Returns:
        Tensor: The output tensor of unpooling result.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            data = paddle.rand(shape=[1, 3, 16])
            pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
            # pool_out shape: [1, 3, 8],  indices shape: [1, 3, 8]
            unpool_out = F.max_unpool1d(pool_out, indices, kernel_size=2, padding=0)
            # unpool_out shape: [1, 3, 16]

    """
    """NCL to NCHW"""
    if data_format not in ["NCL"]:
        raise ValueError(
            "Attr(data_format) should be 'NCL'. Received "
            "Attr(data_format): %s." % str(data_format)
        )
    data_format = "NCHW"
    x = unsqueeze(x, [2])
    indices = unsqueeze(indices, [2])
    kernel_size = [1] + utils.convert_to_list(kernel_size, 1, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = [1] + utils.convert_to_list(stride, 1, 'pool_stride')
    padding, padding_algorithm = _update_padding_nd(padding, 1)
    # using 2d ops to implement 1d unpooling requires expanding the padding in advance
    padding = _expand_low_nd_padding(padding)

    output_size = _unpool_output_size(
        x, kernel_size, stride, padding, output_size
    )

    if in_dygraph_mode():
        output = _C_ops.unpool(
            x, indices, kernel_size, stride, padding, output_size, data_format
        )
        return squeeze(output, [2])
    elif in_dynamic_mode():
        output = _legacy_C_ops.unpool(
            x,
            indices,
            'unpooling_type',
            'max',
            'ksize',
            kernel_size,
            'strides',
            stride,
            'paddings',
            padding,
            "output_size",
            output_size,
            "data_format",
            data_format,
        )
        return squeeze(output, [2])

    op_type = "unpool"
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype(input_param_name="x")
    unpool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=op_type,
        inputs={"X": x, "Indices": indices},
        outputs={"Out": unpool_out},
        attrs={
            "unpooling_type": "max",
            "ksize": kernel_size,
            "strides": stride,
            "paddings": padding,
            "output_size": output_size,
        },
    )
    return squeeze(unpool_out, [2])


def max_unpool2d(
    x,
    indices,
    kernel_size,
    stride=None,
    padding=0,
    data_format="NCHW",
    output_size=None,
    name=None,
):
    r"""
    This API implements max unpooling 2d operation.
    See more details in :ref:`api_nn_pooling_MaxUnPool2D` .


    Args:
        x (Tensor): The input tensor of unpooling operator which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"`,
                          where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is float32 or float64.
        indices (Tensor): The indices given out by maxpooling2d which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"` ,
                          where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list,
            it must contain an integer.
        padding (int | tuple): Padding that was added to the input.
        output_size(list|tuple, optional): The target output size. If output_size is not specified,
                           the actual output shape will be automatically calculated by (input_shape,
                           kernel_size, stride, padding).
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             None by default.


    - Input: :math:`(N, C, H_{in}, W_{in})`
    - Output: :math:`(N, C, H_{out}, W_{out})`, where

    .. math::
        H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}

    .. math::
        W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}

    or as given by :attr:`output_size` in the call operator.

    Returns:
        Tensor: The output tensor of unpooling result.

    Raises:
        ValueError: If the input is not a 4-D tensor.
        ValueError: If the indices shape does not match the input shape.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            data = paddle.rand(shape=[1, 1, 6, 6])
            pool_out, indices = F.max_pool2d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
            # pool_out shape: [1, 1, 3, 3],  indices shape: [1, 1, 3, 3]
            unpool_out = F.max_unpool2d(pool_out, indices, kernel_size=2, padding=0)
            # unpool_out shape: [1, 1, 6, 6]

            # specify a different output size than the input size
            unpool_out = F.max_unpool2d(pool_out, indices, kernel_size=2, padding=0, output_size=[7, 7])
            # unpool_out shape: [1, 1, 7, 7]

    """
    kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 2, 'pool_stride')
    padding = utils.convert_to_list(padding, 2, 'padding')

    if data_format not in ["NCHW"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW'. Received "
            "Attr(data_format): %s." % str(data_format)
        )

    output_size = _unpool_output_size(
        x, kernel_size, stride, padding, output_size
    )

    if in_dygraph_mode():
        output = _C_ops.unpool(
            x, indices, kernel_size, stride, padding, output_size, data_format
        )
        return output
    elif in_dynamic_mode():
        output = _legacy_C_ops.unpool(
            x,
            indices,
            'unpooling_type',
            'max',
            'ksize',
            kernel_size,
            'strides',
            stride,
            'paddings',
            padding,
            "output_size",
            output_size,
            "data_format",
            data_format,
        )
        return output

    op_type = "unpool"
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype(input_param_name="x")
    unpool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=op_type,
        inputs={"X": x, "Indices": indices},
        outputs={"Out": unpool_out},
        attrs={
            "unpooling_type": "max",
            "ksize": kernel_size,
            "strides": stride,
            "paddings": padding,
            "output_size": output_size,
        },
    )
    return unpool_out


def max_unpool3d(
    x,
    indices,
    kernel_size,
    stride=None,
    padding=0,
    data_format="NCDHW",
    output_size=None,
    name=None,
):
    r"""
    This API implements max unpooling 3d operation.
    `max_unpool3d` accepts the output of `max_pool3d` as input,
    including the indices of the maximum values, and computes the partial inverse.
    All non-maximum values are set to zero.

    - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
    - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where

    .. math::
        D_{out} = (D_{in} - 1) * stride[0] - 2 * padding[0] + kernel\_size[0]

    .. math::
        H_{out} = (H_{in} - 1) * stride[1] - 2 * padding[1] + kernel\_size[1]

    .. math::
        W_{out} = (W_{in} - 1) * stride[2] - 2 * padding[2] + kernel\_size[2]

    or as given by :attr:`output_size` in the call operator


    Args:
        x (Tensor): The input tensor of unpooling operator which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"`,
                          where `N` is batch size, `C` is the number of channels, `D` is
                          the depth of the feature, `H` is the height of the feature,
                          and `W` is the width of the feature. The data type is float32 or float64.
        indices (Tensor): The indices given out by maxpooling3d which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"` ,
                          where `N` is batch size, `C` is the number of channels, `D` is
                          the depth of the feature, `H` is the height of the feature,
                          and `W` is the width of the feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list,
            it must contain an integer.
        padding (int | tuple): Padding that was added to the input.
        output_size(list|tuple, optional): The target output size. If output_size is not specified,
                           the actual output shape will be automatically calculated by (input_shape,
                           kernel_size, stride, padding).
        data_format (string): The data format of the input and output data.
                        The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             None by default.

    Returns:
        Tensor: The output tensor of unpooling result.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            data = paddle.rand(shape=[1, 1, 4, 4, 6])
            pool_out, indices = F.max_pool3d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
            # pool_out shape: [1, 1, 2, 2, 3],  indices shape: [1, 1, 2, 2, 3]
            unpool_out = F.max_unpool3d(pool_out, indices, kernel_size=2, padding=0)
            # unpool_out shape: [1, 1, 4, 4, 6]
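
            # output_size may also be given explicitly (illustrative); it must
            # lie within one stride of the automatically computed size
            unpool_out = F.max_unpool3d(pool_out, indices, kernel_size=2, padding=0, output_size=[4, 4, 6])
            # unpool_out shape: [1, 1, 4, 4, 6]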

    """
    kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 3, 'pool_stride')
    padding = utils.convert_to_list(padding, 3, 'padding')

    if data_format not in ["NCDHW"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW'. Received "
            "Attr(data_format): %s." % str(data_format)
        )

    output_size = _unpool_output_size(
        x, kernel_size, stride, padding, output_size
    )

    if in_dygraph_mode():
        output = _C_ops.unpool3d(
            x, indices, kernel_size, stride, padding, output_size, data_format
        )
        return output
    elif in_dynamic_mode():
        output = _legacy_C_ops.unpool3d(
            x,
            indices,
            'unpooling_type',
            'max',
            'ksize',
            kernel_size,
            'strides',
            stride,
            'paddings',
            padding,
            "output_size",
            output_size,
            "data_format",
            data_format,
        )
        return output

    op_type = "unpool3d"
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype(input_param_name="x")
    unpool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=op_type,
        inputs={"X": x, "Indices": indices},
        outputs={"Out": unpool_out},
        attrs={
            "unpooling_type": "max",
            "ksize": kernel_size,
            "strides": stride,
            "paddings": padding,
            "output_size": output_size,
        },
    )
    return unpool_out


def max_pool2d(
    x,
    kernel_size,
    stride=None,
    padding=0,
    return_mask=False,
    ceil_mode=False,
    data_format="NCHW",
    name=None,
):
    """
    This API implements max pooling 2d operation.
    See more details in :ref:`api_nn_pooling_MaxPool2d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
                          `"NHWC"`, where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain two integers, (kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be a square of an int.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain two integers, (stride_Height, stride_Width).
            Otherwise, the pool stride size will be a square of an int.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on all sides.
            3. A list[int] or tuple(int) whose length is 2, [pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 4. [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): When True, use `ceil` instead of `floor` to compute the output shape. Default False.
        return_mask (bool): Whether to return the max indices along with the outputs. Default False; only supported with the `"NCHW"` data format.
        data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
                        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.

    Examples:
        .. code-block:: python

          import paddle
          import paddle.nn.functional as F

          # max pool2d
          x = paddle.uniform([1, 3, 32, 32], paddle.float32)
          out = F.max_pool2d(x, kernel_size=2, stride=2, padding=0)
          # out.shape [1, 3, 16, 16]
          # for return_mask=True
          out, max_indices = F.max_pool2d(x, kernel_size=2, stride=2, padding=0, return_mask=True)
          # out.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16],
    """

    kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 2, 'pool_stride')

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format)
        )

    channel_last = data_format == "NHWC"

    padding, padding_algorithm = _update_padding_nd(
        padding, num_dims=2, channel_last=channel_last, ceil_mode=ceil_mode
    )

    if data_format == "NHWC" and return_mask:
        raise ValueError(
            "When setting return_mask to true, data_format must be set to NCHW in API:max_pool2d"
        )

    if in_dygraph_mode():
        if return_mask:
            output = _C_ops.max_pool2d_with_index(
                x, kernel_size, stride, padding, False, False
            )
            return output if return_mask else output[0]
        else:
            return _C_ops.pool2d(
                x,
                kernel_size,
                stride,
                padding,
                ceil_mode,
                True,
                data_format,
                'max',
                False,
                False,
                padding_algorithm,
            )

    else:
        op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
        helper = LayerHelper(op_type, **locals())
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'max_pool2d'
        )
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)

        if return_mask:
            mask = helper.create_variable_for_type_inference("int32")
            outputs = {"Out": pool_out, "Mask": mask}

            helper.append_op(
                type="max_pool2d_with_index",
                inputs={"X": x},
                outputs=outputs,
                attrs={
                    "pooling_type": 'max',
                    "ksize": kernel_size,
                    "global_pooling": False,
                    "strides": stride,
                    "paddings": padding,
                    "padding_algorithm": padding_algorithm,
                    "use_cudnn": True,
                    "ceil_mode": ceil_mode,
                    "use_mkldnn": False,
                    "exclusive": True,
                    "data_format": data_format,
                },
            )
            return (pool_out, mask)

        else:
            outputs = {"Out": pool_out}

            helper.append_op(
                type="pool2d",
                inputs={"X": x},
                outputs=outputs,
                attrs={
                    "pooling_type": 'max',
                    "ksize": kernel_size,
                    "global_pooling": False,
                    "strides": stride,
                    "paddings": padding,
                    "padding_algorithm": padding_algorithm,
                    "use_cudnn": True,
                    "ceil_mode": ceil_mode,
                    "use_mkldnn": False,
                    "exclusive": True,
                    "data_format": data_format,
                },
            )
            return pool_out


def max_pool3d(
    x,
    kernel_size,
    stride=None,
    padding=0,
    return_mask=False,
    ceil_mode=False,
    data_format="NCDHW",
    name=None,
):
    """
    This API implements max pooling 3d operation.
    See more details in :ref:`api_nn_pooling_MaxPool3d` .
    Args:
        x (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"` or `"NDHWC"`, where N represents batch size, C represents the number of channels, D, H and W represent the depth, height and width of the feature respectively.
        kernel_size (int|list|tuple): The pool kernel size. If the kernel size
            is a tuple or list, it must contain three integers,
            (kernel_size_Depth, kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be the cube of an int.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, (stride_Depth, stride_Height, stride_Width).
            Otherwise, the pool stride size will be a cube of an int.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on every side.
            3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0. (See the padding sketch at the end of the examples below.)
        ceil_mode (bool): Whether to use the ceil function to calculate output depth, height and width. If it is set to False, the floor function will be used. Default False.
        return_mask (bool): Whether to return the max indices along with the outputs. Default False. Only support "NCDHW" data_format.
        data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
                        The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.

    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.
    Examples:
        .. code-block:: python
          import paddle
          import paddle.nn.functional as F
          # max pool3d
          x = paddle.uniform([1, 3, 32, 32, 32])
          output = F.max_pool3d(x,
                                kernel_size=2,
                                stride=2, padding=0)
          # output.shape [1, 3, 16, 16, 16]
          # for return_mask=True
          x = paddle.uniform([1, 3, 32, 32, 32])
          output, max_indices = paddle.nn.functional.max_pool3d(x,
                                                                kernel_size=2,
                                                                stride=2,
                                                                padding=0,
                                                                return_mask=True)

          # output.shape [1, 3, 16, 16, 16], max_indices.shape [1, 3, 16, 16, 16]
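
          # illustrative padding sketch (not part of the original examples):
          # padding may also be a 'same'/'valid' string or per-dimension
          # sizes, as listed in the Args section above
          out_same = F.max_pool3d(x, kernel_size=3, stride=2, padding='same')
          # out_same.shape [1, 3, 16, 16, 16]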
    """
    kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 3, 'pool_stride')

    channel_last = _channel_last(data_format, 3)
    padding, padding_algorithm = _update_padding_nd(
        padding, 3, channel_last=channel_last, ceil_mode=ceil_mode
    )
    if data_format == "NDHWC" and return_mask:
        raise ValueError(
            "When setting return_mask to true, data_format must be set to NCDHW in API:max_pool3d"
        )

    if in_dygraph_mode():
        if return_mask:
            output = _C_ops.max_pool3d_with_index(
                x, kernel_size, stride, padding, False, False
            )
            return output if return_mask else output[0]
        else:
            return _C_ops.pool3d(
                x,
                kernel_size,
                stride,
                padding,
                ceil_mode,
                True,
                data_format,
                'max',
                False,
                False,
                padding_algorithm,
            )
    else:
        op_type = "max_pool3d_with_index" if return_mask else "pool3d"
        helper = LayerHelper(op_type, **locals())
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)
        mask = helper.create_variable_for_type_inference('int32')
        outputs = {"Out": pool_out, "Mask": mask}
        helper.append_op(
            type=op_type,
            inputs={"X": x},
            outputs=outputs,
            attrs={
                "pooling_type": 'max',
                "ksize": kernel_size,
                "global_pooling": False,
                "strides": stride,
                "paddings": padding,
                "padding_algorithm": padding_algorithm,
                "use_cudnn": True,
                "ceil_mode": ceil_mode,
                "use_mkldnn": False,
                "exclusive": False,
                "data_format": data_format,
            },
        )
        return (pool_out, mask) if return_mask else pool_out


def adaptive_avg_pool1d(x, output_size, name=None):
    """
    Adaptive average pooling 1d operation on :attr:`x` according to :attr:`output_size`.

    Notes:
        See more details in :ref:`api_nn_pooling_AdaptiveAvgPool1d` .
    Args:
        x (Tensor): The input Tensor of pooling, which is a 3-D tensor with shape :math:`[N, C, L]`, where :math:`N` is batch size, :math:`C` is the number of channels and :math:`L` is the length of the feature. The data type is float32 or float64.
        output_size (int): The target output size. Its data type must be int.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: The result of 1D adaptive average pooling. Its data type is same as input.

    Examples:
        .. code-block:: python

            # average adaptive pool1d
            # suppose input data in shape of [N, C, L], `output_size` is m or [m],
            # output shape is [N, C, m], adaptive pool divides the L dimension
            # of input data into m grids evenly and performs pooling in each
            # grid to get the output.
            # adaptive avg pool performs calculations as follows:
            #
            #     for i in range(m):
            #         lstart = floor(i * L / m)
            #         lend = ceil((i + 1) * L / m)
            #         output[:, :, i] = sum(input[:, :, lstart: lend]) / (lend - lstart)
            #
            import paddle
            import paddle.nn.functional as F

            data = paddle.uniform([1, 3, 32])
            pool_out = F.adaptive_avg_pool1d(data, output_size=16)
            # pool_out shape: [1, 3, 16]
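
            # illustrative: output_size=1 reduces to global average pooling
            # over L (a sketch; `global_out` is a name chosen for illustration)
            global_out = F.adaptive_avg_pool1d(data, output_size=1)
            # global_out shape: [1, 3, 1]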
    """
    pool_type = 'avg'
    _check_input(x, 3)
    pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')
    x = unsqueeze(x, [2])
    if in_dygraph_mode():
        x = x._use_gpudnn(False)
        pool_out = _C_ops.pool2d(
            x,
            pool_size,
            [1, 1],
            [0, 0],
            False,
            True,
            "NCHW",
            pool_type,
            False,
            True,
            "EXPLICIT",
        )
        return squeeze(pool_out, [2])
    else:
        l_type = "pool2d"
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'adaptive_avg_pool1d'
        )
        check_type(output_size, 'pool_size', int, 'adaptive_avg_pool1d')
        helper = LayerHelper(l_type, **locals())
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)
        outputs = {"Out": pool_out}
        helper.append_op(
            type=l_type,
            inputs={"X": x},
            outputs=outputs,
            attrs={
                "pooling_type": pool_type,
                "ksize": pool_size,
                "adaptive": True,
            },
        )
        return squeeze(pool_out, [2])


def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
    r"""

    Applies 2D adaptive avg pooling on input tensor. The h and w dimensions
    of the output tensor are determined by the parameter output_size.
    For avg adaptive pool2d:

    ..  math::
        hstart &= floor(i * H_{in} / H_{out}) \\
        hend &= ceil((i + 1) * H_{in} / H_{out}) \\
        wstart &= floor(j * W_{in} / W_{out}) \\
        wend &= ceil((j + 1) * W_{in} / W_{out}) \\
        Output(i ,j) &= \frac{\sum Input[hstart:hend, wstart:wend]}{(hend - hstart) * (wend - wstart)}

    Args:
        x (Tensor): The input tensor of adaptive avg pool2d operator, which is a 4-D tensor.
                          The data type can be float32 or float64.
        output_size (int|list|tuple): The target output size. If it is a tuple or list,
            it must contain two elements, (H, W). H and W can be either an int, or None which means
            the size will be the same as that of the input.
        data_format (str, optional): The data format of the input and output data. An optional string
            from: "NCHW", "NHWC". The default is "NCHW". When it is "NCHW", the data is stored in
            the order of: [batch_size, input_channels, input_height, input_width].
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to be set and
                             is None by default.
    Returns:
        Tensor, The output tensor of avg adaptive pool2d result. The data type is same as input tensor.
    Examples:
        .. code-block:: python

            # adaptive avg pool2d
            # suppose input data in shape of [N, C, H, W], `output_size` is [m, n],
            # output shape is [N, C, m, n], adaptive pool divides H and W dimensions
            # of input data into m * n grids evenly and performs pooling in each
            # grid to get the output.
            # adaptive avg pool performs calculations as follows:
            #
            #     for i in range(m):
            #         for j in range(n):
            #             hstart = floor(i * H / m)
            #             hend = ceil((i + 1) * H / m)
            #             wstart = floor(j * W / n)
            #             wend = ceil((j + 1) * W / n)
            #             output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
            #
            import paddle
            x = paddle.rand([2, 3, 32, 32])
            # x.shape is [2, 3, 32, 32]
            out = paddle.nn.functional.adaptive_avg_pool2d(
                            x=x,
                            output_size=[3, 3])
            # out.shape is [2, 3, 3, 3]
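
            # illustrative: an output_size entry may be None to keep that
            # dimension of the input (a sketch reusing x from above;
            # `out_keep_w` is a name chosen for illustration)
            out_keep_w = paddle.nn.functional.adaptive_avg_pool2d(
                            x=x,
                            output_size=[3, None])
            # out_keep_w.shape is [2, 3, 3, 32]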
    """
    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format)
        )

    if data_format == "NCHW":
        in_h, in_w = x.shape[2:4]
    else:
        in_h, in_w = x.shape[1:3]

    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 2, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_h
        if output_size[1] is None:
            output_size[1] = in_w

    if in_dygraph_mode():
        output_size = [
            item.numpy().item(0) if isinstance(item, Variable) else item
            for item in output_size
        ]
    # output_size supports Variable in static graph mode
    elif utils._contain_var(output_size):
        output_size = utils._convert_to_tensor_list(output_size)

    if in_dygraph_mode():
        x = x._use_gpudnn(False)
        return _C_ops.pool2d(
            x,
            output_size,
            [1, 1],
            [0, 0],
            False,
            True,
            data_format,
            'avg',
            False,
            True,
            "EXPLICIT",
        )
    else:
        l_type = 'pool2d'
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'adaptive_avg_pool2d'
        )
        check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d')
        helper = LayerHelper(l_type, **locals())
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)
        outputs = {"Out": pool_out}
        helper.append_op(
            type=l_type,
            inputs={"X": x},
            outputs=outputs,
            attrs={
                "pooling_type": "avg",
                "ksize": output_size,
                "adaptive": True,
                "data_format": data_format,
            },
        )
        return pool_out


def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
    r"""

    This operation applies 3D adaptive avg pooling on input tensor. The d, h and w dimensions
    of the output tensor are determined by the parameter output_size.
    For avg adaptive pool3d:

    ..  math::
        dstart &= floor(i * D_{in} / D_{out}) \\
        dend &= ceil((i + 1) * D_{in} / D_{out}) \\
        hstart &= floor(j * H_{in} / H_{out}) \\
        hend &= ceil((j + 1) * H_{in} / H_{out}) \\
        wstart &= floor(k * W_{in} / W_{out}) \\
        wend &= ceil((k + 1) * W_{in} / W_{out}) \\
        Output(i ,j, k) &= \frac{\sum Input[dstart:dend, hstart:hend, wstart:wend]}
            {(dend - dstart) * (hend - hstart) * (wend - wstart)}

    Args:
        x (Tensor): The input tensor of adaptive avg pool3d operator, which is a 5-D tensor.
            The data type can be float32, float64.
        output_size (int|list|tuple): The target output size. If it is a tuple or
            list, it must contain three elements, (D, H, W). D, H and W can be either an int,
            or None which means the size will be the same as that of the input.
        data_format (str, optional): The data format of the input and output data. An optional string
            from: "NCDHW", "NDHWC". The default is "NCDHW". When it is "NCDHW", the data is stored in
            the order of: [batch_size, input_channels, input_depth, input_height, input_width].
        name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`.
            Usually name does not need to be set and is None by default.

    Returns:
        Tensor, The output tensor of avg adaptive pool3d result. The data type is same as input tensor.

    Examples:
        .. code-block:: python

            # adaptive avg pool3d
            # suppose input data in shape of [N, C, D, H, W], `output_size` is [l, m, n],
            # output shape is [N, C, l, m, n], adaptive pool divides D, H and W dimensions
            # of input data into l * m * n grids evenly and performs pooling in each
            # grid to get the output.
            # adaptive avg pool performs calculations as follows:
            #
            #     for i in range(l):
            #         for j in range(m):
            #             for k in range(n):
            #                 dstart = floor(i * D / l)
            #                 dend = ceil((i + 1) * D / l)
            #                 hstart = floor(j * H / m)
            #                 hend = ceil((j + 1) * H / m)
            #                 wstart = floor(k * W / n)
            #                 wend = ceil((k + 1) * W / n)
            #                 output[:, :, i, j, k] =
            #                     avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
            import paddle

            input_data = paddle.randn(shape=(2, 3, 8, 32, 32))
            out = paddle.nn.functional.adaptive_avg_pool3d(
                            x=input_data,
                            output_size=[3, 3, 3])
            # out.shape is [2, 3, 3, 3, 3]
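
            # illustrative: channels-last layout via data_format (a sketch
            # reusing input_data from above; `x_ndhwc` and `out_ndhwc` are
            # names chosen for illustration)
            x_ndhwc = paddle.transpose(input_data, perm=[0, 2, 3, 4, 1])
            out_ndhwc = paddle.nn.functional.adaptive_avg_pool3d(
                            x=x_ndhwc,
                            output_size=[3, 3, 3],
                            data_format='NDHWC')
            # out_ndhwc.shape is [2, 3, 3, 3, 3]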

    """
    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s." % str(data_format)
        )

    if data_format == "NCDHW":
        in_l, in_h, in_w = x.shape[2:5]
    else:
        in_l, in_h, in_w = x.shape[1:4]

    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 3, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_l
        if output_size[1] is None:
            output_size[1] = in_h
        if output_size[2] is None:
            output_size[2] = in_w

    if in_dygraph_mode():
        x = x._use_gpudnn(False)
        return _C_ops.pool3d(
            x,
            output_size,
            [1, 1, 1],
            [0, 0, 0],
            False,
            True,
            data_format,
            'avg',
            False,
            True,
            "EXPLICIT",
        )
    else:
        l_type = 'pool3d'
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'adaptive_avg_pool3d'
        )
        check_type(data_format, 'data_format', str, 'adaptive_avg_pool3d')

        helper = LayerHelper(l_type, **locals())
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)
        outputs = {"Out": pool_out}
        helper.append_op(
            type=l_type,
            inputs={"X": x},
            outputs=outputs,
            attrs={
                "pooling_type": "avg",
                "ksize": output_size,
                "adaptive": True,
                "data_format": data_format,
            },
        )
        return pool_out


def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
    """
    This API implements adaptive max pooling 1d operation.
    See more details in :ref:`api_nn_pooling_AdaptiveMaxPool1d` .

    Args:
        x (Tensor): The input tensor of pooling operator, which is a 3-D tensor
                              with shape [N, C, L].  The format of input tensor is NCL,
                              where N is batch size, C is the number of channels, L is the
                              length of the feature. The data type is float32 or float64.
        output_size (int): The target output size. The value should be an integer.
        return_mask (bool): If true, the index of max pooling point will be returned along
                with outputs. It cannot be set in average pooling type. Default False.
        name(str, optional): For detailed information, please refer
                                 to :ref:`api_guide_Name`. Usually name does not need to be set and
                                 is None by default.
    Returns:
            Tensor: The output tensor of adaptive pooling result. The data type is same
                      as input tensor.

    Examples:
        .. code-block:: python

              # max adaptive pool1d
              # suppose input data in shape of [N, C, L], `output_size` is m or [m],
              # output shape is [N, C, m], adaptive pool divides the L dimension
              # of input data into m grids evenly and performs pooling in each
              # grid to get the output.
              # adaptive max pool performs calculations as follows:
              #
              #     for i in range(m):
              #         lstart = floor(i * L / m)
              #         lend = ceil((i + 1) * L / m)
              #         output[:, :, i] = max(input[:, :, lstart: lend])
              #
              import paddle
              import paddle.nn.functional as F

              data = paddle.uniform([1, 3, 32], paddle.float32)
              pool_out = F.adaptive_max_pool1d(data, output_size=16)
              # pool_out shape: [1, 3, 16]
              pool_out, indices = F.adaptive_max_pool1d(data, output_size=16, return_mask=True)
              # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16]
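
              # illustrative: output_size=1 reduces to global max pooling
              # over L (a sketch reusing data from above; `global_out` is a
              # name chosen for illustration)
              global_out = F.adaptive_max_pool1d(data, output_size=1)
              # global_out shape: [1, 3, 1]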
    """
    _check_input(x, 3)

    pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')

    x = unsqueeze(x, [2])
    if in_dygraph_mode():
        pool_out = _C_ops.max_pool2d_with_index(
            x, pool_size, [1, 1], [0, 0], False, True
        )
        return (
            (squeeze(pool_out[0], [2]), squeeze(pool_out[1], [2]))
            if return_mask
            else squeeze(pool_out[0], [2])
        )
    else:
        l_type = 'max_pool2d_with_index'
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64'], 'adaptive_max_pool1d'
        )
        check_type(output_size, 'pool_size', int, 'adaptive_max_pool1d')
        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool1d')

        helper = LayerHelper(l_type, **locals())
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)
        mask = helper.create_variable_for_type_inference('int32')
        outputs = {"Out": pool_out, "Mask": mask}
        helper.append_op(
            type=l_type,
            inputs={"X": x},
            outputs=outputs,
            attrs={
                "pooling_type": 'max',
                "ksize": pool_size,
                "adaptive": True,
            },
        )
        return (
            (squeeze(pool_out, [2]), squeeze(mask, [2]))
            if return_mask
            else squeeze(pool_out, [2])
        )


def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
    """
    This operation applies a 2D adaptive max pooling on input tensor.
    See more details in :ref:`api_nn_pooling_AdaptiveMaxPool2d` .
    Args:
        x (Tensor): The input tensor of adaptive max pool2d operator, which is a 4-D tensor. The data type can be float16, float32, float64, int32 or int64.
        output_size (int|list|tuple): The target output size. If it is a tuple or list, it must contain two elements, (H, W). H and W can be either an int, or None which means the size will be the same as that of the input.
        return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False.
        name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name does not need to be set and is None by default.
    Returns:
        Tensor: The output tensor of adaptive max pool2d result. The data type is same as input tensor.
    Examples:
        .. code-block:: python
          # max adaptive pool2d
          # suppose input data in the shape of [N, C, H, W], `output_size` is [m, n]
          # output shape is [N, C, m, n], adaptive pool divides H and W dimensions
          # of input data into m*n grids evenly and performs pooling in each
          # grid to get the output.
          # adaptive max pool performs calculations as follows:
          #
          #     for i in range(m):
          #         for j in range(n):
          #             hstart = floor(i * H / m)
          #             hend = ceil((i + 1) * H / m)
          #             wstart = floor(j * W / n)
          #             wend = ceil((j + 1) * W / n)
          #             output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
          #
          import paddle
          input_data = paddle.randn(shape=(2, 3, 32, 32))
          out = paddle.nn.functional.adaptive_max_pool2d(
                        x=input_data,
                        output_size=[3, 3])
          # out.shape is [2, 3, 3, 3]
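
          # illustrative: return_mask=True also returns the argmax indices
          # (a sketch reusing input_data from above)
          pool_out, mask = paddle.nn.functional.adaptive_max_pool2d(
                        x=input_data,
                        output_size=[3, 3],
                        return_mask=True)
          # pool_out.shape is [2, 3, 3, 3], mask.shape is [2, 3, 3, 3]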
    """
    _check_input(x, 4)

    in_h, in_w = x.shape[2:4]
    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 2, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_h
        if output_size[1] is None:
            output_size[1] = in_w
    if in_dygraph_mode():
        pool_out = _C_ops.max_pool2d_with_index(
            x, output_size, [1, 1], [0, 0], False, True
        )
        return pool_out if return_mask else pool_out[0]
    else:
        l_type = 'max_pool2d_with_index'
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64'], 'adaptive_max_pool2d'
        )
        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool2d')
        # check_type(output_size, 'pool_size', (int), 'adaptive_max_pool2d')

        helper = LayerHelper(l_type, **locals())
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)
        mask = helper.create_variable_for_type_inference('int32')
        outputs = {"Out": pool_out, "Mask": mask}
        helper.append_op(
            type=l_type,
            inputs={"X": x},
            outputs=outputs,
            attrs={
                "pooling_type": 'max',
                "ksize": output_size,
                "adaptive": True,
            },
        )
        return (pool_out, mask) if return_mask else pool_out


def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
    """
    This operation applies a 3D adaptive max pooling on input tensor.
    See more details in :ref:`api_nn_pooling_AdaptiveMaxPool3d` .
    Args:
        x (Tensor): The input tensor of adaptive max pool3d operator, which is a 5-D tensor. The data type can be float32, float64.
        output_size (int|list|tuple): The target output size. If it is a tuple or list, it must contain three elements, (D, H, W). D, H and W can be either an int, or None which means the size will be the same as that of the input.
        return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False.
        name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name does not need to be set and is None by default.
    Returns:
        Tensor: The output tensor of adaptive max pool3d result. The data type is same as input tensor.
    Examples:
        .. code-block:: python
          # adaptive max pool3d
          # suppose input data in the shape of [N, C, D, H, W], `output_size` is [l, m, n]
          # output shape is [N, C, l, m, n], adaptive pool divides D, H and W dimensions
          # of input data into l*m*n grids evenly and performs pooling in each
          # grid to get the output.
          # adaptive max pool performs calculations as follows:
          #
          #     for i in range(l):
          #         for j in range(m):
          #             for k in range(n):
          #                 dstart = floor(i * D / l)
          #                 dend = ceil((i + 1) * D / l)
          #                 hstart = floor(j * H / m)
          #                 hend = ceil((j + 1) * H / m)
          #                 wstart = floor(k * W / n)
          #                 wend = ceil((k + 1) * W / n)
          #                 output[:, :, i, j, k] = max(input[:, :, dstart: dend, hstart: hend, wstart: wend])
          #
          import paddle
          input_data = paddle.randn(shape=(2, 3, 8, 32, 32))
          out = paddle.nn.functional.adaptive_max_pool3d(
                        x=input_data,
                        output_size=[3, 3, 3])
          # out.shape is [2, 3, 3, 3, 3]
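
          # illustrative: return_mask=True also returns the argmax indices
          # (a sketch reusing input_data from above)
          pool_out, mask = paddle.nn.functional.adaptive_max_pool3d(
                        x=input_data,
                        output_size=[3, 3, 3],
                        return_mask=True)
          # pool_out.shape is [2, 3, 3, 3, 3], mask.shape is [2, 3, 3, 3, 3]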
    """
    _check_input(x, 5)

    in_l, in_h, in_w = x.shape[2:5]
    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 3, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_l
        if output_size[1] is None:
            output_size[1] = in_h
        if output_size[2] is None:
            output_size[2] = in_w

    if in_dygraph_mode():
        # By default, strides is [1,1,1] and paddings is [0, 0, 0]
        pool_out = _C_ops.max_pool3d_with_index(
            x, output_size, [1, 1, 1], [0, 0, 0], False, True
        )
        return pool_out if return_mask else pool_out[0]
    else:
        l_type = 'max_pool3d_with_index'
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64'], 'adaptive_max_pool3d'
        )
        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool3d')
        # check_type(output_size, 'pool_size', (int), 'adaptive_max_pool3d')

        helper = LayerHelper(l_type, **locals())
        dtype = helper.input_dtype(input_param_name='x')
        pool_out = helper.create_variable_for_type_inference(dtype)
        mask = helper.create_variable_for_type_inference('int32')
        outputs = {"Out": pool_out, "Mask": mask}
        helper.append_op(
            type=l_type,
            inputs={"X": x},
            outputs=outputs,
            attrs={
                "pooling_type": 'max',
                "ksize": output_size,
                "adaptive": True,
            },
        )
        return (pool_out, mask) if return_mask else pool_out