#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define pooling functions
from ...fluid import core
from ...fluid.framework import in_dygraph_mode
from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze
from ...fluid.data_feeder import check_type, check_variable_and_dtype

__all__ = []

def _is_list_or_tuple(input):
    return isinstance(input, (list, tuple))


def _check_input(x, dimension):
    if len(x.shape) != dimension:
        raise ValueError(
            "Expected input X to be a {}-D tensor, but received a {}-D {}".
            format(dimension, len(x.shape), type(x)))


def _check_instance(x, x_name, types=(int, float)):
    if not isinstance(x, types):
        raise ValueError("Expected {} type for {} but received type: {}.".
                         format(types, x_name, type(x)))


def _zero_padding_in_batch_and_channel(padding, channel_last):
    if channel_last:
        return list(padding[0]) == [0, 0] and list(padding[-1]) == [0, 0]
    else:
        return list(padding[0]) == [0, 0] and list(padding[1]) == [0, 0]


def _exclude_padding_in_batch_and_channel(padding, channel_last):
    padding_ = padding[1:-1] if channel_last else padding[2:]
    padding_ = [elem for pad_a_dim in padding_ for elem in pad_a_dim]
    return padding_
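
# For reference, a hypothetical example of what the helper above produces:
#   _exclude_padding_in_batch_and_channel(
#       [[0, 0], [0, 0], [1, 2], [3, 4]], channel_last=False) -> [1, 2, 3, 4]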


def _channel_last(data_format, num_dims):
    if num_dims == 1:
        if data_format not in ['NCL', 'NLC']:
            raise ValueError(
                "Attr(data_format) should be 'NCL' or 'NLC'. Received "
                "Attr(data_format): %s" % str(data_format))
        return data_format == "NLC"
    if num_dims == 2:
        if data_format not in ['NCHW', 'NHWC']:
            raise ValueError(
                "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
                "Attr(data_format): %s" % str(data_format))
        return data_format == "NHWC"
    if num_dims == 3:
        if data_format not in ['NCDHW', 'NDHWC']:
            raise ValueError(
                "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
                "Attr(data_format): %s" % str(data_format))
        return data_format == "NDHWC"
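
# Illustrative checks for _channel_last (hypothetical values, for reference only):
#   _channel_last("NLC", 1)   -> True
#   _channel_last("NCHW", 2)  -> False
#   _channel_last("NDHWC", 3) -> True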


def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False):
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '{}'. It can only be 'SAME' or 'VALID'.".
                format(padding))
        if padding == "VALID":
            if ceil_mode:
                raise ValueError(
                    "When Attr(padding) is \"VALID\", Attr(ceil_mode) must be False. "
                    "Received ceil_mode: True.")

            padding_algorithm = "VALID"
            padding = [0] * num_dims
        else:
            padding_algorithm = "SAME"
            padding = [0] * num_dims
    elif _is_list_or_tuple(padding):
        # for padding like
        # [(pad_before, pad_after), (pad_before, pad_after), ...]
        # padding for batch_dim and channel_dim included
        if len(padding) == 2 + num_dims and _is_list_or_tuple(padding[0]):
            if not _zero_padding_in_batch_and_channel(padding, channel_last):
                raise ValueError(
                    "Non-zero padding({}) in the batch or channel dimensions "
                    "is not supported.".format(padding))
            padding_algorithm = "EXPLICIT"
            padding = _exclude_padding_in_batch_and_channel(padding,
                                                            channel_last)
            if utils._is_symmetric_padding(padding, num_dims):
                padding = padding[0::2]
        # for padding like [pad_before, pad_after, pad_before, pad_after, ...]
        elif len(padding) == 2 * num_dims and isinstance(padding[0], int):
            padding_algorithm = "EXPLICIT"
            padding = utils.convert_to_list(padding, 2 * num_dims, 'padding')
            if utils._is_symmetric_padding(padding, num_dims):
                padding = padding[0::2]
        # for padding like [pad_d1, pad_d2, ...]
        elif len(padding) == num_dims and isinstance(padding[0], int):
            padding_algorithm = "EXPLICIT"
            padding = utils.convert_to_list(padding, num_dims, 'padding')
        else:
            raise ValueError("Invalid padding: {}".format(padding))
    # for integer padding
    else:
        padding_algorithm = "EXPLICIT"
        padding = utils.convert_to_list(padding, num_dims, 'padding')
    return padding, padding_algorithm
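
# A few illustrative normalizations performed by _update_padding_nd (hypothetical
# values, for reference only):
#   _update_padding_nd("same", 2)        -> ([0, 0], "SAME")
#   _update_padding_nd(1, 2)             -> ([1, 1], "EXPLICIT")
#   _update_padding_nd([1, 1, 2, 2], 2)  -> ([1, 2], "EXPLICIT")  # symmetric pairs collapse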

def _expand_low_nd_padding(padding):
    # 1d to 2d fake input: prepend zero padding for the dummy height dimension
    if len(padding) == 2:
        padding = [0] * 2 + padding
    elif len(padding) == 1:
        padding = [0] + padding
    else:
        raise ValueError(
            "The length of padding should be 1 or 2. But got padding={}".
            format(padding))
    return padding
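
# For reference (hypothetical values): _expand_low_nd_padding([3]) -> [0, 3]
# and _expand_low_nd_padding([1, 2]) -> [0, 0, 1, 2].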


def avg_pool1d(x,
               kernel_size,
               stride=None,
               padding=0,
               exclusive=True,
               ceil_mode=False,
               name=None):
    """
    This API implements average pooling 1d operation.
    See more details in :ref:`api_nn_pooling_AvgPool1d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 3-D tensor with
                          shape [N, C, L], where `N` is batch size, `C` is the number of channels,
                          `L` is the length of the feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain an integer.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on both sides.
            3. A list[int] or tuple(int) whose length is 1, which means the feature map is zero padded by the size of `padding[0]` on both sides.
            4. A list[int] or tuple(int) whose length is 2. It has the form [pad_before, pad_after].
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is `True`.
        ceil_mode (bool): Whether to use the ceil function to calculate output height and width.
            If it is set to False, the floor function will be used. The default value is False.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to
                             be set and is None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is the same as the input tensor.

    Raises:
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is "VALID", but `ceil_mode` is True.
        ValueError: If `padding` is a list or tuple but its length is greater than 1.
        ShapeError: If the input is not a 3-D tensor.
        ShapeError: If the output's shape calculated is not greater than 0.

    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            import numpy as np

            data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
            out = F.avg_pool1d(data, kernel_size=2, stride=2, padding=0)
            # out shape: [1, 3, 16]
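
            # an additional sketch: string padding is also accepted (see the
            # padding forms documented above); 'SAME' with stride 1 keeps the length
            out_same = F.avg_pool1d(data, kernel_size=2, stride=1, padding='SAME')
            # out_same shape: [1, 3, 32]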
    """
    # NCL to NCHW
    data_format = "NCHW"
    if not in_dygraph_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool1d')
    _check_input(x, 3)
    x = unsqueeze(x, [2])
    kernel_size = utils.convert_to_list(kernel_size, 1, 'kernel_size')
    kernel_size = [1] + kernel_size
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 1, 'pool_stride')
        stride = [1] + stride

    channel_last = _channel_last("NCL", 1)
    padding, padding_algorithm = _update_padding_nd(
        padding, 1, channel_last=channel_last, ceil_mode=ceil_mode)

    # 1d is implemented with the 2d op, so expand padding in advance.
    padding = _expand_low_nd_padding(padding)

    if in_dygraph_mode():
        output = core.ops.pool2d(
            x, 'pooling_type', 'avg', 'ksize', kernel_size, 'global_pooling',
            False, 'strides', stride, 'paddings', padding, 'padding_algorithm',
            padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
            'use_mkldnn', False, 'exclusive', exclusive, 'data_format',
            data_format)
        return squeeze(output, [2])

    op_type = 'pool2d'
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=op_type,
        inputs={"X": x},
        outputs={"Out": pool_out},
        attrs={
            "pooling_type": 'avg',
            "ksize": kernel_size,
            "global_pooling": False,
            "strides": stride,
            "paddings": padding,
            "padding_algorithm": padding_algorithm,
            "use_cudnn": True,
            "ceil_mode": ceil_mode,
            "use_mkldnn": False,
            "exclusive": exclusive,
            "data_format": data_format,
        })

    return squeeze(pool_out, [2])


def avg_pool2d(x,
               kernel_size,
               stride=None,
               padding=0,
               ceil_mode=False,
               exclusive=True,
               divisor_override=None,
               data_format="NCHW",
               name=None):
    """
    This API implements average pooling 2d operation.
    See more details in :ref:`api_nn_pooling_AvgPool2d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
                          `"NHWC"`, where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If it is a tuple or list,
            it must contain two integers, (kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be a square of an int.
        stride (int|list|tuple): The stride size. If it is a tuple or list,
            it must contain two integers, (stride_Height, stride_Width).
            Otherwise, the stride size will be a square of an int.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on every side.
            3. A list[int] or tuple(int) whose length is 2, [pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 4. [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): When True, will use `ceil` instead of `floor` to compute the output shape.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is `True`.
        divisor_override (float): If specified, it will be used as divisor, otherwise kernel_size will be used. Default None.
        data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
                        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to
                             be set and is None by default.

    Returns:
        Tensor: The output tensor of pooling result. The data type is the same as the input tensor.

    Raises:
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is "VALID", but `ceil_mode` is True.
        ShapeError: If the output's shape calculated is not greater than 0.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np
            
            # avg pool2d
            x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
            out = F.avg_pool2d(x,
                            kernel_size=2,
                            stride=2, padding=0)
            # out.shape [1, 3, 16, 16]
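
            # an illustrative (hypothetical) use of divisor_override: dividing by 1
            # instead of the kernel area turns the average into a window sum
            out_sum = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0,
                                   divisor_override=1)
            # out_sum.shape [1, 3, 16, 16]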
    """
    kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 2, 'pool_stride')

    channel_last = _channel_last(data_format, 2)
    padding, padding_algorithm = _update_padding_nd(
        padding, 2, channel_last, ceil_mode=ceil_mode)

    if in_dygraph_mode():
        output = core.ops.pool2d(
            x, 'pooling_type', 'avg', 'ksize', kernel_size, 'global_pooling',
            False, 'padding_algorithm', padding_algorithm, 'strides', stride,
            'paddings', padding, 'use_cudnn', True, 'ceil_mode', ceil_mode,
            'use_mkldnn', False, 'exclusive', exclusive, 'data_format',
            data_format)
        if divisor_override is None:
            return output
        else:
            _check_instance(divisor_override, "divisor_override")
            return output * (kernel_size[0] * kernel_size[1]) / divisor_override

    op_type = 'pool2d'
    helper = LayerHelper(op_type, **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool2d')
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=op_type,
        inputs={"X": x},
        outputs={"Out": pool_out},
        attrs={
            "pooling_type": "avg",
            "ksize": kernel_size,
            "global_pooling": False,
            "strides": stride,
            "paddings": padding,
            "padding_algorithm": padding_algorithm,
            "use_cudnn": True,
            "ceil_mode": ceil_mode,
            "use_mkldnn": False,
            "exclusive": exclusive,
            "data_format": data_format,
        })

    if divisor_override is None:
        return pool_out
    else:
        _check_instance(divisor_override, "divisor_override")
        return pool_out * (kernel_size[0] * kernel_size[1]) / divisor_override


def avg_pool3d(x,
               kernel_size,
               stride=None,
               padding=0,
               ceil_mode=False,
               exclusive=True,
               divisor_override=None,
               data_format="NCDHW",
               name=None):
    """
    This API implements average pooling 3d operation.
    See more details in :ref:`api_nn_pooling_AvgPool3d` .

    Args:
        x (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
                          shape [N, C, D, H, W], where `N` represents the batch size, `C` represents
                          the number of channels, `D`, `H` and `W` represent the depth, height and width of the feature respectively.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size
            is a tuple or list, it must contain three integers,
            (kernel_size_Depth, kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be the cube of an int.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, (stride_Depth, stride_Height, stride_Width).
            Otherwise, the pool stride size will be a cube of an int.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on every side.
            3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): When True, will use `ceil` instead of `floor` to compute the output shape.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is True.
        divisor_override (int|float): If specified, it will be used as divisor, otherwise kernel_size will be used. Default None.
        data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
                        The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to
                             be set and is None by default.

    Returns:
        Tensor: The output tensor of pooling result. The data type is the same as the input tensor.

    Raises:
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is "VALID", but `ceil_mode` is True.
        ShapeError: If the output's shape calculated is not greater than 0.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np

          x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
          # avg pool3d
          out = paddle.nn.functional.avg_pool3d(
                                            x,
                                            kernel_size = 2,
                                            stride = 2,
                                            padding=0)
          # out.shape: [1, 3, 16, 16, 16]
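
          # a hypothetical NDHWC variant of the same pooling
          x_ndhwc = paddle.transpose(x, perm=[0, 2, 3, 4, 1])
          out_ndhwc = paddle.nn.functional.avg_pool3d(
              x_ndhwc, kernel_size=2, stride=2, padding=0, data_format="NDHWC")
          # out_ndhwc.shape: [1, 16, 16, 16, 3]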
    """
    kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 3, 'pool_stride')

    channel_last = _channel_last(data_format, 3)
    padding, padding_algorithm = _update_padding_nd(
        padding, 3, channel_last=channel_last, ceil_mode=ceil_mode)

    if in_dygraph_mode():
        output = core.ops.pool3d(
            x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides', stride,
            'paddings', padding, 'global_pooling', False, 'padding_algorithm',
            padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
            'use_mkldnn', False, 'exclusive', exclusive, 'data_format',
            data_format)
        if divisor_override is None:
            return output
        else:
            _check_instance(divisor_override, "divisor_override")
            return output * (kernel_size[0] * kernel_size[1] *
                             kernel_size[2]) / divisor_override

    op_type = "pool3d"
    helper = LayerHelper(op_type, **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool3d')
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)
    outputs = {"Out": pool_out}

    helper.append_op(
        type=op_type,
        inputs={"X": x},
        outputs=outputs,
        attrs={
            "pooling_type": 'avg',
            "ksize": kernel_size,
            "global_pooling": False,
            "strides": stride,
            "paddings": padding,
            "padding_algorithm": padding_algorithm,
            "use_cudnn": True,
            "ceil_mode": ceil_mode,
            "use_mkldnn": False,
            "exclusive": exclusive,
            "data_format": data_format,
        })

    if divisor_override is None:
        return pool_out
    else:
        _check_instance(divisor_override, "divisor_override")
        return pool_out * (kernel_size[0] * kernel_size[1] *
                           kernel_size[2]) / divisor_override


def max_pool1d(x,
               kernel_size,
               stride=None,
               padding=0,
               return_mask=False,
               ceil_mode=False,
               name=None):
    """
    This API implements max pooling 1d operation.
    See more details in :ref:`api_nn_pooling_MaxPool1d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 3-D tensor with
                          shape [N, C, L], where `N` is batch size, `C` is the number of channels,
                          `L` is the length of the feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain an integer.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain an integer.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An integer, which means the feature map is zero padded by size of `padding` on both sides.
            3. A list[int] or tuple(int) whose length is 1, which means the feature map is zero padded by the size of `padding[0]` on both sides.
            4. A list[int] or tuple(int) whose length is 2. It has the form [pad_before, pad_after].
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        return_mask (bool): Whether to return the max indices along with the outputs. Default is `False`.
        ceil_mode (bool): Whether to use the ceil function to calculate output height and width.
            If it is set to False, the floor function will be used. Default is False.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to
                             be set and is None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is the same as the input tensor.

    Raises:
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is "VALID", but `ceil_mode` is True.
        ShapeError: If the input is not a 3-D tensor.
        ShapeError: If the output's shape calculated is not greater than 0.

    Examples:
        .. code-block:: python
          import paddle
          import paddle.nn.functional as F
          import numpy as np

          data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
          pool_out = F.max_pool1d(data, kernel_size=2, stride=2, padding=0)
          # pool_out shape: [1, 3, 16]
          pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
          # pool_out shape: [1, 3, 16],  indices shape: [1, 3, 16]
    """
    # NCL to NCHW
    data_format = "NCHW"
    if not in_dygraph_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool1d')
    _check_input(x, 3)
    x = unsqueeze(x, [2])
    kernel_size = [1] + utils.convert_to_list(kernel_size, 1, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = [1] + utils.convert_to_list(stride, 1, 'pool_stride')
    padding, padding_algorithm = _update_padding_nd(
        padding, 1, ceil_mode=ceil_mode)

    # 1d is implemented with the 2d op, so expand padding in advance.
    padding = _expand_low_nd_padding(padding)

    if in_dygraph_mode():
        if return_mask:
            pool_out = core.ops.max_pool2d_with_index(
                x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
                stride, 'paddings', padding, 'padding_algorithm',
                padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
                'use_mkldnn', False, 'exclusive', True, 'data_format',
                data_format)
            return (squeeze(pool_out[0], [2]), squeeze(pool_out[1], [2]))
        else:
            pool_out = core.ops.pool2d(
                x, 'pooling_type', 'max', 'ksize', kernel_size,
                'global_pooling', False, 'padding_algorithm', padding_algorithm,
                'strides', stride, 'paddings', padding, 'use_cudnn', True,
                'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', True,
                'data_format', data_format)
            return squeeze(pool_out, [2])

    op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference(dtype)
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(
        type=op_type,
        inputs={"X": x},
        outputs=outputs,
        attrs={
            "pooling_type": 'max',
            "ksize": kernel_size,
            "global_pooling": False,
            "strides": stride,
            "paddings": padding,
            "padding_algorithm": padding_algorithm,
            "use_cudnn": True,
            "ceil_mode": ceil_mode,
            "use_mkldnn": False,
            "exclusive": True,
            "data_format": data_format,
        })

    return (squeeze(pool_out, [2]),
            squeeze(mask, [2])) if return_mask else squeeze(pool_out, [2])


def max_pool2d(x,
               kernel_size,
               stride=None,
               padding=0,
               return_mask=False,
               ceil_mode=False,
               data_format="NCHW",
               name=None):
    """
    This API implements max pooling 2d operation.
    See more details in :ref:`api_nn_pooling_MaxPool2d` .

    Args:
        x (Tensor): The input tensor of pooling operator which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
                          `"NHWC"`, where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain two integers, (kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be a square of an int.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain two integers, (stride_Height, stride_Width).
            Otherwise, the pool stride size will be a square of an int.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on every side.
            3. A list[int] or tuple(int) whose length is 2, [pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 4. [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): When True, will use `ceil` instead of `floor` to compute the output shape.
        return_mask (bool): Whether to return the max indices along with the outputs. Default False; only supports `"NCHW"` data format.
        data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
                        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to
                             be set and is None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is the same as the input tensor.

    Raises:
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is "VALID", but `ceil_mode` is True.
        ShapeError: If the output's shape calculated is not greater than 0.
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            import numpy as np
            
            # max pool2d
            x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
            out = F.max_pool2d(x,
                                  kernel_size=2,
                                  stride=2, padding=0)
            # out.shape [1, 3, 16, 16]
            # for return_mask=True
            out, max_indices = F.max_pool2d(x,
                                               kernel_size=2,
                                               stride=2,
                                               padding=0,
                                               return_mask=True)
            # out.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16]
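
            # a hypothetical NHWC variant (return_mask is NCHW-only, so no mask here)
            x_nhwc = paddle.transpose(x, perm=[0, 2, 3, 1])
            out_nhwc = F.max_pool2d(x_nhwc, kernel_size=2, stride=2, padding=0,
                                    data_format="NHWC")
            # out_nhwc.shape [1, 16, 16, 3]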
    """
    kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 2, 'pool_stride')

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    channel_last = data_format == "NHWC"

    padding, padding_algorithm = _update_padding_nd(
        padding, num_dims=2, channel_last=channel_last, ceil_mode=ceil_mode)

    if data_format == "NHWC" and return_mask:
        raise ValueError(
            "When setting return_mask to true, data_format must be set to NCHW in API:max_pool2d"
        )

    if in_dygraph_mode():
        if return_mask:
            output = core.ops.max_pool2d_with_index(
                x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
                stride, 'paddings', padding, 'padding_algorithm',
                padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
                'use_mkldnn', False, 'exclusive', True, 'data_format',
                data_format)
            return output
        else:
            output = core.ops.pool2d(
                x, 'pooling_type', 'max', 'ksize', kernel_size,
                'global_pooling', False, 'padding_algorithm', padding_algorithm,
                'strides', stride, 'paddings', padding, 'use_cudnn', True,
                'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', True,
                'data_format', data_format)
            return output

    op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
    helper = LayerHelper(op_type, **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'max_pool2d')
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference(dtype)
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(
        type=op_type,
        inputs={"X": x},
        outputs=outputs,
        attrs={
            "pooling_type": 'max',
            "ksize": kernel_size,
            "global_pooling": False,
            "strides": stride,
            "paddings": padding,
            "padding_algorithm": padding_algorithm,
            "use_cudnn": True,
            "ceil_mode": ceil_mode,
            "use_mkldnn": False,
            "exclusive": True,
            "data_format": data_format,
        })

    return (pool_out, mask) if return_mask else pool_out


def max_pool3d(x,
               kernel_size,
               stride=None,
               padding=0,
               return_mask=False,
               ceil_mode=False,
               data_format="NCDHW",
               name=None):
    """
    This API implements max pooling 3d operation.
    See more details in :ref:`api_nn_pooling_MaxPool3d` .

    Args:
        x (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"` or `"NDHWC"`, where N represents batch size, C represents the number of channels, D, H and W represent the depth, height and width of the feature respectively.
        kernel_size (int|list|tuple): The pool kernel size. If the kernel size
            is a tuple or list, it must contain three integers,
            (kernel_size_Depth, kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be the cube of an int.
        stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, (stride_Depth, stride_Height, stride_Width).
            Otherwise, the pool stride size will be a cube of an int.
        padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
            1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on every side.
            3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_width] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that the batch dimension and channel dimension should be [0,0] or (0,0).
            The default value is 0.
        ceil_mode (bool): When True, will use `ceil` instead of `floor` to compute the output shape.
        return_mask (bool): Whether to return the max indices along with the outputs. Default False. Only supports `"NCDHW"` data_format.
        data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
                        The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
                        `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to
                             be set and is None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is the same as the input tensor.
    Raises:
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is "VALID", but `ceil_mode` is True.
        ShapeError: If the output's shape calculated is not greater than 0.
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            import numpy as np
            # max pool3d
            x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
            output = F.max_pool3d(x,
                                  kernel_size=2,
                                  stride=2, padding=0)
            # output.shape [1, 3, 16, 16, 16]
            # for return_mask=True
            x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
            output, max_indices = paddle.nn.functional.max_pool3d(x,
                                          kernel_size=2,
                                          stride=2,
                                          padding=0,
                                          return_mask=True)
            # output.shape [1, 3, 16, 16, 16], max_indices.shape [1, 3, 16, 16, 16]
    """
    kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
    if stride is None:
        stride = kernel_size
    else:
        stride = utils.convert_to_list(stride, 3, 'pool_stride')

    channel_last = _channel_last(data_format, 3)

    padding, padding_algorithm = _update_padding_nd(
        padding, 3, channel_last=channel_last, ceil_mode=ceil_mode)
    if data_format == "NDHWC" and return_mask:
        raise ValueError(
            "When setting return_mask to true, data_format must be set to NCDHW in API:max_pool3d"
        )

    if in_dygraph_mode():
        if return_mask:
            output = core.ops.max_pool3d_with_index(
                x, 'pooling_type', 'max', 'ksize', kernel_size, 'strides',
                stride, 'paddings', padding, 'global_pooling', False,
                'padding_algorithm', padding_algorithm, 'use_cudnn', True,
                'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', True,
                'data_format', data_format)
            return output
        else:
            output = core.ops.pool3d(
                x, 'pooling_type', 'max', 'ksize', kernel_size,
                'global_pooling', False, 'padding_algorithm', padding_algorithm,
                'strides', stride, 'paddings', padding, 'use_cudnn', True,
                'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', True,
                'data_format', data_format)
            return output

    op_type = "max_pool3d_with_index" if return_mask else "pool3d"
    helper = LayerHelper(op_type, **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference(dtype)
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(
        type=op_type,
        inputs={"X": x},
        outputs=outputs,
        attrs={
            "pooling_type": 'max',
            "ksize": kernel_size,
            "global_pooling": False,
            "strides": stride,
            "paddings": padding,
            "padding_algorithm": padding_algorithm,
            "use_cudnn": True,
            "ceil_mode": ceil_mode,
            "use_mkldnn": False,
            "exclusive": False,
            "data_format": data_format,
        })

    return (pool_out, mask) if return_mask else pool_out


def adaptive_avg_pool1d(x, output_size, name=None):
    """
    This API implements adaptive average pooling 1d operation.
    See more details in :ref:`api_nn_pooling_AdaptiveAvgPool1d` .
    Args:
        x (Tensor): The input tensor of pooling operator, which is a 3-D tensor
                              with shape [N, C, L].  The format of input tensor is NCL,
                              where N is batch size, C is the number of channels, L is the
                              length of the feature. The data type is float32 or float64.
        output_size (int): The target output size. It must be an integer.
        name(str, optional): For detailed information, please refer
                                 to :ref:`api_guide_Name`. Usually name does not need
                                 to be set and is None by default.
    Returns:
            Tensor: The output tensor of adaptive average pooling result. The data type is the same
                      as the input tensor.
    Raises:
            ValueError: If 'output_size' is not an integer.
    Examples:
        .. code-block:: python
              # average adaptive pool1d
              # suppose input data in shape of [N, C, L], `output_size` is m or [m],
              # output shape is [N, C, m], adaptive pool divide L dimension
              # of input data into m grids averagely and performs poolings in each
              # grid to get output.
              # adaptive avg pool performs calculations as follow:
              #
              #     for i in range(m):
              #         lstart = floor(i * L / m)
              #         lend = ceil((i + 1) * L / m)
              #         output[:, :, i] = sum(input[:, :, lstart: lend])/(lend - lstart)
              #
              import paddle
              import paddle.nn.functional as F
              import numpy as np

              data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
              pool_out = F.adaptive_avg_pool1d(data, output_size=16)
              # pool_out shape: [1, 3, 16]
    """
    pool_type = 'avg'
    if not in_dygraph_mode():
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'adaptive_pool2d')
        check_type(output_size, 'pool_size', (int), 'adaptive_pool1d')
    _check_input(x, 3)
    pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')

    x = unsqueeze(x, [2])
    if in_dygraph_mode():
        pool_out = core.ops.pool2d(x, 'pooling_type', pool_type, 'ksize',
                                   pool_size, 'adaptive', True)
        return squeeze(pool_out, [2])

    l_type = "pool2d"

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    outputs = {"Out": pool_out}
    helper.append_op(
        type=l_type,
        inputs={"X": x},
        outputs=outputs,
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "adaptive": True,
        })

    return squeeze(pool_out, [2])


def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
    """
    This API implements adaptive average pooling 2d operation.
    See more details in :ref:`api_nn_pooling_AdaptiveAvgPool2d` .

    Args:
        x (Tensor): The input tensor of adaptive avg pool2d operator, which is a 4-D tensor.
                          The data type can be float32 or float64.
        output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain two elements, (H, W). H and W can be either an int, or None which means
            the size will be the same as that of the input.
        data_format (str): The data format of the input and output data. An optional string
            from: "NCHW", "NHWC". The default is "NCHW". When it is "NCHW", the data is stored in
            the order of: [batch_size, input_channels, input_height, input_width].
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to
                             be set and is None by default.
    Returns:
        Tensor: The output tensor of avg adaptive pool2d result. The data type is the same as the input tensor.
    Raises:
        ValueError: If `data_format` is not "NCHW" or "NHWC".
    Examples:
        .. code-block:: python
            # adaptive avg pool2d
            # suppose input data in shape of [N, C, H, W], `output_size` is [m, n],
            # output shape is [N, C, m, n], adaptive pool divide H and W dimensions
            # of input data into m * n grids averagely and performs poolings in each
            # grid to get output.
            # adaptive avg pool performs calculations as follow:
            #
            #     for i in range(m):
            #         for j in range(n):
            #             hstart = floor(i * H / m)
            #             hend = ceil((i + 1) * H / m)
            #             wstart = floor(j * W / n)
            #             wend = ceil((j + 1) * W / n)
            #             output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
            #
            import paddle
            import numpy as np

            input_data = np.random.rand(2, 3, 32, 32)
            x = paddle.to_tensor(input_data)
            # x.shape is [2, 3, 32, 32]
            out = paddle.nn.functional.adaptive_avg_pool2d(
                            x=x,
                            output_size=[3, 3])
            # out.shape is [2, 3, 3, 3]
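
            # an illustrative (hypothetical) use of None in output_size:
            # None keeps that dimension's input size
            out_keep_w = paddle.nn.functional.adaptive_avg_pool2d(
                            x=x,
                            output_size=[3, None])
            # out_keep_w.shape is [2, 3, 3, 32]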
    """
    if not in_dygraph_mode():
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'adaptive_avg_pool2d')
        check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d')

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    if data_format == "NCHW":
        in_h, in_w = x.shape[2:4]
    else:
        in_h, in_w = x.shape[1:3]

    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 2, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_h
        if output_size[1] is None:
            output_size[1] = in_w

    if in_dygraph_mode():
        output = core.ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size,
                                 'global_pooling', False, 'adaptive', True,
                                 'data_format', data_format)
        return output

    l_type = 'pool2d'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    outputs = {"Out": pool_out}

    helper.append_op(
        type=l_type,
        inputs={"X": x},
        outputs=outputs,
        attrs={
            "pooling_type": "avg",
            "ksize": output_size,
            "adaptive": True,
            "data_format": data_format,
        })

    return pool_out


def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
    """
    This API implements adaptive average pooling 3d operation.
    See more details in :ref:`api_nn_pooling_AdaptiveAvgPool3d` .

    Args:
        x (Tensor): The input tensor of adaptive avg pool3d operator, which is a 5-D tensor.
                          The data type can be float32, float64.
        output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain three elements, (D, H, W). D, H and W can be either an int, or None which means
            the size will be the same as that of the input.
        data_format (str): The data format of the input and output data. An optional string
            from: "NCDHW", "NDHWC". The default is "NCDHW". When it is "NCDHW", the data is stored in
            the order of: [batch_size, input_channels, input_depth, input_height, input_width].
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name does not need to
                             be set and is None by default.
    Returns:
        Tensor: The output tensor of avg adaptive pool3d result. The data type is the same as the input tensor.
    Raises:
        ValueError: If `data_format` is not "NCDHW" or "NDHWC".
    Examples:
        .. code-block:: python
            # adaptive avg pool3d
            # suppose input data in shape of [N, C, D, H, W], `output_size` is [l, m, n],
            # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
            # of input data into l * m * n grids averagely and performs poolings in each
            # grid to get output.
            # adaptive avg pool performs calculations as follow:
            #
            #     for i in range(l):
            #         for j in range(m):
            #             for k in range(n):
            #                 dstart = floor(i * D / l)
            #                 dend = ceil((i + 1) * D / l)
            #                 hstart = floor(j * H / m)
            #                 hend = ceil((j + 1) * H / m)
            #                 wstart = floor(k * W / n)
            #                 wend = ceil((k + 1) * W / n)
            #                 output[:, :, i, j, k] =
            #                     avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
            import paddle
            import numpy as np
            input_data = np.random.rand(2, 3, 8, 32, 32)
            x = paddle.to_tensor(input_data)
            # x.shape is [2, 3, 8, 32, 32]
            out = paddle.nn.functional.adaptive_avg_pool3d(
                            x=x,
                            output_size=[3, 3, 3])
            # out.shape is [2, 3, 3, 3, 3]
    """
    if not in_dygraph_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'adaptive_avg_pool3d')
        check_type(data_format, 'data_format', str, 'adaptive_avg_pool3d')

    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    if data_format == "NCDHW":
        in_l, in_h, in_w = x.shape[2:5]
    else:
        in_l, in_h, in_w = x.shape[1:4]
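    # Note: `in_l` is the input depth; the naming follows the (l, m, n)
    # output-grid notation used in the docstring example above.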

    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 3, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_l
        if output_size[1] is None:
            output_size[1] = in_h
        if output_size[2] is None:
            output_size[2] = in_w
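    # From here on `output_size` is a fully concrete [D, H, W] list, so the
    # underlying pool3d op never receives a `None` entry.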

    if in_dygraph_mode():
        output = core.ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size,
                                 'global_pooling', False, 'adaptive', True,
                                 'data_format', data_format)
        return output

    l_type = 'pool3d'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)
    outputs = {"Out": pool_out}

    helper.append_op(
        type=l_type,
        inputs={"X": x},
        outputs=outputs,
        attrs={
            "pooling_type": "avg",
            "ksize": output_size,
            "adaptive": True,
            "data_format": data_format,
        })

    return pool_out


def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
    """
    This API implements adaptive max pooling 1d operation.
    See more details in :ref:`api_nn_pooling_AdaptiveMaxPool1d` .

    Args:
        x (Tensor): The input tensor of pooling operator, which is a 3-D tensor
                              with shape [N, C, L].  The format of input tensor is NCL,
                              where N is batch size, C is the number of channels, L is the
                              length of the feature. The data type is float32 or float64.
        output_size (int): The pool kernel size. The value should be an integer.
        return_mask (bool): If true, the index of the max pooling point will be returned along
                with the output. Default False.
        name(str, optional): For detailed information, please refer
                                 to :ref:`api_guide_Name`. Usually name does not need
                                 to be set and is None by default.
    Returns:
            Tensor: The output tensor of adaptive pooling result. The data type is the
                      same as the input tensor.
    Raises:
            ValueError: If `output_size` is not an integer.
    Examples:
        .. code-block:: python

              # max adaptive pool1d
              # suppose input data in shape of [N, C, L], `output_size` is m or [m],
              # output shape is [N, C, m]; adaptive pooling divides the L dimension
              # of input data into m grids evenly and performs max pooling in each
              # grid to get the output.
              # adaptive max pool performs calculations as follows:
              #
              #     for i in range(m):
              #         lstart = floor(i * L / m)
              #         lend = ceil((i + 1) * L / m)
              #         output[:, :, i] = max(input[:, :, lstart: lend])
              #
              import paddle
              import paddle.nn.functional as F
              import numpy as np

              data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
              pool_out = F.adaptive_max_pool1d(data, output_size=16)
              # pool_out shape: [1, 3, 16]
              pool_out, indices = F.adaptive_max_pool1d(data, output_size=16, return_mask=True)
              # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16]
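              # A hedged cross-check of the first output cell against the
              # formula sketched above (illustrative only):
              import math
              lstart = math.floor(0 * 32 / 16)      # 0
              lend = math.ceil((0 + 1) * 32 / 16)   # 2
              # pool_out[0, 0, 0] should match data.numpy()[0, 0, lstart:lend].max()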
    """
    pool_type = 'max'
    if not in_dygraph_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'adaptive_max_pool1d')
        check_type(output_size, 'pool_size', int, 'adaptive_max_pool1d')
        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool1d')
    _check_input(x, 3)

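    # The 1-D case is routed through the 2-D max-pool kernel: a unit dimension
    # is prepended to the kernel size and the input is unsqueezed to
    # [N, C, 1, L]; the extra axis is squeezed away again before returning.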
    pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')

    x = unsqueeze(x, [2])
    if in_dygraph_mode():
        pool_out = core.ops.max_pool2d_with_index(
            x, 'pooling_type', pool_type, 'ksize', pool_size, 'adaptive', True)
        return (squeeze(pool_out[0], [2]), squeeze(
            pool_out[1], [2])) if return_mask else squeeze(pool_out[0], [2])

    l_type = 'max_pool2d_with_index'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    mask = helper.create_variable_for_type_inference(dtype)
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(
        type=l_type,
        inputs={"X": x},
        outputs=outputs,
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "adaptive": True,
        })

    return (squeeze(pool_out, [2]),
            squeeze(mask, [2])) if return_mask else squeeze(pool_out, [2])


def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
    """
        This operation applies 2D adaptive max pooling on the input tensor.
        See more details in :ref:`api_nn_pooling_AdaptiveMaxPool2d` .

        Args:
            x (Tensor): The input tensor of adaptive max pool2d operator, which is a 4-D tensor. The data type can be float16, float32, float64, int32 or int64.
            output_size (int|list|tuple): The pool kernel size. If the pool kernel size is a tuple or list, it must contain two elements, (H, W). H and W can be either an int, or None, which means the size will be the same as that of the input.
            return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False.
            name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name does not need to be set and is None by default.

        Returns:
            Tensor: The output tensor of adaptive max pool2d result. The data type is the same as the input tensor.

        Examples:
            .. code-block:: python

              # max adaptive pool2d
              # suppose input data in the shape of [N, C, H, W], `output_size` is [m, n]
              # output shape is [N, C, m, n]; adaptive pooling divides the H and W
              # dimensions of input data into m*n grids evenly and performs max
              # pooling in each grid to get the output.
              # adaptive max pool performs calculations as follows:
              #
              #     for i in range(m):
              #         for j in range(n):
              #             hstart = floor(i * H / m)
              #             hend = ceil((i + 1) * H / m)
              #             wstart = floor(j * W / n)
              #             wend = ceil((j + 1) * W / n)
              #             output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
              #
              import paddle
              import numpy as np

              input_data = np.random.rand(2, 3, 32, 32)
              x = paddle.to_tensor(input_data)
              # x.shape is [2, 3, 32, 32]
              out = paddle.nn.functional.adaptive_max_pool2d(
                            x=x,
                            output_size=[3, 3])
              # out.shape is [2, 3, 3, 3]
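              # A hedged variant: with `return_mask=True` the argmax indices
              # are returned alongside the pooled values.
              out, mask = paddle.nn.functional.adaptive_max_pool2d(
                            x=x,
                            output_size=[3, 3],
                            return_mask=True)
              # out.shape and mask.shape are both expected to be [2, 3, 3, 3]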
    """
    if not in_dygraph_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'adaptive_max_pool2d')
        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool2d')
        #check_type(output_size, 'pool_size', (int), 'adaptive_max_pool2d')
    _check_input(x, 4)

    in_h, in_w = x.shape[2:4]
    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 2, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_h
        if output_size[1] is None:
            output_size[1] = in_w

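    # In dygraph mode the fused op returns an (out, mask) pair; the mask is
    # only surfaced to the caller when `return_mask` is True.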
    if in_dygraph_mode():
        pool_out = core.ops.max_pool2d_with_index(
            x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', True)
        return pool_out if return_mask else pool_out[0]

    l_type = 'max_pool2d_with_index'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    mask = helper.create_variable_for_type_inference(dtype)
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(
        type=l_type,
        inputs={"X": x},
        outputs=outputs,
        attrs={
            "pooling_type": 'max',
            "ksize": output_size,
            "adaptive": True,
        })
    return (pool_out, mask) if return_mask else pool_out


def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
    """
        This operation applies 3D adaptive max pooling on the input tensor.
        See more details in :ref:`api_nn_pooling_AdaptiveMaxPool3d` .

        Args:
            x (Tensor): The input tensor of adaptive max pool3d operator, which is a 5-D tensor. The data type can be float32, float64.
            output_size (int|list|tuple): The pool kernel size. If the pool kernel size is a tuple or list, it must contain three elements, (D, H, W). D, H and W can be either an int, or None, which means the size will be the same as that of the input.
            return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False.
            name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name does not need to be set and is None by default.

        Returns:
            Tensor: The output tensor of adaptive max pool3d result. The data type is the same as the input tensor.

        Examples:
            .. code-block:: python

              # adaptive max pool3d
              # suppose input data in the shape of [N, C, D, H, W], `output_size` is [l, m, n]
              # output shape is [N, C, l, m, n]; adaptive pooling divides the D, H and W
              # dimensions of input data into l * m * n grids evenly and performs max
              # pooling in each grid to get the output.
              # adaptive max pool performs calculations as follows:
              #
              #     for i in range(l):
              #         for j in range(m):
              #             for k in range(n):
              #                 dstart = floor(i * D / l)
              #                 dend = ceil((i + 1) * D / l)
              #                 hstart = floor(j * H / m)
              #                 hend = ceil((j + 1) * H / m)
              #                 wstart = floor(k * W / n)
              #                 wend = ceil((k + 1) * W / n)
              #                 output[:, :, i, j, k] = max(input[:, :, dstart: dend, hstart: hend, wstart: wend])
              #
              import paddle
              import numpy as np

              input_data = np.random.rand(2, 3, 8, 32, 32)
              x = paddle.to_tensor(input_data)
              # x.shape is [2, 3, 8, 32, 32]
              out = paddle.nn.functional.adaptive_max_pool3d(
                            x=x,
                            output_size=[3, 3, 3])
              # out.shape is [2, 3, 3, 3, 3]
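              # A hedged extra example, using `None` to keep the input depth
              # (8) unchanged, per the Args note above:
              out_none = paddle.nn.functional.adaptive_max_pool3d(
                            x=x,
                            output_size=[None, 3, 3])
              # out_none.shape is expected to be [2, 3, 8, 3, 3]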
    """

    if not in_dygraph_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'adaptive_max_pool3d')
        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool3d')
        #check_type(output_size, 'pool_size', (int), 'adaptive_max_pool3d')
    _check_input(x, 5)

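    # Unlike the avg variant above, this op takes no `data_format` argument,
    # so the layout is always NCDHW and the spatial sizes sit at dims 2..4.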
    in_l, in_h, in_w = x.shape[2:5]
    if isinstance(output_size, int):
        output_size = utils.convert_to_list(output_size, 3, 'output_size')
    else:
        output_size = list(output_size)
        if output_size[0] is None:
            output_size[0] = in_l
        if output_size[1] is None:
            output_size[1] = in_h
        if output_size[2] is None:
            output_size[2] = in_w

    if in_dygraph_mode():
        pool_out = core.ops.max_pool3d_with_index(
            x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', True)
        return pool_out if return_mask else pool_out[0]

    l_type = 'max_pool3d_with_index'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    pool_out = helper.create_variable_for_type_inference(dtype)

    mask = helper.create_variable_for_type_inference(dtype)
    outputs = {"Out": pool_out, "Mask": mask}

    helper.append_op(
        type=l_type,
        inputs={"X": x},
        outputs=outputs,
        attrs={
            "pooling_type": 'max',
            "ksize": output_size,
            "adaptive": True,
        })

    return (pool_out, mask) if return_mask else pool_out