# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
import paddle
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.tensor import fill_constant
from ...tensor import concat
from ...tensor.creation import zeros
from paddle.static import Variable
from ...fluid import dygraph_utils
# TODO: define the common functions to build a neural network
from ...tensor.manipulation import squeeze
from ...tensor.manipulation import unsqueeze
from ...tensor import clip
from ...tensor import sum
from ...tensor import sqrt
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from ...fluid.framework import _varbase_creator, _in_legacy_dygraph, in_dygraph_mode, _non_static_mode

from paddle import _C_ops
from paddle.framework import in_dynamic_mode
from paddle.tensor.creation import full
from paddle.framework import core
from paddle.static import default_main_program

__all__ = []


def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
    r"""

    Returns a col buffer of sliding local blocks of the input x, also known
    as im2col for batched 2D image tensors. For each block under the convolution filter,
    all elements will be rearranged as a column. As the convolution filter slides over
    the input feature map, a series of such columns is formed.

    For each input :math:`x` with shape [N, C, H, W], the output shape [N, Cout, Lout]
    can be calculated as follows.

    .. math::

        dkernel[0] &= dilations[0] \times (kernel\_sizes[0] - 1) + 1

        dkernel[1] &= dilations[1] \times (kernel\_sizes[1] - 1) + 1

        hout &= \frac{H + paddings[0] + paddings[2] - dkernel[0]}{strides[0]} + 1

        wout &= \frac{W + paddings[1] + paddings[3] - dkernel[1]}{strides[1]} + 1

        Cout &= C \times kernel\_sizes[0] \times kernel\_sizes[1]

        Lout &= hout \times wout


    Parameters:
        x(Tensor):              4-D Tensor, input tensor of format [N, C, H, W],
                                  data type can be float32 or float64.
        kernel_sizes(int|list):   The size of the convolution kernel, should be [k_h, k_w]
                                  or an integer k treated as [k, k].
        strides(int|list):        The strides, should be [stride_h, stride_w]
                                  or an integer stride treated as [stride, stride].
                                  By default, strides will be [1, 1].
        paddings(int|list):       The paddings of each dimension, should be
                                  [padding_top, padding_left, padding_bottom, padding_right]
                                  or [padding_h, padding_w] or an integer padding.
                                  If [padding_h, padding_w] is given, it will be expanded to
                                  [padding_h, padding_w, padding_h, padding_w]. If an integer
                                  padding is given, [padding, padding, padding, padding] will
                                  be used. By default, paddings will be [0, 0, 0, 0].
        dilations(int|list):      The dilations of the convolution kernel, should be
                                  [dilation_h, dilation_w], or an integer dilation treated as
                                  [dilation, dilation]. By default, it will be [1, 1].
        name(str, optional): The default value is None.
                             Normally there is no need for the user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`.


    Returns:
        Tensor, the tensor corresponding to the sliding local blocks.
        The output shape is [N, Cout, Lout] as described above.
        Cout is the total number of values within each block,
        and Lout is the total number of such blocks.
        The data type of the output is the same as that of the input :math:`x`.

    Examples:

        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.randn((100,3,224,224))
            y = F.unfold(x, [3, 3], 1, 1, 1)
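            # a quick sanity check against the shape formulas above (added
            # for illustration): dkernel = 1*(3-1)+1 = 3,
            # hout = wout = (224 + 1 + 1 - 3)/1 + 1 = 224,
            # Cout = 3*3*3 = 27, Lout = 224*224 = 50176
            print(y.shape)  # [100, 27, 50176]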
    """

    helper = LayerHelper("unfold", **locals())

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')

    assert len(x.shape) == 4, \
            "input should be the format of [N, C, H, W]"

    if isinstance(kernel_sizes, int):
        kernel_sizes = [kernel_sizes, kernel_sizes]
    else:
        assert isinstance(kernel_sizes, list) and (len(kernel_sizes) == 2), \
            "kernel_sizes should either be an integer or a list of two integers"

    if isinstance(strides, int):
        strides = [strides, strides]
    else:
        assert isinstance(strides, list) and (len(strides) == 2), \
            "strides should either be an integer or a list of two integers"

    if isinstance(dilations, int):
        dilations = [dilations, dilations]
    else:
        assert isinstance(dilations, list) and (len(dilations) == 2), \
            "dilations should either be an integer or a list of two integers"

    if isinstance(paddings, int):
        paddings = [paddings] * 4
    elif isinstance(paddings, list):
        if len(paddings) == 2:
            paddings = paddings * 2
        elif len(paddings) == 4:
            pass
        else:
            raise ValueError(
                "paddings should either be an integer or a list of 2 or 4 integers"
            )
    else:
        raise ValueError(
            "Unexpected type of paddings, it should be either an integer or a list"
            "of 2 or 4 integers")

    if in_dygraph_mode():
        return _C_ops.final_state_unfold(x, kernel_sizes, strides, paddings,
                                         dilations)

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="unfold",
                     inputs={"X": x},
                     outputs={"Y": out},
                     attrs={
                         "kernel_sizes": kernel_sizes,
                         "strides": strides,
                         "paddings": paddings,
                         "dilations": dilations
                     })
    return out


def interpolate(x,
                size=None,
                scale_factor=None,
                mode='nearest',
                align_corners=False,
                align_mode=0,
                data_format='NCHW',
                name=None):
    """
    This op resizes a batch of images.
    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
    or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
    (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    where in_w is the width of the input tensor, in_h is the height of the input tensor,
    and in_d is the depth of the input tensor.
    The resizing only applies to the three dimensions (depth, height and width).
    Supporting resample methods:
        'linear' : Linear interpolation
        'bilinear' : Bilinear interpolation
        'trilinear' : Trilinear interpolation
        'nearest' : Nearest neighbor interpolation
        'bicubic' : Bicubic interpolation
        'area': Area interpolation

    Linear interpolation is the method of using a line connecting two known quantities 
    to determine the value of an unknown quantity between the two known quantities. 
    
    Nearest neighbor interpolation performs nearest neighbor interpolation
    in both the 3rd dimension (in height direction) and the 4th dimension (in width
    direction) on the input tensor.

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
    The linear interpolation is performed in three directions.
    align_corners and align_mode are optional parameters; the calculation method
    of interpolation can be selected by them.

    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.

    Area interpolation performs area interpolation in the 3rd dimension (height),
    the 4th dimension (width) and the 5th dimension (depth) of the input tensor.
    Setting the mode to 'area' will directly call `paddle.nn.functional.adaptive_avg_pool1d`,
    `paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`.

    Example:

    .. code-block:: text

        For scale_factor:
            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
              scale_factor = float(in_size/out_size)

        Linear interpolation:
            if:
                align_corners = False , align_mode = 0
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = W_{in} * scale_{factor}
        
        Nearest neighbor interpolation:

              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})

        Bilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Bicubic interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.
    
    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.
    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation.
    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation
    Parameters:
        x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w) 
             when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor. 
             Default: None. If a list/tuple, each element can be an integer or a Tensor of shape: [1].
             If a Tensor, it should be a 1-D Tensor.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`. Has to match the input size if it is either a list, a tuple or a Tensor.
             Default: None.
        mode (str): The resample method. It supports 'linear', 'area', 'nearest', 'bilinear',
                       'bicubic' and 'trilinear' currently. Default: 'nearest'
        align_corners(bool) :  An optional bool, If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
                               corner pixels. This only has an effect when the mode is 'linear', 'bilinear', 'bicubic' or 'trilinear'.
                               Default: False
        align_mode(int)  :  An optional input for linear/bilinear/trilinear interpolation. Refer to the formula in the example above;
                            it can be \'0\' for src_idx = scale_factor*(dst_index+0.5)-0.5 , or \'1\' for
                            src_idx = scale_factor*dst_index.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for the user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
                    'trilinear', 'bicubic', 'area' or 'nearest' currently.
        ValueError: 'linear' only supports 3-D tensor.
        ValueError: 'bilinear' and 'bicubic' only support 4-D tensor.
        ValueError: 'nearest' only supports 4-D or 5-D tensor.
        ValueError: 'trilinear' only supports 5-D tensor.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 1 for input 3-D tensor.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: size length should be 3 for input 5-D tensor.
        ValueError: scale_factor should be greater than zero.
        TypeError: align_corners should be a bool value
        ValueError: align_mode can only be '0' or '1'
        ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            import paddle.nn.functional as F

            # given out size
            input_data = np.random.rand(2,3,6,10).astype("float32")
            x = paddle.to_tensor(input_data)
            output_1 = F.interpolate(x=x, size=[12,12])
            print(output_1.shape)
            # [2L, 3L, 12L, 12L]

            # given scale
            output_2 = F.interpolate(x=x, scale_factor=[2,1])
            print(output_2.shape)
            # [2L, 3L, 12L, 10L]

            # bilinear interp
            output_3 = F.interpolate(x=x, scale_factor=[2,1], mode="bilinear")
            print(output_3.shape)
            # [2L, 3L, 12L, 10L]
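
            # an additional sketch (added for illustration, not from the
            # original docs): an integer scale_factor is applied to every
            # spatial dimension
            output_4 = F.interpolate(x=x, scale_factor=2, mode="nearest")
            print(output_4.shape)
            # [2L, 3L, 12L, 20L]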
    """
    data_format = data_format.upper()
    resample = mode.upper()
    resample_type = mode.lower()

    resample_methods = [
        'LINEAR',
        'BILINEAR',
        'TRILINEAR',
        'NEAREST',
        'BICUBIC',
        'AREA',
    ]
    if resample not in resample_methods:
        raise ValueError(
            "The 'resample' of image_resize can only be 'area', 'linear', 'bilinear', 'trilinear', "
            " 'bicubic' or 'nearest' currently.")

    if resample in ['LINEAR'] and len(x.shape) != 3:
        raise ValueError("'linear' only supports 3-D tensor.")
    if resample in ['NEAREST'] and len(x.shape) != 4 and len(x.shape) != 5:
        raise ValueError("'nearest' only supports 4-D or 5-D tensor.")

    if resample in ['BILINEAR', 'BICUBIC'] and len(x.shape) != 4:
        raise ValueError("'bilinear' and 'bicubic' only support 4-D tensor.")
    if resample == 'TRILINEAR' and len(x.shape) != 5:
        raise ValueError("'trilinear' only supports 5-D tensor.")

    if size is None and scale_factor is None:
        raise ValueError("One of size and scale_factor must not be None.")

    if not isinstance(align_corners, bool):
        raise TypeError("Attr align_corners should be a bool value")

    if align_mode != 0 and align_mode != 1:
        raise ValueError("align_mode can only be 0 or 1")
    if align_corners != 0 and resample == 'NEAREST':
        raise ValueError(
            "align_corners option can only be set with the interpolating modes: linear | bilinear | bicubic | trilinear"
        )

    if resample == 'AREA':
        if isinstance(size, list) or isinstance(size, tuple) or isinstance(
                size, Variable):
            if len(size) == 0:
                raise ValueError("output size can not be empty")
        if len(x.shape) == 3:
            return paddle.nn.functional.adaptive_avg_pool1d(x, size)
        elif len(x.shape) == 4:
            return paddle.nn.functional.adaptive_avg_pool2d(x, size)
        elif len(x.shape) == 5:
            return paddle.nn.functional.adaptive_avg_pool3d(x, size)

    helper = LayerHelper('{}_interp_v2'.format(resample_type), **locals())
    dtype = helper.input_dtype(input_param_name='x')
    if len(x.shape) == 3 and data_format not in ['NCW', 'NWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCW` or `NWC` supported for 3-D input.")
    elif len(x.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCHW` or `NHWC` supported for 4-D input.")
    elif len(x.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCDHW` or `NDHWC` supported for 5-D input.")

    def _is_list_or_turple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
        data_layout = 'NCHW'
    if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC':
        data_layout = 'NHWC'

    if resample == 'NEAREST':
        align_corners = False

    inputs = {"X": x}
    attrs = {
        "out_d": -1,
        "out_h": -1,
        "out_w": -1,
        "interp_method": resample_type,
        "align_corners": align_corners,
        "align_mode": align_mode,
        "data_layout": data_layout
    }
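
    # out_d/out_h/out_w stay -1 unless an explicit output size is filled in
    # below; the chosen interp_method and alignment flags are forwarded to
    # the underlying *_interp_v2 op unchanged.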

    out_shape = size
    scale = scale_factor
    if out_shape is not None and scale is not None:
        raise ValueError("Only one of size or scale_factor should be defined.")
    if out_shape is not None:
        if isinstance(out_shape, Variable) and not in_dynamic_mode():
            out_shape.stop_gradient = True
            inputs['OutSize'] = out_shape
        else:
            if in_dynamic_mode():
                if isinstance(out_shape, Variable):
                    out_shape = list(out_shape.numpy())
                else:
                    out_shape = list(out_shape)
                for i, dim in enumerate(out_shape):
                    if isinstance(dim, Variable):
                        out_shape[i] = dim.numpy()[0]
            if not (_is_list_or_turple_(out_shape)):
                raise TypeError("size should be a list or tuple or Variable.")
            # Validate the shape
            contain_var = False
            for dim_idx, dim_size in enumerate(out_shape):
                if isinstance(dim_size, Variable):
                    contain_var = True
                    continue
                assert dim_size > 0, (
                    "Each dimension size given in out_shape must be greater than 0."
                )

            if contain_var:
                new_size_tensor = []
                size_list = []
                for dim in out_shape:
                    if isinstance(dim, Variable):
                        dim.stop_gradient = True
                        new_size_tensor.append(dim)
                        size_list.append(-1)
                    else:
                        assert (isinstance(dim, int))
                        temp_out = helper.create_variable_for_type_inference(
                            'int32')
                        fill_constant([1],
                                      'int32',
                                      dim,
                                      force_cpu=True,
                                      out=temp_out)
                        new_size_tensor.append(temp_out)
                        size_list.append(dim)
                inputs['SizeTensor'] = new_size_tensor
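
                # Tensor entries of out_shape are passed through 'SizeTensor'
                # with -1 placeholders in size_list, while plain ints were
                # materialized above as 1-element int32 tensors on CPU.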

            if len(x.shape) == 3:
                if len(out_shape) != 1:
                    raise ValueError(
                        "size length should be 1 for input 3-D tensor")
                if contain_var:
                    attrs['out_w'] = size_list[0]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_w'] = out_shape[0]
            if len(x.shape) == 4:
                if len(out_shape) != 2:
                    raise ValueError("size length should be 2 for "
                                     "input 4-D tensor.")
                if contain_var:
                    attrs['out_h'] = size_list[0]
                    attrs['out_w'] = size_list[1]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_h'] = out_shape[0]
                    attrs['out_w'] = out_shape[1]
            if len(x.shape) == 5:
                if len(out_shape) != 3:
                    raise ValueError("size length should be 3 for "
                                     "input 5-D tensor.")
                if contain_var:
                    attrs['out_d'] = size_list[0]
                    attrs['out_h'] = size_list[1]
                    attrs['out_w'] = size_list[2]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_d'] = out_shape[0]
                    attrs['out_h'] = out_shape[1]
                    attrs['out_w'] = out_shape[2]

    else:
        if in_dynamic_mode() and isinstance(scale, Variable):
            scale = list(scale.numpy())
        if isinstance(scale, Variable):
            scale.stop_gradient = True
            inputs["Scale"] = scale
        elif isinstance(scale, float) or isinstance(scale, int):
            if scale <= 0:
                raise ValueError("Attr(scale) should be greater than zero.")
            scale_list = []
            for i in range(len(x.shape) - 2):
                scale_list.append(scale)
            attrs['scale'] = list(map(float, scale_list))
        elif isinstance(scale, list) or isinstance(scale, tuple):
            if len(scale) != len(x.shape) - 2:
                raise ValueError("scale_shape length should be {} for "
                                 "input {}-D tensor.".format(
                                     len(x.shape) - 2, len(x.shape)))
            for value in scale:
                if value <= 0:
                    raise ValueError("Attr(scale) should be greater than zero.")
            attrs['scale'] = list(map(float, scale))
        else:
            raise TypeError(
                "Attr(scale)'s type should be float, int, list, tuple, or Tensor."
            )

    if in_dynamic_mode():
        attr_list = []
        for k, v in attrs.items():
            attr_list.append(k)
            attr_list.append(v)
        dy_attr = tuple(attr_list)

        eager_args = [x]
        eager_args.append(inputs['OutSize'] if 'OutSize' in inputs else None)
        eager_args.append(inputs['SizeTensor'] if 'SizeTensor' in
                          inputs else None)
        eager_args.append(inputs['Scale'] if 'Scale' in inputs else None)
        eager_args.extend([
            attrs['data_layout'], attrs['out_d'], attrs['out_h'], attrs['out_w']
        ])
        eager_args.append(attrs['scale'] if 'scale' in attrs else [])
        eager_args.extend([
            attrs['interp_method'], attrs['align_corners'], attrs['align_mode']
        ])

        if resample_type == "linear":
            if in_dygraph_mode():
                out = _C_ops.final_state_linear_interp_v2(*eager_args)
            else:
                out = _C_ops.linear_interp_v2(x, *dy_attr)
        elif resample_type == "bilinear":
            out = _C_ops.bilinear_interp_v2(x, *dy_attr)
        elif resample_type == "trilinear":
            out = _C_ops.trilinear_interp_v2(x, *dy_attr)
        elif resample_type == "nearest":
            out = _C_ops.nearest_interp_v2(x, *dy_attr)
        elif resample_type == "bicubic":
            out = _C_ops.bicubic_interp_v2(x, *dy_attr)
        return out
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type='{}_interp_v2'.format(resample_type),
                     inputs=inputs,
                     outputs={"Out": out},
                     attrs=attrs)
    return out


def upsample(x,
             size=None,
             scale_factor=None,
             mode='nearest',
             align_corners=False,
             align_mode=0,
             data_format='NCHW',
             name=None):
    """
    This op resizes a batch of images.

    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
    or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
    (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    where in_w is the width of the input tensor, in_h is the height of the input tensor,
    and in_d is the depth of the input tensor.
    The resizing only applies to the three dimensions (depth, height and width).

    Supporting resample methods:
        'linear' : Linear interpolation
        'bilinear' : Bilinear interpolation
        'trilinear' : Trilinear interpolation
        'nearest' : Nearest neighbor interpolation
        'bicubic' : Bicubic interpolation
    Linear interpolation is the method of using a line connecting two known quantities 
    to determine the value of an unknown quantity between the two known quantities. 
    
    Nearest neighbor interpolation performs nearest neighbor interpolation
    in both the 3rd dimension (in height direction) and the 4th dimension (in width
    direction) on the input tensor.
    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.
    
    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.

    The linear interpolation is performed in three directions.
    align_corners and align_mode are optional parameters; the calculation method
    of interpolation can be selected by them.

    Area interpolation performs area interpolation in the 3rd dimension (height),
    the 4th dimension (width) and the 5th dimension (depth) of the input tensor.
    Setting the mode to 'area' will directly call `paddle.nn.functional.adaptive_avg_pool1d`,
    `paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`.

    Example:
    .. code-block:: text
        For scale_factor:
            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
              scale_factor = float(in_size/out_size)
        Linear interpolation:
            if:
                align_corners = False , align_mode = 0
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = W_{in} * scale_{factor}
        Nearest neighbor interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})
          else:
              align_corners = True
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = round(H_{in} * scale_{factor})
              W_out = round(W_{in} * scale_{factor})
        
        Bilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}
        Bicubic interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}
        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}
    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.
    
    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
    
    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.
    
    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation
    
    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation.
    
    Parameters:
        x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w) 
             when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor. 
             Default: None. If a list/tuple, each element can be an integer or a Tensor of shape: [1].
             If a Tensor, it should be a 1-D Tensor.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`. Has to match the input size if
             it is either a list, a tuple or a Tensor.
             Default: None.
        mode (str): The resample method. It supports 'linear', 'nearest', 'bilinear',
                       'bicubic' and 'trilinear' currently. Default: 'nearest'
        align_corners(bool) :  An optional bool, If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
                               corner pixels.
                               Default: False
        align_mode(int)  :  An optional input for linear/bilinear/trilinear interpolation. Refer to the formula in the example above;
                            it can be \'0\' for src_idx = scale_factor*(dst_index+0.5)-0.5 , or \'1\' for
                            src_idx = scale_factor*dst_index.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
                    'trilinear', 'bicubic', or 'nearest' currently.
        ValueError: 'linear' only supports 3-D tensor.
        ValueError: 'bilinear', 'bicubic' and 'nearest' only support 4-D tensor.
        ValueError: 'trilinear' only supports 5-D tensor.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 1 for input 3-D tensor.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: size length should be 3 for input 5-D tensor.
        ValueError: scale_factor should be greater than zero.
        TypeError: align_corners should be a bool value
        ValueError: align_mode can only be '0' or '1'
        ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            import paddle.nn.functional as F

            input_data = np.random.rand(2,3,6,10).astype("float32")
            input = paddle.to_tensor(input_data)
            output = F.upsample(x=input, size=[12,12])
            print(output.shape)
            # [2L, 3L, 12L, 12L]
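
            # an additional sketch (added for illustration, mirroring
            # F.interpolate): an integer scale_factor scales every spatial
            # dimension
            output2 = F.upsample(x=input, scale_factor=2, mode="bilinear")
            print(output2.shape)
            # [2L, 3L, 12L, 20L]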

    """
    return interpolate(x, size, scale_factor, mode, align_corners, align_mode,
                       data_format)


def bilinear(x1, x2, weight, bias=None, name=None):
    """

    This layer applies a bilinear transformation to two inputs.
    See :ref:`api_nn_Bilinear` for details and output shape.

    Parameters:
       x1 (Tensor): the first input tensor, its data type should be float32 or float64.
       x2 (Tensor): the second input tensor, its data type should be float32 or float64.
       weight (Parameter): The learnable weights of this layer, shape is [out_features, in1_features, in2_features].
       bias (Parameter, optional): The learnable bias(Bias) of this layer, shape is [1, out_features]. If it is set to None, no bias will be added to the output units. The default value is None.
       name (str, optional): The default value is None. Normally there is no need for the user
           to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Returns:
       Tensor: A 2-D Tensor of shape [batch_size, out_features].

    Examples:
       .. code-block:: python

        import paddle
        import numpy
        import paddle.nn.functional as F

        x1 = numpy.random.random((5, 5)).astype('float32')
        x2 = numpy.random.random((5, 4)).astype('float32')
        w = numpy.random.random((1000, 5, 4)).astype('float32')
        b = numpy.random.random((1, 1000)).astype('float32')

        result = F.bilinear(paddle.to_tensor(x1), paddle.to_tensor(x2), paddle.to_tensor(w), paddle.to_tensor(b))           # result shape [5, 1000]
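        # a quick shape check (added for illustration): with out_features=1000,
        # each of the 5 batch rows maps to a length-1000 feature vector
        print(result.shape)  # [5, 1000]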

    """

    if in_dygraph_mode():
        return _C_ops.final_state_bilinear_tensor_product(x1, x2, weight, bias)
    elif _non_static_mode():
        return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)

    check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
    check_variable_and_dtype(x2, 'x2', ['float32', 'float64'], 'bilinear')

    inputs = {"X": x1, "Y": x2, "Weight": weight}
    if bias is not None:
        inputs["Bias"] = bias

    helper = LayerHelper("bilinear", **locals())
    out = helper.create_variable_for_type_inference(dtype=x1.dtype)

    helper.append_op(type="bilinear_tensor_product",
                     inputs=inputs,
                     outputs={"Out": out})

    return out


def dropout(x,
            p=0.5,
            axis=None,
            training=True,
            mode="upscale_in_train",
            name=None):
    """
    Dropout is a regularization technique for reducing overfitting by preventing
    neuron co-adaption during training. The dropout operator randomly sets the
    outputs of some units to zero, while upscale others according to the given
    dropout probability.

    Args:
        x (Tensor): The input tensor. The data type is float32 or float64.
        p (float|int): Probability of setting units to zero. Default 0.5.
        axis (int|list|tuple): The axis along which the dropout is performed. Default None.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        mode(str): ['upscale_in_train'(default) | 'downscale_in_infer'].

                           1. upscale_in_train(default), upscale the output at training time

                              - train: out = input * mask / ( 1.0 - dropout_prob )
                              - inference: out = input

                           2. downscale_in_infer, downscale the output at inference

                              - train: out = input * mask
                              - inference: out = input * (1.0 - dropout_prob)
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout, has same shape and data type as `x` .


    Examples:
        We use ``p=0.5`` in the following description for simplicity.

        1. When ``axis=None`` , this is commonly used dropout, which dropout each element of x randomly.

        ..  code-block:: text

            Let's see a simple case when x is a 2d tensor with shape 2*3:
            [[1 2 3]
             [4 5 6]]
            we generate mask with the same shape as x, which is 2*3. The value of mask is
            sampled from a Bernoulli distribution randomly. For example, we may get such mask:
            [[0 1 0]
             [1 0 1]]
            So the output is obtained from elementwise multiply of x and mask:
            [[0 2 0]
             [4 0 6]]
            Using default setting, i.e. ``mode='upscale_in_train'`` ,
            if in training phase, the final upscale output is:
            [[0 4 0 ]
             [8 0 12]]
            if in test phase, the output is the same as input:
            [[1 2 3]
             [4 5 6]]
            we can also set ``mode='downscale_in_infer'`` , then
            if in training phase, the final output is:
            [[0 2 0]
             [4 0 6]]
            if in test phase, the scale output is:
            [[0.5 1.  1.5]
             [2.  2.5 3. ]]



        2. When ``axis!=None`` , this is useful for dropping whole channels from an image or sequence.

        ..  code-block:: text

            Let's see the simple case when x is a 2d tensor with shape 2*3 again:
            [[1 2 3]
             [4 5 6]]
            (1) If ``axis=0`` , this means the dropout is only performed in axis `0` .
                we generate mask with the shape 2*1. Only in axis `0` the value is randomly selected.
                For example, we may get such mask:
                [[1]
                 [0]]
                The output is obtained from elementwise multiply of x and mask. Doing that the mask will be
                broadcast from 2*1 to 2*3:
                [[1 1 1]
                 [0 0 0]]
                and the result after elementwise multiply is:
                [[1 2 3]
                 [0 0 0]]
                then we can do upscale or downscale according to the setting of other arguments.
            (2) If ``axis=1`` , this means the dropout is only performed in axis `1` .
                we generate mask with the shape 1*3. Only in axis `1` the value is randomly selected.
                For example, we may get such mask:
                [[1 0 1]]
                Doing elementwise multiply the mask will be broadcast from 1*3 to 2*3:
                [[1 0 1]
                 [1 0 1]]
                and the result after elementwise multiply is:
                [[1 0 3]
                 [4 0 6]]
            (3) What about ``axis=[0, 1]`` ? This means the dropout is performed in all axes of x,
                which is the same case as default setting ``axis=None`` .
            (4) You may note that logically `axis=None` could also mean the dropout is performed over none of the axes of x,
                i.e. we generate a mask with the shape 1*1 and the whole input is randomly kept or dropped.
                For example, we may get such mask:
                [[0]]
                Doing elementwise multiply the mask will be broadcast from 1*1 to 2*3:
                [[0 0 0]
                 [0 0 0]]
                and the result after elementwise multiply is:
                [[0 0 0]
                 [0 0 0]]
                Actually this is not what we want, because all elements may be set to zero;
                that is why ``axis=None`` is instead treated as the default per-element dropout.

        When x is a 4d tensor with shape `NCHW`, we can set ``axis=[0,1]`` and the dropout will be performed over the `N` and `C` axes, with `H` and `W` tied together, i.e. paddle.nn.dropout(x, p, axis=[0,1]) . Please refer to ``paddle.nn.functional.dropout2d`` for more details.
        Similarly, when x is a 5d tensor with shape `NCDHW`, we can set ``axis=[0,1]`` to perform dropout3d. Please refer to ``paddle.nn.functional.dropout3d`` for more details.

        .. code-block:: python

            import paddle
            import numpy as np

            x = np.array([[1,2,3], [4,5,6]]).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.dropout(x, 0.5)
            y_test = paddle.nn.functional.dropout(x, 0.5, training=False) 
            y_0 = paddle.nn.functional.dropout(x, axis=0)
            y_1 = paddle.nn.functional.dropout(x, axis=1)
            y_01 = paddle.nn.functional.dropout(x, axis=[0,1])
            print(x)
            print(y_train)
            print(y_test)
            print(y_0)
            print(y_1)
            print(y_01)
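
            # note (added for clarity, not from the original docs): with the
            # default mode 'upscale_in_train' and p=0.5, surviving entries of
            # y_train are scaled by 1/(1-p) = 2.0, while y_test equals x exactly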

    """
    if not isinstance(p, (float, int, Variable)):
        raise TypeError("p argument should be a number or Variable")

    if isinstance(p, (int, float)):
        # fast return for p == 0
        if p == 0:
            return x
        elif p < 0 or p > 1:
            raise ValueError("p argument should be between 0 and 1")
    if mode not in ('downscale_in_infer', 'upscale_in_train'):
        raise ValueError(
            "mode argument should be 'downscale_in_infer' or 'upscale_in_train'"
        )
    if axis and not isinstance(axis, (int, list, tuple)):
        raise TypeError("datatype of axis argument should be int, list or tuple")

    if axis is None:  # commonly used dropout
        seed = None
        mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  # semantic transfer

        if _non_static_mode():
            if default_main_program().random_seed != 0:
                seed = default_main_program().random_seed

            if in_dygraph_mode():
                out, mask = _C_ops.final_state_dropout( x, None, p, not training, mode, \
                    seed if seed is not None else 0, seed is not None)

                return out
            out, mask = _C_ops.dropout(x, 'dropout_prob', p, 'is_test',
                                       not training, 'fix_seed', seed
                                       is not None, 'seed',
                                       seed if seed is not None else 0,
                                       'dropout_implementation', mode)
            return out

        helper = LayerHelper('dropout', **locals())
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'dropout')

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        mask = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)

        def get_attrs(prog, dropout_prob, is_test, seed):
            if (seed is None or seed == 0) and prog.random_seed != 0:
                seed = prog.random_seed

            if isinstance(dropout_prob,
                          Variable) and dropout_prob.shape != [1]:
                raise TypeError(
                    "Required p.shape == [1] if type(p) is Variable, but received p.shape = {}"
                    .format(p.shape))
            attrs = {
                'dropout_prob': dropout_prob,
                'is_test': is_test,
                'fix_seed': seed is not None,
                'seed': seed if seed is not None else 0,
                'dropout_implementation': mode,
            }
            return attrs

        attrs = get_attrs(helper.main_program, p, not training, seed)

        helper.append_op(type='dropout',
                         inputs={'X': [x]},
                         outputs={
                             'Out': [out],
                             'Mask': [mask]
                         },
                         attrs=attrs)
        return out
    else:  # sometimes called dropout_nd  # TODO: optimize with C++
        if not in_dynamic_mode():
            check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'dropout')
        dtype = x.dtype
        keep_prob = 1 - p
        if training:
            if p == 1.:
                return paddle.scale(x, scale=0.)

            scale_input = paddle.scale(
                x, scale=1 / keep_prob) if mode == 'upscale_in_train' else x

            #get mask shape
            input_shape = x.shape
            if not in_dynamic_mode():
                input_shape_tensor = paddle.shape(x)
            drop_axes = [axis] if isinstance(axis, int) else list(axis)
1111 1112
            if min(drop_axes) < 0 or max(drop_axes) > len(input_shape) - 1:
                raise ValueError("axis value should be greater than or equal to 0 and less than dimensions of x:{}, but get axis value:{} " \
1113 1114 1115
                                 .format(len(input_shape), max(drop_axes)))
            if len(drop_axes) > len(input_shape):
                raise ValueError(
                    "length of axis should not be greater than dimensions of x:{}, but get length of axis: {}"
                    .format(len(input_shape), len(drop_axes)))
            mask_shape = [1] * len(input_shape)
            if not in_dynamic_mode():
                for i in drop_axes:
                    mask_shape[i] = input_shape_tensor[i]
            else:
                for i in drop_axes:
                    mask_shape[i] = input_shape[i]
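            # e.g. with x.shape == [N, C, H, W] and axis == [0, 1], mask_shape
            # becomes [N, C, 1, 1]; the mask then broadcasts over H and W, so
            # each [H, W] feature map is kept or dropped as a whole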

            # get mask
            random_tensor = paddle.uniform(mask_shape,
                                           dtype='float32',
                                           min=0.,
                                           max=1.0)
            p = full(shape=[1], fill_value=p, dtype='float32')
            keep_mask = paddle.greater_equal(random_tensor, p)

            scale_input = paddle.cast(scale_input, dtype)
            keep_mask = paddle.cast(keep_mask, dtype)
            ret = paddle.multiply(scale_input, keep_mask, name=name)
            return ret
        else:  # test
            ret = paddle.scale(
                x, scale=keep_prob) if mode == 'downscale_in_infer' else x
            return ret


def dropout2d(x, p=0.5, training=True, data_format='NCHW', name=None):
    """
    Randomly zero out entire channels (in the batched input 4D tensor with the shape `NCHW`,
    a channel is a 2D feature map with the shape `HW`). Each channel will be zeroed out independently
    on every forward call with probability `p`, using samples from a Bernoulli distribution.

    See ``paddle.nn.functional.dropout`` for more details.

    Args:
        x (Tensor):  The input is 4-D Tensor with shape [N, C, H, W] or [N, H, W, C].
                     The data type is float32 or float64.
        p (float): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from `NCHW` or `NHWC` . The default is `NCHW` . When it is `NCHW` , the data is stored in the order of: [batch_size, input_channels, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout2d, has same shape and data type as `x` .


    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.dropout2d(x)  # train
            y_test = paddle.nn.functional.dropout2d(x, training=False)  # test
            for i in range(2):
                for j in range(3):
                    print(x.numpy()[i, j, :, :])
                    print(y_train.numpy()[i, j, :, :])  # may be all 0
                    print(y_test.numpy()[i, j, :, :])
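
        As a minimal sketch of the relationship to ``dropout`` (shape values
        chosen arbitrarily), ``dropout2d`` behaves like ``dropout`` with
        ``axis=[0, 1]`` for `NCHW` input:

        .. code-block:: python

            import paddle

            x = paddle.rand([2, 3, 4, 5])
            y1 = paddle.nn.functional.dropout2d(x)
            y2 = paddle.nn.functional.dropout(x, axis=[0, 1], mode="upscale_in_train")
            # y1 and y2 follow the same distribution: each [H, W] feature map
            # is kept or dropped as a whole (the sampled masks themselves
            # differ between the two calls)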
    """
    input_shape = x.shape
    if len(input_shape) != 4:
        raise ValueError("dimensions of x should be 4, but received {} != 4"\
        .format(len(input_shape)))

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    return dropout(x,
                   p=p,
                   axis=[0, 1] if data_format == 'NCHW' else [0, 3],
                   training=training,
                   mode="upscale_in_train",
                   name=name)


def dropout3d(x, p=0.5, training=True, data_format='NCDHW', name=None):
    """
    Randomly zero out entire channels (in the batched input 5D tensor with the shape `NCDHW`,
    a channel is a 3D feature map with the shape `DHW`). Each channel will be zeroed out independently
    on every forward call with probability `p`, using samples from a Bernoulli distribution.

    See ``paddle.nn.functional.dropout`` for more details.

    Args:
        x (Tensor):  The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C].
                     The data type is float32 or float64.
        p (float): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from ``NCDHW`` or ``NDHWC``. The default is ``NCDHW`` . When it is ``NCDHW`` , the data is stored in the order of: [batch_size, input_channels, input_depth, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout3d, has same shape and data type as `x` .


    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.dropout3d(x)  # train
            y_test = paddle.nn.functional.dropout3d(x, training=False)  # test
            print(x.numpy()[0, 0, :, :, :])
            print(y_train.numpy()[0, 0, :, :, :])  # may be all 0
            print(y_test.numpy()[0, 0, :, :, :])
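
        Analogously to ``dropout2d``, and only as a sketch (shape values chosen
        arbitrarily), ``dropout3d`` behaves like ``dropout`` with
        ``axis=[0, 1]`` for `NCDHW` input:

        .. code-block:: python

            import paddle

            x = paddle.rand([2, 3, 4, 5, 6])
            y1 = paddle.nn.functional.dropout3d(x)
            y2 = paddle.nn.functional.dropout(x, axis=[0, 1], mode="upscale_in_train")
            # each [D, H, W] feature map is kept or dropped as a whole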
    """

    input_shape = x.shape
    if len(input_shape) != 5:
        raise ValueError("dimensions of x should be 5, but received {} != 5" \
        .format(len(input_shape)))

    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    return dropout(x,
                   p=p,
                   axis=[0, 1] if data_format == 'NCDHW' else [0, 4],
                   training=training,
                   mode="upscale_in_train",
                   name=name)


def alpha_dropout(x, p=0.5, training=True, name=None):
    """
    Alpha Dropout is a type of Dropout that maintains the self-normalizing property.
    For an input with zero mean and unit standard deviation, the output of Alpha Dropout
    maintains the original mean and standard deviation of the input.
    Alpha Dropout fits well with the SELU activation function by randomly setting activations to the negative saturation value.

    Args:
        x (Tensor): The input tensor. The data type is float32 or float64.
        p (float | int): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout, has same shape and data type as `x`.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.array([[-1, 1], [-1, 1]]).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.alpha_dropout(x, 0.5)
            y_test = paddle.nn.functional.alpha_dropout(x, 0.5, training=False)
            print(x)
            print(y_train)
            # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly)
            print(y_test)
            # [[-1.,  1.], [-1.,  1.]] (y_test equals x since training=False)
    """
    if not isinstance(p, (float, int)):
        raise TypeError("p argument should be a float or int")
    if p < 0 or p > 1:
        raise ValueError("p argument should be between 0 and 1")

    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'alpha_dropout')

    if training:
        if p == 1:
            return paddle.scale(x, scale=0.)
        # get transformation params
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        alpha_p = -alpha * scale
        a = ((1 - p) * (1 + p * alpha_p**2))**-0.5
        b = -a * alpha_p * p
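        # a and b are the affine-correction constants from the SELU / Alpha
        # Dropout paper (arXiv:1706.02515): for zero-mean, unit-variance input
        # they restore zero mean and unit variance after dropped units are set
        # to alpha_p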

        dtype = x.dtype
        input_shape = x.shape

        # get mask
        random_tensor = paddle.uniform(input_shape,
                                       dtype='float32',
                                       min=0.,
                                       max=1.0)
        p = full(shape=[1], fill_value=p, dtype='float32')
        keep_mask = paddle.greater_equal(random_tensor, p)
        keep_mask = paddle.cast(keep_mask, dtype)
        drop_mask = paddle.subtract(
            full(shape=input_shape, fill_value=1., dtype=dtype), keep_mask)

        # apply mask
        b = full(shape=[1], fill_value=b, dtype=dtype)
        y = paddle.add(paddle.multiply(x, keep_mask),
                       paddle.scale(drop_mask, scale=alpha_p))
        res = paddle.add(paddle.scale(y, scale=a), b, name=name)
        return res
    else:  # test
        return x


def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
    """
    Pad tensor according to 'pad' and 'mode'.
    If mode is 'constant' and the length of pad is twice the number of dimensions of x,
    then x will be padded from the first dimension to the last dimension,
    according to 'pad' and 'value'.
    If mode is 'reflect', pad[0] and pad[1] must be no greater
    than width-1. The height and depth dimensions have the same condition.

    Parameters:
        x (Tensor): The input tensor with data type float32/float64/int32/int64.
        pad (Tensor | List[int] | Tuple[int]): The padding size with data type int.
            If mode is 'constant' and the length of pad is twice the number of dimensions of x,
            then x will be padded from the first dimension to the last dimension.
            Else: 1. If the input dimension is 3, then the pad has the form (pad_left,
            pad_right). 2. If the input dimension is 4, then the pad has the form (pad_left, pad_right, 
            pad_top, pad_bottom). 3. If the input dimension is 5, then the pad has the form 
            (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
        mode (str, optional): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'replicate' mode, uses input boundaries to pad the input tensor.
            When in 'circular' mode, uses circular input to pad the input tensor.
            Default is 'constant'.
        value (float32, optional): The value to fill the padded areas in 'constant' mode. Default is 0.0.
        data_format (str, optional): A string from: "NCL", "NLC", "NHWC", "NCHW", "NCDHW", "NDHWC". Specify the data format of
           the input data. Default is "NCHW".
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
                    
    Returns: 
        Tensor, a Tensor padded according to pad and mode and data type is same as input.

    Examples:
        .. code-block:: text

            x = [[[[[1., 2., 3.],
                    [4., 5., 6.]]]]]

            Case 0:
                pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
                mode = 'constant'
                value = 0
                Out = [[[[[0., 0., 0.],
                          [1., 2., 3.],
                          [4., 5., 6.],
                          [0., 0., 0.]]]]]

            Case 1:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'constant'
                value = 0
                Out = [[[[[0. 0. 0. 0. 0. 0. 0.]
                          [0. 0. 1. 2. 3. 0. 0.]
                          [0. 0. 4. 5. 6. 0. 0.]
                          [0. 0. 0. 0. 0. 0. 0.]]]]]

            Case 2:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'reflect'
                Out = [[[[[6. 5. 4. 5. 6. 5. 4.]
                          [3. 2. 1. 2. 3. 2. 1.]
                          [6. 5. 4. 5. 6. 5. 4.]
                          [3. 2. 1. 2. 3. 2. 1.]]]]]

            Case 3:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'replicate'
                Out = [[[[[1. 1. 1. 2. 3. 3. 3.]
                          [1. 1. 1. 2. 3. 3. 3.]
                          [4. 4. 4. 5. 6. 6. 6.]
                          [4. 4. 4. 5. 6. 6. 6.]]]]]

            Case 4:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'circular'
                Out = [[[[[5. 6. 4. 5. 6. 4. 5.]
                          [2. 3. 1. 2. 3. 1. 2.]
                          [5. 6. 4. 5. 6. 4. 5.]
                          [2. 3. 1. 2. 3. 1. 2.]]]]]

    Code Examples:
        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn.functional as F
            
            # example 1
            x_shape = (1, 1, 3)
            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
            y = F.pad(x, [0, 0, 0, 0, 2, 3], value=1, mode='constant', data_format="NCL")
            print(y)
            # [[[1. 1. 1. 2. 3. 1. 1. 1.]]]
            
            # example 2
            x_shape = (1, 1, 3)
            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
            y = F.pad(x, [2, 3], value=1, mode='constant', data_format="NCL")
            print(y)
            # [[[1. 1. 1. 2. 3. 1. 1. 1.]]]
            
            # example 3
            x_shape = (1, 1, 2, 3)
            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
            y = F.pad(x, [1, 2, 1, 1], value=1, mode='circular')
            print(y)
            # [[[[6. 4. 5. 6. 4. 5.]
            #    [3. 1. 2. 3. 1. 2.]
            #    [6. 4. 5. 6. 4. 5.]
            #    [3. 1. 2. 3. 1. 2.]]]]
    """
    assert mode in ['reflect', 'replicate', 'constant', 'circular'], \
            "mode should be one of constant, reflect, replicate, circular, but got {}.".format(mode)

    data_format = data_format.upper()
    assert data_format in ["NCL", "NCHW", "NCDHW", "NLC", "NHWC", "NDHWC"], \
        "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " \
        "but got {}".format(data_format)

    x_dim = len(x.shape)

    if mode == "constant" and isinstance(
            pad, (list, tuple)) and len(pad) == x_dim * 2:
        paddings = pad
        pad_value = value
        check_variable_and_dtype(x, 'x', [
            'float16', 'float32', 'float64', 'int32', 'int64', 'complex64',
            'complex128'
        ], "pad")

        helper = LayerHelper('pad', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(type='pad',
                         inputs={'X': x},
                         outputs={'Out': out},
                         attrs={
                             'paddings': paddings,
                             'pad_value': float(pad_value)
                         })
        return out

    assert x_dim in [
        3, 4, 5
    ], "input tensor dimension must be in [3, 4, 5] but got {}".format(x_dim)

    supported_format_map = {
        3: ["NCL", "NLC"],
        4: ["NCHW", "NHWC"],
        5: ["NCDHW", "NDHWC"],
    }
    assert data_format in supported_format_map[x_dim], \
    "input tensor dimension is {}, its data format should be in {} but got {}".format(
        x_dim, supported_format_map[x_dim], data_format)

    unsqueezed_dim = []
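    # 3-D and 4-D inputs are handled via the 5-D pad3d kernel: pad is
    # zero-extended, x is unsqueezed to 5-D, and the extra dimensions are
    # squeezed away again after padding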

    if isinstance(pad, Variable):
        if data_format in ["NCL", "NCHW", "NCDHW"]:
            data_format = "NCDHW"
            if x_dim == 3:
                pad = concat([zeros((4, ), dtype="int32"), pad], axis=0)
                unsqueezed_dim = [3, 4]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = concat([pad, zeros((2, ), dtype="int32")], axis=0)
                unsqueezed_dim = [2]
                x = unsqueeze(x, axis=unsqueezed_dim)
        elif data_format in ["NLC", "NHWC", "NDHWC"]:
            data_format = "NDHWC"
            if x_dim == 3:
                pad = concat([zeros((4, ), dtype="int32"), pad], axis=0)
                unsqueezed_dim = [2, 3]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = concat([pad, zeros((2, ), dtype="int32")], axis=0)
                unsqueezed_dim = [1]
                x = unsqueeze(x, axis=unsqueezed_dim)
    else:
        pad = list(pad)
        if data_format in ["NCL", "NCHW", "NCDHW"]:
            data_format = "NCDHW"
            if x_dim == 3:
                pad = [0, 0, 0, 0] + pad
                unsqueezed_dim = [3, 4]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = pad + [0, 0]
                unsqueezed_dim = [2]
                x = unsqueeze(x, axis=unsqueezed_dim)
        elif data_format in ["NLC", "NHWC", "NDHWC"]:
            data_format = "NDHWC"
            if x_dim == 3:
                pad = [0, 0, 0, 0] + pad
                unsqueezed_dim = [2, 3]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = pad + [0, 0]
                unsqueezed_dim = [1]
                x = unsqueeze(x, axis=unsqueezed_dim)

    if in_dygraph_mode():
        if isinstance(pad, Variable):
            pad = pad.numpy().tolist()
        out = _C_ops.final_state_pad3d(x, pad, mode, value, data_format)
    else:
        if _in_legacy_dygraph():
            if isinstance(pad, Variable):
                pad = pad.numpy().tolist()
            out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
                               "data_format", data_format, "name", name)
        else:
            attrs = {'mode': mode, 'value': value, 'data_format': data_format}
            inputs = {'X': [x]}
            if isinstance(pad, Variable):
                inputs['Paddings'] = [pad]
                attrs['paddings'] = []
            else:
                attrs['paddings'] = pad

            helper = LayerHelper('pad3d', **locals())

            dtype = helper.input_dtype(input_param_name='x')
            out = helper.create_variable_for_type_inference(dtype)
            helper.append_op(type='pad3d',
                             inputs=inputs,
                             outputs={"Out": out},
                             attrs=attrs)

    if len(unsqueezed_dim) != 0:
        out = squeeze(out, axis=unsqueezed_dim)

    return out


1562 1563 1564 1565 1566 1567 1568 1569 1570
def zeropad2d(x, padding, data_format="NCHW", name=None):
    """
    Pads the input tensor boundaries with zero according to 'padding'.

    Args:
        x(Tensor): The input tensor with data type float16/float32/float64/int32/int64.
        padding(int | Tensor | List[int] | Tuple[int]): The padding size with data type int.
            The input dimension should be 4 and pad has the form (pad_left, pad_right,
            pad_top, pad_bottom).
        data_format(str, optional): A string from: "NHWC", "NCHW". Specify the data format of
            the input data. Default: "NCHW".
        name(str, optional): The default value is None. Normally there is no need for user
            to set this property.

    Returns: 
        Tensor, padded with 0 according to pad and data type is same as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            import paddle.nn.functional as F

            x_shape = (1, 1, 2, 3)
            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
            y = F.zeropad2d(x, [1, 2, 1, 1])
            # [[[[0. 0. 0. 0. 0. 0.]
            #    [0. 1. 2. 3. 0. 0.]
            #    [0. 4. 5. 6. 0. 0.]
            #    [0. 0. 0. 0. 0. 0.]]]]
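
        ``zeropad2d`` is shorthand for constant padding; as a sketch, the call
        above is equivalent to:

        .. code-block:: python

            import paddle.nn.functional as F
            y = F.pad(x, [1, 2, 1, 1], mode='constant', value=0)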
    """

    return pad(x,
               pad=padding,
               mode='constant',
               value=0,
               data_format=data_format,
               name=name)


def cosine_similarity(x1, x2, axis=1, eps=1e-8):
    """
    Compute cosine similarity between x1 and x2 along axis.
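
    The result is computed as:

    .. math::

        result = \dfrac{x1 \cdot x2}{\max(\Vert x1 \Vert _2 \cdot \Vert x2 \Vert _2, eps)}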

    Parameters:
        x1 (Tensor): First input tensor with data type float32 or float64.
        x2 (Tensor): Second input tensor with data type float32 or float64.
        axis (int, optional): Dimension of vectors to compute cosine similarity. Default is 1.
        eps(float, optional): Small value to avoid division by zero. Default is 1e-8.
                    
    Returns: 
        Tensor, a Tensor representing cosine similarity between x1 and x2 along axis.

    Examples:
        .. code-block:: text

            Case 0:
                x1 = [[0.8024077  0.9927354  0.27238318 0.8344984 ]
                     [0.48949873 0.5797396  0.65444374 0.66510963]
                     [0.1031398  0.9614342  0.08365563 0.6796464 ]
                     [0.10760343 0.7461209  0.7726148  0.5801006 ]]
                x2 = [[0.62913156 0.1536727  0.9847992  0.04591406]
                     [0.9098952  0.15715368 0.8671125  0.3156102 ]
                     [0.4427798  0.54136837 0.5276275  0.32394758]
                     [0.3769419  0.8535014  0.48041078 0.9256797 ]]
                axis = 1
                eps = 1e-8
                Out: [0.5275037  0.8368967  0.75037485 0.9245899]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            paddle.seed(1)
            x1 = paddle.randn(shape=[2, 3])
            x2 = paddle.randn(shape=[2, 3])

            result = paddle.nn.functional.cosine_similarity(x1, x2, axis=0)
            print(result)
            # [0.97689527,  0.99996042, -0.55138415]
            
    """
    w12 = sum(paddle.multiply(x1, x2), axis=axis)
    w1 = sum(paddle.multiply(x1, x1), axis=axis)
    w2 = sum(paddle.multiply(x2, x2), axis=axis)
    n12 = sqrt(clip(w1 * w2, min=eps * eps))
    cos_sim = w12 / n12
    return cos_sim


def linear(x, weight, bias=None, name=None):
    r"""

    Fully-connected linear transformation operator. For each input :math:`X` ,
    the equation is:

    .. math::

        Out = XW + b

    where :math:`W` is the weight and :math:`b` is the bias.

    If the weight is a 2-D tensor of shape :math:`[in\_features, out\_features]` ,
    the input should be a multi-dimensional tensor of shape
    :math:`[batch\_size, *, in\_features]` , where :math:`*` means any number of
    additional dimensions. The linear operator multiplies the input tensor with the
    weight to produce an output tensor of shape :math:`[batch\_size, *, out\_features]` .
    If :math:`bias` is not None, the bias should be a 1-D tensor of shape
    :math:`[out\_features]` and will be added to the output.

    Parameters:
        x (Tensor): Input tensor. The data type should be float16, float32 or float64.
        weight (Tensor): Weight tensor. The data type should be float16, float32 or float64.
        bias (Tensor, optional): Bias tensor. The data type should be float16, float32 or float64.
                                 If it is set to None, no bias will be added to the output units.
        name (str, optional): Normally there is no need for user to set this parameter.
                              For detailed information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor, the shape is :math:`[batch\_size, *, out\_features]` and the
        data type is the same as the input :math:`x` .

    Examples:
        .. code-block:: python
          
          import paddle
          
          x = paddle.randn((3, 2), dtype="float32")
          # x: [[-0.32342386 -1.200079  ]
          #     [ 0.7979031  -0.90978354]
          #     [ 0.40597573  1.8095392 ]]
          weight = paddle.full(shape=[2, 4], fill_value="0.5", dtype="float32", name="weight")
          # weight: [[0.5 0.5 0.5 0.5]
          #          [0.5 0.5 0.5 0.5]]
          bias = paddle.ones(shape=[4], dtype="float32", name="bias")
          # bias: [1. 1. 1. 1.]
          y = paddle.nn.functional.linear(x, weight, bias)
          # y: [[0.23824859 0.23824859 0.23824859 0.23824859]
          #     [0.9440598  0.9440598  0.9440598  0.9440598 ]
          #     [2.1077576  2.1077576  2.1077576  2.1077576 ]]
    """
    if in_dygraph_mode():
        # TODO(jiabin): use addmm for a fast forward route
        return _C_ops.final_state_linear(x, weight, bias)
    else:
        if _in_legacy_dygraph():
            pre_bias = _C_ops.matmul_v2(x, weight, 'trans_x', False, 'trans_y',
                                        False)

            if bias is None:
                return pre_bias

            return _C_ops.elementwise_add(pre_bias, bias)
        else:
            helper = LayerHelper('linear', **locals())
            dtype = x.dtype

            check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                     'linear')
            check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                        'linear')

            inputs = {'X': [x], 'Y': [weight]}
            attrs = {'trans_x': False, 'trans_y': False}
            tmp = helper.create_variable_for_type_inference(dtype)
            helper.append_op(type='matmul_v2',
                             inputs=inputs,
                             outputs={'Out': tmp},
                             attrs=attrs)
            if bias is not None:
                res = helper.create_variable_for_type_inference(dtype)
                helper.append_op(type='elementwise_add',
                                 inputs={
                                     'X': [tmp],
                                     'Y': [bias]
                                 },
                                 outputs={'Out': [res]},
                                 attrs={'axis': len(x.shape) - 1})
            else:
                res = tmp
            return res


def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
    r"""
    Label smoothing is a mechanism to regularize the classifier layer and is called
    label-smoothing regularization (LSR).

    Label smoothing is proposed to encourage the model to be less confident,
    since optimizing the log-likelihood of the correct label directly may
    cause overfitting and reduce the ability of the model to adapt. Label
    smoothing replaces the ground-truth label :math:`y` with the weighted sum
    of itself and some fixed distribution :math:`\mu`. For class :math:`k`,
    i.e.

    .. math::

        \tilde{y_k} = (1 - \epsilon) * y_k + \epsilon * \mu_k,

    where :math:`1 - \epsilon` and :math:`\epsilon` are the weights
    respectively, and :math:`\tilde{y_k}` is the smoothed label. Usually
    a uniform distribution is used for :math:`\mu`.

    See more details about label smoothing in https://arxiv.org/abs/1512.00567.

    Parameters:
        label(Tensor): The input variable containing the label data. The
                        label data should use one-hot representation. It's
                        a multidimensional tensor with a shape of
                        :math:`[N_1, ..., Depth]`, where Depth is class number. The dtype can be "float32" and "float64".
        prior_dist(Tensor, optional): The prior distribution to be used to smooth
                        labels. If not provided, a uniform distribution
                        is used. It's a multidimensional tensor with a shape of
                        :math:`[1, class\_num]` . The default value is None.
        epsilon(float, optional): The weight used to mix up the original ground-truth
                        distribution and the fixed distribution. The default value is
                        0.1.
        name(str, optional): The default value is None. Normally there is no need for user
                        to set this property. For more information, please refer to
                        :ref:`api_guide_Name`.

    Returns:
        Tensor: The tensor containing the smoothed labels.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            
            x_data = np.array([[[0, 1, 0],
                                [1, 0, 1]]]).astype("float32")
            print(x_data.shape)
            paddle.disable_static()
            x = paddle.to_tensor(x_data, stop_gradient=False)
            output = paddle.nn.functional.label_smooth(x)
            print(output)
            
            #[[[0.03333334 0.93333334 0.03333334]
            #  [0.93333334 0.03333334 0.93333334]]]
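            # check: with epsilon = 0.1 and 3 classes,
            # 1*(1 - 0.1) + 0.1/3 = 0.93333334 and 0*(1 - 0.1) + 0.1/3 = 0.03333334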
    """
    if epsilon > 1. or epsilon < 0.:
        raise ValueError("The value of epsilon must be between 0 and 1.")

    if in_dygraph_mode():
        return _C_ops.final_state_label_smooth(label, prior_dist,
                                               float(epsilon))

    elif paddle.in_dynamic_mode():
        return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon))

    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                             'label_smooth')

    helper = LayerHelper("label_smooth", **locals())
    label.stop_gradient = True
    smooth_label = helper.create_variable_for_type_inference(label.dtype)
    helper.append_op(type="label_smooth",
                     inputs={
                         "X": label,
                         "PriorDist": prior_dist
                     } if prior_dist else {"X": label},
                     outputs={"Out": smooth_label},
                     attrs={"epsilon": float(epsilon)})
    return smooth_label


def class_center_sample(label, num_classes, num_samples, group=None):
    """
    Class center sample method is proposed in the paper PartialFC, which samples only a subset of the class centers.
    The process of sampling the subset of class centers is straightforward:

    1. First select the positive class centers;
    2. Then randomly sample negative class centers.

    Specifically, given a label tensor, shape [batch_size], select all the positive class centers and randomly 
    sample negative class centers, then remap the input label tensor using the sampled class centers.

    For more information, see Partial FC: Training 10 Million Identities on a Single Machine,
    arxiv: https://arxiv.org/abs/2010.05222
    
    .. hint::
        If the number of the positive class centers is greater than the input num_samples, it keeps all the positive 
        class centers and the shape of sampled_class_center will be [num_positive_class_centers].

        The API supports CPU, single GPU and multi GPU.

        For data parallel mode, set ``group=False``.

        For model parallel mode, set ``group=None`` or the group instance returned by paddle.distributed.new_group.

    Args:
        label (Tensor): 1-D tensor with shape [N], each label in [0, num_classes)
        num_classes (int): A positive integer to specify the number of classes at local rank.
            Note that num_classes of each GPU can be different.
        num_samples (int): A positive integer to specify the number of class centers to sample.
        group (Group, optional): The group instance returned by paddle.distributed.new_group,
            or ``None`` for the global default group, or ``False`` for data parallel (no communication across ranks).
            Default is ``None``.

    Returns:
        Tuple of two ``Tensor`` : (remapped_label, sampled_class_center), remapped label using sampled class center,
        sampled class center from [0, num_classes).

    Examples:

    .. code-block:: python
        :name: code-example1

        # CPU or single GPU
        import paddle
        num_classes = 20
        batch_size = 10
        num_samples = 6
        label = paddle.randint(low=0, high=num_classes, shape=[batch_size], dtype='int64')
        remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample(label, num_classes, num_samples)

        print(label)
        print(remapped_label)
        print(sampled_class_index)

        # the output is
        #Tensor(shape=[10], dtype=int64, place=CPUPlace, stop_gradient=True,
        #       [11, 5 , 1 , 3 , 12, 2 , 15, 19, 18, 19])
        #Tensor(shape=[10], dtype=int64, place=CPUPlace, stop_gradient=True,
        #       [4, 3, 0, 2, 5, 1, 6, 8, 7, 8])
        #Tensor(shape=[9], dtype=int64, place=CPUPlace, stop_gradient=True,
        #       [1 , 2 , 3 , 5 , 11, 12, 15, 18, 19])

    .. code-block:: python
        :name: code-example2

        # required: distributed
        # Multi GPU, test_class_center_sample.py
        import paddle
        import paddle.distributed as dist
        strategy = dist.fleet.DistributedStrategy()
        dist.fleet.init(is_collective=True, strategy=strategy)
        batch_size = 10
        num_samples = 6
        rank_id = dist.get_rank()
        # num_classes of each GPU can be different, e.g num_classes_list = [10, 8]
        num_classes_list = [10, 10]
        num_classes = paddle.sum(paddle.to_tensor(num_classes_list))
        label = paddle.randint(low=0, high=num_classes.item(), shape=[batch_size], dtype='int64')
        label_list = []
        dist.all_gather(label_list, label)
        label = paddle.concat(label_list, axis=0)
        remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample(label, num_classes_list[rank_id], num_samples)

        print(label)
        print(remapped_label)
        print(sampled_class_index)

        #python -m paddle.distributed.launch --gpus=0,1 test_class_center_sample.py
        # rank 0 output:
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [10, 17, 15, 11, 9 , 12, 18, 18, 17, 18, 19, 2 , 8 , 13, 11, 13, 9 , 10, 0 , 4 ])
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [6 , 11, 10, 7 , 4 , 8 , 12, 12, 11, 12, 13, 1 , 3 , 9 , 7 , 9 , 4 , 6 , 0 , 2 ])
        #Tensor(shape=[6], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [0, 2, 4, 8, 9, 3])
        
        # rank 1 output:
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
        #       [10, 17, 15, 11, 9 , 12, 18, 18, 17, 18, 19, 2 , 8 , 13, 11, 13, 9 , 10, 0 , 4 ])
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
        #       [6 , 11, 10, 7 , 4 , 8 , 12, 12, 11, 12, 13, 1 , 3 , 9 , 7 , 9 , 4 , 6 , 0 , 2 ])
        #Tensor(shape=[7], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
        #       [0, 1, 2, 3, 5, 7, 8])
    """
    if not (group == False or group is None or hasattr(group, 'is_member')):
        raise ValueError(
            'Expected group to be False, None or an instance of paddle.distributed.collective.Group \
             (got group: {})'.format(group))

    if hasattr(group, 'is_member') and not group.is_member():
        return

    ring_id = 0
    rank = 0
    nranks = 1
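    # resolve rank / world size: group is None means the global default group,
    # a Group instance means that group, and group == False means data parallel
    # mode (no cross-rank communication)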
    if group != False:
        if core.is_compiled_with_dist():
            parallel_env = paddle.distributed.ParallelEnv()
            global_rank = parallel_env.rank
            rank = global_rank if group is None else group.get_group_rank(
                global_rank)
            nranks = parallel_env.world_size if group is None else group.nranks

    if num_samples > num_classes:
        raise ValueError(
            'Expected num_samples less than or equal to {}, got num_samples {}'.
            format(num_classes, num_samples))

    label_size = 1
    for dim in list(label.shape):
        label_size *= dim
    if label_size != -1 and label_size < 1:
        raise ValueError('Expected label_size > 0 \
             (got label_size: {})'.format(label_size))

    label_dims = len(list(label.shape))
    if label_dims != 1:
        raise ValueError('Expected label_dims == 1 \
             (got label_dims: {})'.format(label_dims))

    seed = None
    if (seed is None or seed == 0) and default_main_program().random_seed != 0:
        seed = default_main_program().random_seed

    if in_dygraph_mode():
        return _C_ops.final_state_class_center_sample(
            label, num_classes, num_samples, ring_id, rank, nranks, seed
            is not None, seed if seed is not None else 0)
    elif paddle.in_dynamic_mode():
        remapped_label, sampled_class_center = _C_ops.class_center_sample(
            label, 'num_classes', num_classes, 'num_samples', num_samples,
            'ring_id', ring_id, 'nranks', nranks, 'rank', rank, 'fix_seed',
            seed is not None, 'seed', seed if seed is not None else 0)
        return remapped_label, sampled_class_center

    check_variable_and_dtype(label, 'label', ['int64', 'int32'],
                             'class_center_sample')
    op_type = 'class_center_sample'
    helper = LayerHelper(op_type, **locals())
    remapped_label = helper.create_variable_for_type_inference(
        dtype=label.dtype)
    sampled_class_center = helper.create_variable_for_type_inference(
        dtype=label.dtype)
    helper.append_op(type=op_type,
                     inputs={'Label': label},
                     outputs={
                         'RemappedLabel': remapped_label,
                         'SampledLocalClassCenter': sampled_class_center
                     },
                     attrs={
                         'num_classes': num_classes,
                         'num_samples': num_samples,
                         'ring_id': ring_id,
                         'nranks': nranks,
                         'rank': rank,
                         'fix_seed': seed is not None,
                         'seed': seed if seed is not None else 0
                     })
    return remapped_label, sampled_class_center


def fold(x,
         output_sizes,
         kernel_sizes,
         strides=1,
         paddings=0,
         dilations=1,
         name=None):
    r"""
    
    This Op combines an array of sliding local blocks into a large containing
    tensor, also known as col2im when operated on a batched 2D image tensor. Fold calculates each
    combined value in the resulting large tensor by summing all values from all containing blocks.


    For each input :math:`x` with shape [N, C_in, L], the output shape [N, C_out, H_out, W_out]
    can be calculated as follows.

    .. math::

        H_{out} &= output\_sizes[0]

        W_{out} &= output\_sizes[1]

        C_{out} &= \frac{C_{in}}{kernel\_sizes[0] \times kernel\_sizes[1]}

    Parameters:
        x(Tensor):                3-D Tensor, input tensor of format [N, C, L],
                                  data type can be float32 or float64
        output_sizes(int|list|tuple):       The size of the output, should be [output_size_h, output_size_w]
                                  or an integer o treated as [o, o].
        kernel_sizes(int|list|tuple):   The size of convolution kernel, should be [k_h, k_w]
                                  or an integer k treated as [k, k].
        strides(int|list|tuple):        The strides, should be [stride_h, stride_w]
                                  or an integer stride treated as [stride, stride].
                                  For default, strides will be [1, 1].
        paddings(int|list|tuple):       The paddings of each dimension, should be
                                  [padding_top, padding_left, padding_bottom, padding_right]
                                  or [padding_h, padding_w] or an integer padding.
                                  If [padding_h, padding_w] was given, it will expanded to
                                  [padding_h, padding_w, padding_h, padding_w]. If an integer
                                  padding was given, [padding, padding, padding, padding] will
                                  be used. For default, paddings will be [0, 0, 0, 0]
        dilations(int|list|tuple):      the dilations of convolution kernel, should be
                                  [dilation_h, dilation_w], or an integer dilation treated as
                                  [dilation, dilation]. For default, it will be [1, 1].
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`


    Returns:
        The tensor formed by combining a group of sliding local blocks.
        The output shape is [N, Cout, H, W] as described above.

    Examples:

        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.randn([2,3*2*2,12])
            y = F.fold(x, output_sizes=[4, 5], kernel_sizes=2)
            # y.shape = [2,3,4,5]
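            # shape check: C_out = (3*2*2) / (2*2) = 3, H_out = 4, W_out = 5,
            # and the number of blocks L = ((4-2)/1 + 1) * ((5-2)/1 + 1) = 12
            # matches the last dimension of x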

    """

    helper = LayerHelper("fold", **locals())

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')

    assert len(x.shape) == 3, \
            "input should be in the format of [N, C, L]"

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if isinstance(output_sizes, int):
        output_sizes = [output_sizes, output_sizes]
    else:
        assert _is_list_or_tuple_(output_sizes) and (len(output_sizes) == 2), \
            "output_sizes should either be an integer or a list/tuple of two integers"

    if isinstance(kernel_sizes, int):
        kernel_sizes = [kernel_sizes, kernel_sizes]
    else:
        assert _is_list_or_tuple_(kernel_sizes) and (len(kernel_sizes) == 2), \
            "kernel_sizes should either be an integer or a list/tuple of two integers"

    if isinstance(strides, int):
        strides = [strides, strides]
    else:
        assert _is_list_or_tuple_(strides) and (len(strides) == 2), \
            "strides should either be an integer or a list/tuple of two integers"

    if isinstance(dilations, int):
        dilations = [dilations, dilations]
    else:
        assert _is_list_or_tuple_(dilations) and (len(dilations) == 2), \
            "dilations should either be an integer or a list/tuple of two integers"

    if isinstance(paddings, int):
        paddings = [paddings] * 4
    elif isinstance(paddings, (list, tuple)):
        paddings = list(paddings)
        if len(paddings) == 2:
            paddings = paddings * 2
        elif len(paddings) == 4:
            pass
        else:
            raise ValueError(
                "paddings should either be an integer or a list of 2 or 4 integers"
            )
    else:
        raise ValueError(
            "Unexpected type of paddings, it should be either an integer or a list"
            "of 2 or 4 integers")

    if in_dygraph_mode():
        out = _C_ops.final_state_fold(x, output_sizes, kernel_sizes, strides,
                                      paddings, dilations)
    elif in_dynamic_mode():
        out = _C_ops.fold(x, "output_sizes", output_sizes, "kernel_sizes",
                          kernel_sizes, "strides", strides, "paddings",
                          paddings, "dilations", dilations)
    else:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type="fold",
                         inputs={"X": x},
                         outputs={"Y": out},
                         attrs={
                             "output_sizes": output_sizes,
                             "kernel_sizes": kernel_sizes,
                             "strides": strides,
                             "paddings": paddings,
                             "dilations": dilations
                         })
    return out