#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings

import paddle
from ...fluid.framework import in_dygraph_mode, default_main_program, _varbase_creator
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.tensor import fill_constant
from ...tensor import concat
from ...tensor.creation import zeros
from paddle.static import Variable
from ...fluid import core, layers, dygraph_utils
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from ...tensor.manipulation import squeeze
from ...tensor.manipulation import unsqueeze
from ...tensor import clip
from ...tensor import sum
from ...tensor import sqrt
from paddle import _C_ops

# TODO: define the common functions to build a neural network
from ...fluid.layers import unfold  # noqa: F401

__all__ = []


def interpolate(x,
                size=None,
                scale_factor=None,
                mode='nearest',
                align_corners=False,
                align_mode=0,
                data_format='NCHW',
                name=None):
    """

    This op resizes a batch of images.
    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
    or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
    (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    where in_w is the width of the input tensor, in_h is the height of the input tensor,
    and in_d is the depth of the input tensor.
    The resizing only applies to the three dimensions (depth, height and width).

    Supporting resample methods:
        'linear' : Linear interpolation
        'bilinear' : Bilinear interpolation
        'trilinear' : Trilinear interpolation
        'nearest' : Nearest neighbor interpolation
        'bicubic' : Bicubic interpolation
        'area' : Area interpolation

    Linear interpolation is the method of using a line connecting two known quantities
    to determine the value of an unknown quantity between the two known quantities.

    Nearest neighbor interpolation is to perform nearest neighbor interpolation
    in both the 3rd dimension (the height direction) and the 4th dimension (the width
    direction) on the input tensor.

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
    The linear interpolation is performed on three directions.

    align_corners and align_mode are optional parameters, and the calculation
    method of interpolation can be selected by them.

    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.

    Area interpolation is to perform area interpolation
    in the 3rd dimension (the height direction), the 4th dimension (the width
    direction) and the 5th dimension (the depth direction) on the input tensor. Setting
    mode to 'area' will directly call `paddle.nn.functional.adaptive_avg_pool1d`,
    `paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`.

    Example:

    .. code-block:: text

        For scale_factor:
            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
              scale_factor = float(in_size/out_size)

        Linear interpolation:
            if:
                align_corners = False , align_mode = 0
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = W_{in} * scale_{factor}

        Nearest neighbor interpolation:

              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})

        Bilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Bicubic interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.

    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.

    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation.

    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation

    Parameters:
        x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of the image resize
             layer. The shape is (out_w, ) when the input is a 3-D Tensor, (out_h, out_w)
             when the input is a 4-D Tensor, and (out_d, out_h, out_w) when the input is a 5-D Tensor.
             Default: None. If a list/tuple, each element can be an integer or a Tensor of shape [1].
             If a Tensor, it should be a 1-D Tensor.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set,
             and :attr:`size` has a higher priority than :attr:`scale_factor`. If it is a list, a tuple
             or a Tensor, its length has to match the number of resized dimensions of the input.
             Default: None.
        mode (str): The resample method. It supports 'linear', 'area', 'nearest', 'bilinear',
                       'bicubic' and 'trilinear' currently. Default: 'nearest'
        align_corners(bool) :  An optional bool. If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
                               corner pixels. This only has an effect when mode is 'linear', 'bilinear', 'bicubic' or 'trilinear'.
                               Default: False
        align_mode(int)  :  An optional flag for linear/bilinear/trilinear interpolation. Refer to the formula
                            in the example above: it can be '0' for src_idx = scale_factor*(dst_index+0.5)-0.5,
                            or '1' for src_idx = scale_factor*dst_index.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
        a 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or a 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
                    'trilinear', 'bicubic', 'area' or 'nearest' currently.
        ValueError: 'linear' only supports 3-D tensor.
        ValueError: 'bilinear', 'bicubic' and 'nearest' only support 4-D tensor.
        ValueError: 'trilinear' only supports 5-D tensor.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 1 for input 3-D tensor.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: size length should be 3 for input 5-D tensor.
        ValueError: scale_factor should be greater than zero.
        TypeError: align_corners should be a bool value.
        ValueError: align_mode can only be '0' or '1'.
        ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            import paddle.nn.functional as F

            # given out size
            input_data = np.random.rand(2,3,6,10).astype("float32")
            x = paddle.to_tensor(input_data)
            output_1 = F.interpolate(x=x, size=[12,12])
            print(output_1.shape)
            # [2L, 3L, 12L, 12L]

            # given scale
            output_2 = F.interpolate(x=x, scale_factor=[2,1])
            print(output_2.shape)
            # [2L, 3L, 12L, 10L]

            # bilinear interp
            output_3 = F.interpolate(x=x, scale_factor=[2,1], mode="bilinear")
            print(output_3.shape)
            # [2L, 3L, 12L, 10L]
    """
    data_format = data_format.upper()
    resample = mode.upper()
    resample_type = mode.lower()

    resample_methods = [
        'LINEAR',
        'BILINEAR',
        'TRILINEAR',
        'NEAREST',
        'BICUBIC',
        'AREA',
    ]
    if resample not in resample_methods:
        raise ValueError(
            "The 'mode' of image_resize can only be 'area', 'linear', 'bilinear', "
            "'trilinear', 'bicubic' or 'nearest' currently.")

    if resample in ['LINEAR'] and len(x.shape) != 3:
        raise ValueError("'linear' only supports 3-D tensor.")

    if resample in ['BILINEAR', 'NEAREST', 'BICUBIC'] and len(x.shape) != 4:
        raise ValueError(
            "'bilinear', 'bicubic' and 'nearest' only support 4-D tensor.")
    if resample == 'TRILINEAR' and len(x.shape) != 5:
        raise ValueError("'trilinear' only supports 5-D tensor.")

    if size is None and scale_factor is None:
        raise ValueError("One of size and scale_factor must not be None.")

    if not isinstance(align_corners, bool):
        raise TypeError("Attr align_corners should be a bool value")

    if align_mode != 0 and align_mode != 1:
        raise ValueError("align_mode can only be 0 or 1")
    if align_corners != 0 and resample == 'NEAREST':
        raise ValueError(
            "align_corners option can only be set with the interpolating modes: linear | bilinear | bicubic | trilinear"
        )

    if resample == 'AREA' and len(x.shape) == 3:
        return paddle.nn.functional.adaptive_avg_pool1d(x, size)

    if resample == 'AREA' and len(x.shape) == 4:
        return paddle.nn.functional.adaptive_avg_pool2d(x, size)
    if resample == 'AREA' and len(x.shape) == 5:
        return paddle.nn.functional.adaptive_avg_pool3d(x, size)

    helper = LayerHelper('{}_interp_v2'.format(resample_type), **locals())
    dtype = helper.input_dtype(input_param_name='x')
    if len(x.shape) == 3 and data_format not in ['NCW', 'NWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCW` or `NWC` supported for 3-D input.")
    elif len(x.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCHW` or `NHWC` supported for 4-D input.")
    elif len(x.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCDHW` or `NDHWC` supported for 5-D input.")

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

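    # The underlying op only distinguishes channel-first from channel-last
    # layouts, so the 3-D and 5-D formats are folded into NCHW/NHWC here.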
    if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
        data_layout = 'NCHW'
    if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC':
        data_layout = 'NHWC'

    if resample == 'NEAREST':
        align_corners = False

    inputs = {"X": x}
    attrs = {
        "out_d": -1,
        "out_h": -1,
        "out_w": -1,
        "interp_method": resample_type,
        "align_corners": align_corners,
        "align_mode": align_mode,
        "data_layout": data_layout
    }

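    # -1 means "not set": the out_* entries are overwritten when `size` is
    # given; otherwise a `scale` attr (or Scale input) is added below.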
    out_shape = size
    scale = scale_factor
    if out_shape is not None and scale is not None:
        raise ValueError("Only one of size or scale_factor should be defined.")
    if out_shape is not None:
        if isinstance(out_shape, Variable) and not in_dygraph_mode():
            out_shape.stop_gradient = True
            inputs['OutSize'] = out_shape
        else:
            if in_dygraph_mode():
                if isinstance(out_shape, Variable):
                    out_shape = list(out_shape.numpy())
                for i, dim in enumerate(out_shape):
                    if isinstance(dim, Variable):
                        out_shape[i] = dim.numpy()[0]
            if not (_is_list_or_tuple_(out_shape)):
                raise TypeError("size should be a list or tuple or Tensor.")
            # Validate the shape
            contain_var = False
            for dim_idx, dim_size in enumerate(out_shape):
                if isinstance(dim_size, Variable):
                    contain_var = True
                    continue
                assert dim_size > 0, (
                    "Each dimension size given in out_shape must be greater than 0."
                )

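            # If any target dimension is a Tensor, the whole shape is passed
            # through the SizeTensor input (1-element int32 tensors) so it is
            # resolved at runtime; plain ints stay in size_list for the attrs.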
            if contain_var:
                new_size_tensor = []
                size_list = []
                for dim in out_shape:
                    if isinstance(dim, Variable):
                        dim.stop_gradient = True
                        new_size_tensor.append(dim)
                        size_list.append(-1)
                    else:
                        assert (isinstance(dim, int))
                        temp_out = helper.create_variable_for_type_inference(
                            'int32')
                        fill_constant(
                            [1], 'int32', dim, force_cpu=True, out=temp_out)
                        new_size_tensor.append(temp_out)
                        size_list.append(dim)
                inputs['SizeTensor'] = new_size_tensor

            if len(x.shape) == 3:
                if len(out_shape) != 1:
                    raise ValueError(
                        "size length should be 1 for input 3-D tensor.")
                if contain_var:
                    attrs['out_w'] = size_list[0]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_w'] = out_shape[0]
            if len(x.shape) == 4:
                if len(out_shape) != 2:
                    raise ValueError("size length should be 2 for "
                                     "input 4-D tensor.")
                if contain_var:
                    attrs['out_h'] = size_list[0]
                    attrs['out_w'] = size_list[1]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_h'] = out_shape[0]
                    attrs['out_w'] = out_shape[1]
            if len(x.shape) == 5:
                if len(out_shape) != 3:
                    raise ValueError("size length should be 3 for "
                                     "input 5-D tensor.")
                if contain_var:
                    attrs['out_d'] = size_list[0]
                    attrs['out_h'] = size_list[1]
                    attrs['out_w'] = size_list[2]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_d'] = out_shape[0]
                    attrs['out_h'] = out_shape[1]
                    attrs['out_w'] = out_shape[2]

    else:
        if in_dygraph_mode() and isinstance(scale, Variable):
            scale = list(scale.numpy())
        if isinstance(scale, Variable):
            scale.stop_gradient = True
            inputs["Scale"] = scale
        elif isinstance(scale, float) or isinstance(scale, int):
            if scale <= 0:
                raise ValueError("Attr(scale) should be greater than zero.")
            scale_list = []
            for i in range(len(x.shape) - 2):
                scale_list.append(scale)
            attrs['scale'] = list(map(float, scale_list))
        elif isinstance(scale, list) or isinstance(scale, tuple):
            if len(scale) != len(x.shape) - 2:
                raise ValueError("scale_shape length should be {} for "
                                 "input {}-D tensor.".format(
                                     len(x.shape) - 2, len(x.shape)))
            for value in scale:
                if value <= 0:
                    raise ValueError("Attr(scale) should be greater than zero.")
            attrs['scale'] = list(map(float, scale))
        else:
            raise TypeError(
                "Attr(scale)'s type should be float, int, list, tuple, or Tensor."
            )

    if in_dygraph_mode():
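        # Flatten attrs into alternating (name, value) pairs; in dygraph mode
        # the _C_ops kernels take them positionally.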
        attr_list = []
        for k, v in attrs.items():
            attr_list.append(k)
            attr_list.append(v)
        dy_attr = tuple(attr_list)

        if resample_type == "linear":
            out = _C_ops.linear_interp_v2(x, *dy_attr)
        elif resample_type == "bilinear":
            out = _C_ops.bilinear_interp_v2(x, *dy_attr)
        elif resample_type == "trilinear":
            out = _C_ops.trilinear_interp_v2(x, *dy_attr)
        elif resample_type == "nearest":
            out = _C_ops.nearest_interp_v2(x, *dy_attr)
        elif resample_type == "bicubic":
            out = _C_ops.bicubic_interp_v2(x, *dy_attr)
        return out
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='{}_interp_v2'.format(resample_type),
        inputs=inputs,
        outputs={"Out": out},
        attrs=attrs)
    return out


def upsample(x,
             size=None,
             scale_factor=None,
             mode='nearest',
             align_corners=False,
             align_mode=0,
             data_format='NCHW',
             name=None):
    """
    This op resizes a batch of images.

    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
    or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
    (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    where in_w is the width of the input tensor, in_h is the height of the input tensor,
    and in_d is the depth of the input tensor.
    The resizing only applies to the three dimensions (depth, height and width).

    Supporting resample methods:
        'linear' : Linear interpolation
        'bilinear' : Bilinear interpolation
        'trilinear' : Trilinear interpolation
        'nearest' : Nearest neighbor interpolation
        'bicubic' : Bicubic interpolation

    Linear interpolation is the method of using a line connecting two known quantities
    to determine the value of an unknown quantity between the two known quantities.

    Nearest neighbor interpolation is to perform nearest neighbor interpolation
    in both the 3rd dimension (the height direction) and the 4th dimension (the width
    direction) on the input tensor.

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
    The linear interpolation is performed on three directions.

    align_corners and align_mode are optional parameters, and the calculation
    method of interpolation can be selected by them.

    Area interpolation is to perform area interpolation
    in the 3rd dimension (the height direction), the 4th dimension (the width
    direction) and the 5th dimension (the depth direction) on the input tensor. Setting
    mode to 'area' will directly call `paddle.nn.functional.adaptive_avg_pool1d`,
    `paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`.

    Example:

    .. code-block:: text

        For scale_factor:
            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
              scale_factor = float(in_size/out_size)

        Linear interpolation:
            if:
                align_corners = False , align_mode = 0
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = W_{in} * scale_{factor}

        Nearest neighbor interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})
          else:
              align_corners = True
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = round(H_{in} * scale_{factor})
              W_out = round(W_{in} * scale_{factor})

        Bilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Bicubic interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.

    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.

    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation

    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation.

    Parameters:
        x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of the image resize
             layer. The shape is (out_w, ) when the input is a 3-D Tensor, (out_h, out_w)
             when the input is a 4-D Tensor, and (out_d, out_h, out_w) when the input is a 5-D Tensor.
             Default: None. If a list/tuple, each element can be an integer or a Tensor of shape [1].
             If a Tensor, it should be a 1-D Tensor.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set,
             and :attr:`size` has a higher priority than :attr:`scale_factor`. If it is a list, a tuple
             or a Tensor, its length has to match the number of resized dimensions of the input.
             Default: None.
        mode (str): The resample method. It supports 'linear', 'nearest', 'bilinear',
                       'bicubic' and 'trilinear' currently. Default: 'nearest'
        align_corners(bool) :  An optional bool. If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
                               corner pixels.
                               Default: False
        align_mode(int)  :  An optional flag for linear/bilinear/trilinear interpolation. Refer to the formula
                            in the example above: it can be '0' for src_idx = scale_factor*(dst_index+0.5)-0.5,
                            or '1' for src_idx = scale_factor*dst_index.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
        a 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or a 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
                    'trilinear', 'bicubic', or 'nearest' currently.
        ValueError: 'linear' only supports 3-D tensor.
        ValueError: 'bilinear', 'bicubic' and 'nearest' only support 4-D tensor.
        ValueError: 'trilinear' only supports 5-D tensor.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 1 for input 3-D tensor.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: size length should be 3 for input 5-D tensor.
        ValueError: scale_factor should be greater than zero.
        TypeError: align_corners should be a bool value.
        ValueError: align_mode can only be '0' or '1'.
        ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            import paddle.nn.functional as F

            input_data = np.random.rand(2,3,6,10).astype("float32")
            input = paddle.to_tensor(input_data)
            output = F.upsample(x=input, size=[12,12])
            print(output.shape)
            # [2L, 3L, 12L, 12L]

    """
    return interpolate(x, size, scale_factor, mode, align_corners, align_mode,
                       data_format, name)

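# A minimal usage sketch (illustrative only): `upsample` simply forwards its
# arguments to `interpolate`, so the two calls below are expected to produce
# the same result.
#
#     import paddle
#     import paddle.nn.functional as F
#     x = paddle.rand([2, 3, 6, 10])
#     y1 = F.upsample(x, size=[12, 12], mode="bilinear")
#     y2 = F.interpolate(x, size=[12, 12], mode="bilinear")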

def bilinear(x1, x2, weight, bias=None, name=None):
    """

    This layer performs bilinear on two inputs.
    See :ref:`api_nn_Bilinear` for details and output shape.

    Parameters:
       x1 (Tensor): the first input tensor, it's data type should be float32, float64.
       x2 (Tensor): the second input tensor, it's data type should be float32, float64.
       weight (Parameter): The learnable weights of this layer, shape is [out_features, in1_features, in2_features].
       bias (Parameter, optional): The learnable bias(Bias) of this layer, shape is [1, out_features]. If it is set to None, no bias will be added to the output units. The default value is None.
       name (str, optional): The default value is None. Normally there is no need for user
           to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Returns:
       Tensor: A 2-D Tensor of shape [batch_size, out_features].

    Examples:
       .. code-block:: python

        import paddle
        import numpy
        import paddle.nn.functional as F

        x1 = numpy.random.random((5, 5)).astype('float32')
        x2 = numpy.random.random((5, 4)).astype('float32')
        w = numpy.random.random((1000, 5, 4)).astype('float32')
        b = numpy.random.random((1, 1000)).astype('float32')

        result = F.bilinear(paddle.to_tensor(x1), paddle.to_tensor(x2), paddle.to_tensor(w), paddle.to_tensor(b))           # result shape [5, 1000]

    """

    if in_dygraph_mode():
        return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)

    check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
    check_variable_and_dtype(x2, 'x2', ['float32', 'float64'], 'bilinear')

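    # The bilinear tensor product computes, per output feature j:
    #     out[i, j] = x1[i]^T * weight[j] * x2[i] + bias[0, j]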
    inputs = {"X": x1, "Y": x2, "Weight": weight}
    if bias is not None:
        inputs["Bias"] = bias

    helper = LayerHelper("bilinear", **locals())
    out = helper.create_variable_for_type_inference(dtype=x1.dtype)

    helper.append_op(
        type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out})

    return out


def dropout(x,
            p=0.5,
            axis=None,
            training=True,
            mode="upscale_in_train",
            name=None):
    """
    Dropout is a regularization technique for reducing overfitting by preventing
    neuron co-adaptation during training. The dropout operator randomly sets the
    outputs of some units to zero, while upscaling the others according to the given
    dropout probability.

    Args:
        x (Tensor): The input tensor. The data type is float32 or float64.
        p (float|int): Probability of setting units to zero. Default 0.5.
        axis (int|list|tuple): The axis along which the dropout is performed. Default None.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        mode(str): ['upscale_in_train'(default) | 'downscale_in_infer'].

                           1. upscale_in_train(default), upscale the output at training time

                              - train: out = input * mask / ( 1.0 - dropout_prob )
                              - inference: out = input

                           2. downscale_in_infer, downscale the output at inference

                              - train: out = input * mask
                              - inference: out = input * (1.0 - dropout_prob)
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout, has same shape and data type as `x` .

    Examples:
        We use ``p=0.5`` in the following description for simplicity.
        1. When ``axis=None`` , this is commonly used dropout, which dropout each element of x randomly.

        ..  code-block:: text

            Let's see a simple case when x is a 2d tensor with shape 2*3:
            [[1 2 3]
             [4 5 6]]
            we generate mask with the same shape as x, which is 2*3. The value of mask is
            sampled from a Bernoulli distribution randomly. For example, we may get such mask:
            [[0 1 0]
             [1 0 1]]
            So the output is obtained from elementwise multiply of x and mask:
            [[0 2 0]
             [4 0 6]]
            Using default setting, i.e. ``mode='upscale_in_train'`` ,
            if in training phase, the final upscale output is:
            [[0 4 0 ]
             [8 0 12]]
            if in test phase, the output is the same as input:
            [[1 2 3]
             [4 5 6]]
            we can also set ``mode='downscale_in_infer'`` , then
            if in training phase, the final output is:
            [[0 2 0]
             [4 0 6]]
            if in test phase, the scale output is:
            [[0.5 1.  1.5]
             [2.  2.5 3. ]]

        2. When ``axis!=None`` , this is useful for dropping whole channels from an image or sequence.

        ..  code-block:: text

            Let's see the simple case when x is a 2d tensor with shape 2*3 again:
            [[1 2 3]
             [4 5 6]]
            (1) If ``axis=0`` , this means the dropout is only performed in axis `0` .
                we generate mask with the shape 2*1. Only in axis `0` the value is randomly selected.
                For example, we may get such mask:
                [[1]
                 [0]]
                The output is obtained from elementwise multiply of x and mask. Doing that the mask will be
                broadcast from 2*1 to 2*3:
                [[1 1 1]
                 [0 0 0]]
                and the result after elementwise multiply is:
                [[1 2 3]
                 [0 0 0]]
                then we can do upscale or downscale according to the setting of other arguments.
            (2) If ``axis=1`` , this means the dropout is only performed in axis `1` .
                we generate mask with the shape 1*3. Only in axis `1` the value is randomly selected.
                For example, we may get such mask:
                [[1 0 1]]
                Doing elementwise multiply the mask will be broadcast from 1*3 to 2*3:
                [[1 0 1]
                 [1 0 1]]
                and the result after elementwise multiply is:
                [[1 0 3]
                 [4 0 6]]
            (3) What about ``axis=[0, 1]`` ? This means the dropout is performed in all axes of x,
                which is the same case as default setting ``axis=None`` .
            (4) You may note that, logically, `axis=None` could also mean the dropout is performed in no axis of x:
                we would generate a mask with the shape 1*1, and the whole input would be randomly kept or dropped.
                For example, we may get such mask:
                [[0]]
                Doing elementwise multiply the mask will be broadcast from 1*1 to 2*3:
                [[0 0 0]
                 [0 0 0]]
                and the result after elementwise multiply is:
                [[0 0 0]
                 [0 0 0]]
                Actually this is not what we want, because all elements may be set to zero.

        When x is a 4d tensor with shape `NCHW`, we can set ``axis=[0,1]`` and the dropout will be performed in channels `N` and `C`, while `H` and `W` are tied, i.e. paddle.nn.functional.dropout(x, p, axis=[0,1]) . Please refer to ``paddle.nn.functional.dropout2d`` for more details.
        Similarly, when x is a 5d tensor with shape `NCDHW`, we can set ``axis=[0,1]`` to perform dropout3d. Please refer to ``paddle.nn.functional.dropout3d`` for more details.

        .. code-block:: python
            import paddle
            import numpy as np

            x = np.array([[1,2,3], [4,5,6]]).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.dropout(x, 0.5)
            y_test = paddle.nn.functional.dropout(x, 0.5, training=False) 
            y_0 = paddle.nn.functional.dropout(x, axis=0)
            y_1 = paddle.nn.functional.dropout(x, axis=1)
            y_01 = paddle.nn.functional.dropout(x, axis=[0,1])
            print(x)
            print(y_train)
            print(y_test)
            print(y_0)
            print(y_1)
            print(y_01)

    """
    # fast return for p == 0
    if p == 0:
        return x

    if not isinstance(p, (float, int)):
        raise TypeError("p argument should be a number")
    if p < 0 or p > 1:
        raise ValueError("p argument should be between 0 and 1")
    if mode not in ('downscale_in_infer', 'upscale_in_train'):
        raise ValueError(
            "mode argument should be 'downscale_in_infer' or 'upscale_in_train'")
    if axis is not None and not isinstance(axis, (int, list, tuple)):
        raise TypeError("datatype of axis argument should be int, list or tuple")

    if axis is None:  # commonly used dropout
        seed = None
        mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  # semantic transfer

        if in_dygraph_mode():
            if default_main_program().random_seed != 0:
                seed = default_main_program().random_seed
            out, mask = _C_ops.dropout(
                x, 'dropout_prob', p, 'is_test', not training, 'fix_seed',
                seed is not None, 'seed', seed
                if seed is not None else 0, 'dropout_implementation', mode)
            return out

        helper = LayerHelper('dropout', **locals())
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'dropout')

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        mask = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)

        def get_attrs(prog, dropout_prob, is_test, seed):
            if (seed is None or seed == 0) and prog.random_seed != 0:
                seed = prog.random_seed
            attrs = {
                'dropout_prob': dropout_prob,
                'is_test': is_test,
                'fix_seed': seed is not None,
                'seed': seed if seed is not None else 0,
                'dropout_implementation': mode,
            }
            return attrs

        attrs = get_attrs(helper.main_program, p, not training, seed)

        helper.append_op(
            type='dropout',
            inputs={'X': [x]},
            outputs={'Out': [out],
                     'Mask': [mask]},
            attrs=attrs)
        return out
    else:  #sometimes called dropout_nd #TODO: optimize with c++
        if not in_dygraph_mode():
            check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'dropout')
        dtype = x.dtype
        keep_prob = 1 - p
        if training:
            if p == 1.:
                return paddle.scale(x, scale=0.)
            scale_input = paddle.scale(
                x, scale=1 / keep_prob) if mode == 'upscale_in_train' else x

            #get mask shape
            input_shape = x.shape
            drop_axes = [axis] if isinstance(axis, int) else list(axis)
            if min(drop_axes) < 0 or max(drop_axes) > len(input_shape) - 1:
                raise ValueError("axis value should be greater than or equal to 0 and less than dimensions of x:{}, but get axis value:{} " \
                                 .format(len(input_shape), max(drop_axes)))
            if len(drop_axes) > len(input_shape):
                raise ValueError(
                    "length of axis should not be greater than dimensions of x:{}, but get length of axis: {}".
                    format(len(input_shape), len(drop_axes)))
            mask_shape = [1] * len(input_shape)
            for i in drop_axes:
                mask_shape[i] = input_shape[i]
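            # axes not listed in `axis` keep size 1, so the Bernoulli mask
            # broadcasts across them and whole slices are kept or dropped
            # together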

            #get mask
            random_tensor = paddle.uniform(
                mask_shape, dtype='float32', min=0., max=1.0)
            p = layers.fill_constant(shape=[1], dtype='float32', value=p)
            keep_mask = paddle.greater_equal(random_tensor, p)
            scale_input = paddle.cast(scale_input, dtype)
            keep_mask = paddle.cast(keep_mask, dtype)
            ret = paddle.multiply(scale_input, keep_mask, name=name)
            return ret
        else:  # test
            ret = paddle.scale(
                x, scale=keep_prob) if mode == 'downscale_in_infer' else x
            return ret


def dropout2d(x, p=0.5, training=True, data_format='NCHW', name=None):
    """
    Randomly zero out entire channels (in the batched input 4d tensor with the shape `NCHW` ,
    a channel is a 2D feature map with the shape `HW` ). Each channel will be zeroed out independently
    on every forward call with probability `p` using samples from a Bernoulli distribution.

    See ``paddle.nn.functional.dropout`` for more details.

    Args:
        x (Tensor):  The input is 4-D Tensor with shape [N, C, H, W] or [N, H, W, C].
                     The data type is float32 or float64.
        p (float): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from `NCHW` or `NHWC` . The default is `NCHW` . When it is `NCHW` , the data is stored in the order of: [batch_size, input_channels, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout2d, has same shape and data type as `x` .

    Examples:
        .. code-block:: python
            import paddle
            import numpy as np

            x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.dropout2d(x)  #train
            y_test = paddle.nn.functional.dropout2d(x, training=False) #test
            for i in range(2):
                for j in range(3):
                    print(x.numpy()[i,j,:,:])
                    print(y_train.numpy()[i,j,:,:]) # may all 0
                    print(y_test.numpy()[i,j,:,:])
    """
    input_shape = x.shape
    if len(input_shape) != 4:
        raise ValueError("dimensions of x should be 4, but received {} != 4"\
        .format(len(input_shape)))

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    return dropout(
        x,
        p=p,
        axis=[0, 1] if data_format == 'NCHW' else [0, 3],
        training=training,
        mode="upscale_in_train",
        name=name)

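# Illustrative note (a usage sketch in comments, mirroring the call made
# above): dropout2d is a thin wrapper over dropout with the mask tied over H
# and W, so for an NCHW input the two calls below are expected to behave the
# same.
#
#     import paddle
#     x = paddle.rand([2, 3, 4, 5])
#     y1 = paddle.nn.functional.dropout2d(x)
#     y2 = paddle.nn.functional.dropout(x, axis=[0, 1], mode="upscale_in_train")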

def dropout3d(x, p=0.5, training=True, data_format='NCDHW', name=None):
    """
    Randomly zero out entire channels (in the batched input 5d tensor with the shape `NCDHW` ,
    a channel is a 3D feature map with the shape `DHW` ). Each channel will be zeroed out independently
    on every forward call with probability `p` using samples from a Bernoulli distribution.

    See ``paddle.nn.functional.dropout`` for more details.

    Args:
        x (Tensor):  The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C].
                     The data type is float32 or float64.
        p (float): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from ``NCDHW`` or ``NDHWC``. The default is ``NCDHW`` . When it is ``NCDHW`` , the data is stored in the order of: [batch_size, input_channels, input_depth, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout3d, has same shape and data type as `x` .

    Examples:
        .. code-block:: python
            import paddle
            import numpy as np

            x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.dropout3d(x)  #train
            y_test = paddle.nn.functional.dropout3d(x, training=False) #test
            print(x.numpy()[0,0,:,:,:])
            print(y_train.numpy()[0,0,:,:,:]) # may all 0
            print(y_test.numpy()[0,0,:,:,:])
    """

    input_shape = x.shape
    if len(input_shape) != 5:
        raise ValueError("dimensions of x should be 5, but received {} != 5" \
        .format(len(input_shape)))

    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    return dropout(
        x,
        p=p,
        axis=[0, 1] if data_format == 'NCDHW' else [0, 4],
        training=training,
        mode="upscale_in_train",
        name=name)


def alpha_dropout(x, p=0.5, training=True, name=None):
    """
    Alpha Dropout is a type of Dropout that maintains the self-normalizing property.
    For an input with zero mean and unit standard deviation, the output of Alpha Dropout
    maintains the original mean and standard deviation of the input.
    Alpha Dropout fits well with the SELU activation function by randomly setting activations to the negative saturation value.

    Args:
        x (Tensor): The input tensor. The data type is float32 or float64.
        p (float | int): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in train phrase or not. Default True.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout, has same shape and data type as `x`.

    Examples:
        .. code-block:: python
            import paddle
            import numpy as np

            x = np.array([[-1, 1], [-1, 1]]).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.alpha_dropout(x, 0.5)
            y_test = paddle.nn.functional.alpha_dropout(x, 0.5, training=False)
            print(x)
            print(y_train)
            # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly)
            print(y_test)
    """
    if not isinstance(p, (float, int)):
        raise TypeError("p argument should be a float or int")
    if p < 0 or p > 1:
        raise ValueError("p argument should between 0 and 1")

    if not in_dygraph_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'alpha_dropout')

    if training:
        if p == 1:
            return paddle.scale(x, scale=0.)
        # get transformation params of the affine transform a * y + b that
        # restores zero mean and unit variance after dropping
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        alpha_p = -alpha * scale
        a = ((1 - p) * (1 + p * alpha_p**2))**-0.5
        b = -a * alpha_p * p

        dtype = x.dtype
        input_shape = x.shape

        # get mask: keep_mask is 1 where a unit is kept, drop_mask = 1 - keep_mask
        random_tensor = paddle.uniform(
            input_shape, dtype='float32', min=0., max=1.0)
        p = layers.fill_constant(shape=[1], dtype='float32', value=p)
        keep_mask = paddle.greater_equal(random_tensor, p)
        keep_mask = paddle.cast(keep_mask, dtype)
        drop_mask = paddle.subtract(
            layers.fill_constant(
                shape=input_shape, dtype=dtype, value=1.),
            keep_mask)

        # apply mask: dropped units are set to alpha_p before the affine transform
        b = layers.fill_constant(shape=[1], dtype=dtype, value=b)
        y = paddle.add(paddle.multiply(x, keep_mask),
                       paddle.scale(
                           drop_mask, scale=alpha_p))
        res = paddle.add(paddle.scale(y, scale=a), b, name=name)
        return res
    else:  # test
        return x


def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
    """
    Pad tensor according to 'pad' and 'mode'.
    If mode is 'constant' and the length of pad is twice the number of dimensions of x,
    then padding starts from the first dimension and moves backward over the dimensions
    of x according to 'pad' and 'value'.
    If mode is 'reflect', pad[0] and pad[1] must be no greater
    than width-1. The height and depth dimensions have the same constraint.

    Parameters:
        x (Tensor): The input tensor with data type float32, float64, int32 or int64.
        pad (Tensor | List[int32]): The padding size with data type int32. [len(pad)/2] dimensions
            of input will be padded. 1. If input dimension is 3, then the pad has the form (pad_left,
            pad_right). 2. If the input dimension is 4, then the pad has the form (pad_left, pad_right, 
            pad_top, pad_bottom). 3. If the input dimension is 5, then the pad has the form 
            (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
            
        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'replicate' mode, uses input boundaries to pad the input tensor.
            When in 'circular' mode, uses circular input to pad the input tensor.
            Default is 'constant'.
        value (float): The value to fill the padded areas in 'constant' mode. Default is 0.0.
        data_format (str): A string from: "NCL", "NLC", "NHWC", "NCHW", "NCDHW", "NDHWC",
            specifying the data format of the input data. Default is "NCHW".
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
                    
    Returns: a Tensor padded according to pad and mode, with the same data type as the input.
    Return Type: Tensor

    Examples:
        .. code-block:: text

            x = [[[[[1., 2., 3.],
                    [4., 5., 6.]]]]]

            Case 0:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'constant'
                value = 0
                Out = [[[[[0. 0. 0. 0. 0. 0. 0.]
                          [0. 0. 1. 2. 3. 0. 0.]
                          [0. 0. 4. 5. 6. 0. 0.]
                          [0. 0. 0. 0. 0. 0. 0.]]]]]

            Case 1:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'reflect'
                Out = [[[[[6. 5. 4. 5. 6. 5. 4.]
                          [3. 2. 1. 2. 3. 2. 1.]
                          [6. 5. 4. 5. 6. 5. 4.]
                          [3. 2. 1. 2. 3. 2. 1.]]]]]

            Case 2:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'replicate'
                Out = [[[[[1. 1. 1. 2. 3. 3. 3.]
                          [1. 1. 1. 2. 3. 3. 3.]
                          [4. 4. 4. 5. 6. 6. 6.]
                          [4. 4. 4. 5. 6. 6. 6.]]]]]

            Case 3:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'circular'
                Out = [[[[[5. 6. 4. 5. 6. 4. 5.]
                          [2. 3. 1. 2. 3. 1. 2.]
                          [5. 6. 4. 5. 6. 4. 5.]
                          [2. 3. 1. 2. 3. 1. 2.]]]]]

    Code Examples:
        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn.functional as F
            
            # example 1
            x_shape = (1, 1, 3)
            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
            y = F.pad(x, [2, 3], value=1, mode='constant', data_format="NCL")
            print(y)
            # [[[1. 1. 1. 2. 3. 1. 1. 1.]]]
            
            # example 2
            x_shape = (1, 1, 2, 3)
            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
            y = F.pad(x, [1, 2, 1, 1], value=1, mode='circular')
            print(y)
            # [[[[6. 4. 5. 6. 4. 5.]
            #    [3. 1. 2. 3. 1. 2.]
            #    [6. 4. 5. 6. 4. 5.]
            #    [3. 1. 2. 3. 1. 2.]]]]
    """
    assert mode in ['reflect', 'replicate', 'constant', 'circular'], \
            "mode should be one of constant, reflect, replicate, circular, but got {}.".format(mode)

    data_format = data_format.upper()
    assert data_format in ["NCL", "NCHW", "NCDHW", "NLC", "NHWC", "NDHWC"], \
        "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " \
        "but got {}".format(data_format)

    x_dim = len(x.shape)

    if mode == "constant" and isinstance(pad, list) and len(pad) == x_dim * 2:
        return layers.pad(x, pad, pad_value=value)

    assert x_dim in [
        3, 4, 5
    ], "input tensor dimension must be in [3, 4, 5] but got {}".format(x_dim)

    supported_format_map = {
        3: ["NCL", "NLC"],
        4: ["NCHW", "NHWC"],
        5: ["NCDHW", "NDHWC"],
    }
    assert data_format in supported_format_map[x_dim], \
    "input tensor dimension is {}, its data format should be in {} but got {}".format(
        x_dim, supported_format_map[x_dim], data_format)

    unsqueezed_dim = []

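    # pad3d only supports 5-D input, so 3-D and 4-D inputs are unsqueezed to
    # 5-D here (the extra axes get pad amounts of 0) and squeezed back below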
    if isinstance(pad, Variable):
        if data_format in ["NCL", "NCHW", "NCDHW"]:
            data_format = "NCDHW"
            if x_dim == 3:
                pad = concat([zeros((4, ), dtype="int32"), pad], axis=0)
                unsqueezed_dim = [3, 4]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = concat([pad, zeros((2, ), dtype="int32")], axis=0)
                unsqueezed_dim = [2]
                x = unsqueeze(x, axis=unsqueezed_dim)
        elif data_format in ["NLC", "NHWC", "NDHWC"]:
            data_format = "NDHWC"
            if x_dim == 3:
                pad = concat([zeros((4, ), dtype="int32"), pad], axis=0)
                unsqueezed_dim = [2, 3]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = concat([pad, zeros((2, ), dtype="int32")], axis=0)
                unsqueezed_dim = [1]
                x = unsqueeze(x, axis=unsqueezed_dim)
    else:
        if data_format in ["NCL", "NCHW", "NCDHW"]:
            data_format = "NCDHW"
            if x_dim == 3:
                pad = [0, 0, 0, 0] + pad
                unsqueezed_dim = [3, 4]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = pad + [0, 0]
                unsqueezed_dim = [2]
                x = unsqueeze(x, axis=unsqueezed_dim)
        elif data_format in ["NLC", "NHWC", "NDHWC"]:
            data_format = "NDHWC"
            if x_dim == 3:
                pad = [0, 0, 0, 0] + pad
                unsqueezed_dim = [2, 3]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = pad + [0, 0]
                unsqueezed_dim = [1]
                x = unsqueeze(x, axis=unsqueezed_dim)

    if in_dygraph_mode():
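        # dygraph: a `pad` given as a tensor is materialized to numpy so it
        # can be passed as the paddings attribute of the fused pad3d op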
        if isinstance(pad, Variable):
            pad = pad.numpy()
        out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
                           "data_format", data_format, "name", name)
    else:
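        # static graph: a tensor `pad` is wired in as the Paddings input,
        # while a plain python list is passed as an op attribute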
        attrs = {'mode': mode, 'value': value, 'data_format': data_format}
        inputs = {'X': [x]}
        if isinstance(pad, Variable):
            inputs['Paddings'] = [pad]
            attrs['paddings'] = []
        else:
            attrs['paddings'] = pad

        helper = LayerHelper('pad3d', **locals())

        dtype = helper.input_dtype(input_param_name='input')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs)

    if len(unsqueezed_dim) != 0:
        out = squeeze(out, axis=unsqueezed_dim)

    return out


def cosine_similarity(x1, x2, axis=1, eps=1e-8):
    """
    Compute cosine similarity between x1 and x2 along axis.

    Parameters:
        x1 (Tensor): First input. float32/float64.
        x2 (Tensor): Second input. float32/float64.
        axis (int): Dimension of vectors to compute cosine similarity. Default is 1.
        eps(float): Small value to avoid division by zero. Default is 1e-8.
                    
    Returns: a Tensor representing cosine similarity between x1 and x2 along axis.
    Return Type: Tensor

    Examples:
        .. code-block:: text

            Case 0:
                x1 = [[0.8024077  0.9927354  0.27238318 0.8344984 ]
                     [0.48949873 0.5797396  0.65444374 0.66510963]
                     [0.1031398  0.9614342  0.08365563 0.6796464 ]
                     [0.10760343 0.7461209  0.7726148  0.5801006 ]]
                x2 = [[0.62913156 0.1536727  0.9847992  0.04591406]
                     [0.9098952  0.15715368 0.8671125  0.3156102 ]
                     [0.4427798  0.54136837 0.5276275  0.32394758]
                     [0.3769419  0.8535014  0.48041078 0.9256797 ]]
                axis = 1
                eps = 1e-8
                Out: [0.5275037  0.8368967  0.75037485 0.9245899]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np

            np.random.seed(0)
            x1 = np.random.rand(2,3)
            x2 = np.random.rand(2,3)
            x1 = paddle.to_tensor(x1)
            x2 = paddle.to_tensor(x2)
            result = paddle.nn.functional.cosine_similarity(x1, x2, axis=0)
            print(result)
            # [0.99806249 0.9817672  0.94987036]
            
    """
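    # cos(x1, x2) = <x1, x2> / max(||x1|| * ||x2||, eps); the product of the
    # squared norms is clipped at eps**2 before the square root so the
    # denominator never reaches zero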
    w12 = sum(paddle.multiply(x1, x2), axis=axis)
    w1 = sum(paddle.multiply(x1, x1), axis=axis)
    w2 = sum(paddle.multiply(x2, x2), axis=axis)
    n12 = sqrt(clip(w1 * w2, min=eps * eps))
    cos_sim = w12 / n12
    return cos_sim


def linear(x, weight, bias=None, name=None):
    r"""

    Fully-connected linear transformation operator. For each input :math:`X` ,
    the equation is:

    .. math::

        Out = XW + b

    where :math:`W` is the weight and :math:`b` is the bias.

    If the weight is a 2-D tensor of shape :math:`[in\_features, out\_features]` ,
    input should be a multi-dimensional tensor of shape
    :math:`[batch\_size, *, in\_features]` , where :math:`*` means any number of
    additional dimensions. The linear operator multiplies the input tensor with the
    weight and produces an output tensor of shape :math:`[batch\_size, *, out\_features]` .
    If :math:`bias` is not None, the bias should be a 1-D tensor of shape
    :math:`[out\_features]` and will be added to the output.

    Parameters:
        x (Tensor): Input tensor. The data type should be float16, float32 or float64.
        weight (Tensor): Weight tensor. The data type should be float16, float32 or float64.
        bias (Tensor, optional): Bias tensor. The data type should be float16, float32 or float64.
                                 If it is set to None, no bias will be added to the output units.
        name (str, optional): Normally there is no need for user to set this parameter.
                              For detailed information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor, the shape is :math:`[batch\_size, *, out\_features]` and the
        data type is the same with input :math:`x` .

    Examples:
        .. code-block:: python
          
          import paddle
          
          x = paddle.randn((3, 2), dtype="float32")
          # x: [[-0.32342386 -1.200079  ]
          #     [ 0.7979031  -0.90978354]
          #     [ 0.40597573  1.8095392 ]]
          weight = paddle.full(shape=[2, 4], fill_value="0.5", dtype="float32", name="weight")
          # weight: [[0.5 0.5 0.5 0.5]
          #          [0.5 0.5 0.5 0.5]]
          bias = paddle.ones(shape=[4], dtype="float32", name="bias")
          # bias: [1. 1. 1. 1.]
          y = paddle.nn.functional.linear(x, weight, bias)
          # y: [[0.23824859 0.23824859 0.23824859 0.23824859]
          #     [0.9440598  0.9440598  0.9440598  0.9440598 ]
          #     [2.1077576  2.1077576  2.1077576  2.1077576 ]]
    """
    if in_dygraph_mode():
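        # dygraph: call the fused C++ ops directly: y = x @ weight, followed
        # by a broadcasted bias add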
        pre_bias = _varbase_creator(dtype=x.dtype)
        _C_ops.matmul(x, weight, pre_bias, 'transpose_X', False, 'transpose_Y',
                      False, "alpha", 1)

        if bias is None:
            return pre_bias

        return _C_ops.elementwise_add(pre_bias, bias)
    else:
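        # static graph: compose matmul and elementwise_add ops; the bias is
        # broadcast along the last dimension (axis = len(x.shape) - 1)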
        helper = LayerHelper('linear', **locals())
        dtype = x.dtype

        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'linear')
        check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear')

        inputs = {'X': [x], 'Y': [weight]}
        attrs = {
            'transpose_X': False,
            'transpose_Y': False,
            'alpha': 1,
        }
        tmp = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type='matmul', inputs=inputs, outputs={'Out': tmp}, attrs=attrs)
        if bias is not None:
            res = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='elementwise_add',
                inputs={'X': [tmp],
                        'Y': [bias]},
                outputs={'Out': [res]},
                attrs={'axis': len(x.shape) - 1})
        else:
            res = tmp
        return res


def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
    r"""
    Label smoothing is a mechanism to regularize the classifier layer and is called
    label-smoothing regularization (LSR).

    Label smoothing is proposed to encourage the model to be less confident,
    since optimizing the log-likelihood of the correct label directly may
    cause overfitting and reduce the ability of the model to adapt. Label
    smoothing replaces the ground-truth label :math:`y` with the weighted sum
    of itself and some fixed distribution :math:`\mu`. For class :math:`k`,
    i.e.

    .. math::

        \tilde{y_k} = (1 - \epsilon) * y_k + \epsilon * \mu_k,

    where :math:`1 - \epsilon` and :math:`\epsilon` are the weights
    respectively, and :math:`\tilde{y_k}` is the smoothed label. Usually a
    uniform distribution is used for :math:`\mu`.

    See more details about label smoothing in https://arxiv.org/abs/1512.00567.

    Parameters:
        label(Tensor): The input variable containing the label data. The
                        label data should use one-hot representation. It's
                        a multidimensional tensor with a shape of
                        :math:`[N_1, ..., Depth]`, where Depth is class number. The dtype can be "float32" and "float64".
        prior_dist(Tensor, optional): The prior distribution to be used to smooth
                        labels. If not provided, an uniform distribution
                        is used. It's a multidimensional tensor with a shape of
                        :math:`[1, class\_num]` . The default value is None.
        epsilon(float, optional): The weight used to mix up the original ground-truth
                        distribution and the fixed distribution. The default value is
                        0.1.
        name(str, optional): The default value is None. Normally there is no need for user
                        to set this property. For more information, please refer to
                        :ref:`api_guide_Name`.

    Returns:
        Tensor: The tensor containing the smoothed labels.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            
            x_data = np.array([[[0, 1, 0],
                                [1, 0, 1]]]).astype("float32")
            print(x_data.shape)
            paddle.disable_static()
            x = paddle.to_tensor(x_data, stop_gradient=False)
            output = paddle.nn.functional.label_smooth(x)
            print(output)
            
            #[[[0.03333334 0.93333334 0.03333334]
            #  [0.93333334 0.03333334 0.93333334]]]
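
            # with epsilon = 0.1 and 3 classes, each 1 becomes
            # (1 - 0.1) + 0.1 / 3 = 0.93333334 and each 0 becomes 0.1 / 3 = 0.03333334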
    """
    if epsilon > 1. or epsilon < 0.:
        raise ValueError("The value of epsilon must be between 0 and 1.")

    if in_dygraph_mode():
        return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon))

    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                             'label_smooth')

    helper = LayerHelper("label_smooth", **locals())
    label.stop_gradient = True
    smooth_label = helper.create_variable_for_type_inference(label.dtype)
    helper.append_op(
        type="label_smooth",
        inputs={"X": label,
                "PriorDist": prior_dist} if prior_dist else {"X": label},
        outputs={"Out": smooth_label},
        attrs={"epsilon": float(epsilon)})
    return smooth_label


def class_center_sample(label, num_classes, num_samples, group=None, seed=None):
    """
    The class center sampling method, proposed in the PartialFC paper, samples only a subset of the class centers.
    The process of sampling the subset of class centers is straightforward:

    1. First select the positive class centers;
    2. Then randomly sample negative class centers.

    Specifically, given a label tensor of shape [batch_size], select all the positive class centers and randomly
    sample negative class centers, then remap the input label tensor using the sampled class centers.

    For more information, see Partial FC: Training 10 Million Identities on a Single Machine,
    arXiv: https://arxiv.org/abs/2010.05222
    
    .. hint::
        If the number of the positive class centers is greater than the input num_samples, it keeps all the positive 
        class centers and the shape of sampled_class_center will be [num_positive_class_centers].
    
        The API supports CPU, single GPU and multi GPU.

    Args:
        label (Tensor): 1-D tensor with shape [N], each label in [0, num_classes).
        num_classes (int): A positive integer to specify the number of classes at local rank.
            Note that num_classes of each GPU can be different.
        num_samples (int): A positive integer to specify the number of class centers to sample.
        group (Group, optional): The abstract representation of group.
            See paddle.distributed.collective.Group. Default is ``None``.
        seed (int, optional): Random seed. Default is ``None``.

    Returns:
        Tuple of two ``Tensor`` : (remapped_label, sampled_class_center), remapped label using sampled class center,
        sampled class center from [0, num_classes).

    Examples:

    .. code-block:: python

        # CPU or single GPU
        import paddle
        num_classes = 20
        batch_size = 10
        num_samples = 6
        label = paddle.randint(low=0, high=num_classes, shape=[batch_size], dtype='int64')
        remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample(label, num_classes, num_samples)

        print(label)
        print(remapped_label)
        print(sampled_class_index)

        # the output is
        #Tensor(shape=[10], dtype=int64, place=CPUPlace, stop_gradient=True,
        #       [11, 5 , 1 , 3 , 12, 2 , 15, 19, 18, 19])
        #Tensor(shape=[10], dtype=int64, place=CPUPlace, stop_gradient=True,
        #       [4, 3, 0, 2, 5, 1, 6, 8, 7, 8])
        #Tensor(shape=[9], dtype=int64, place=CPUPlace, stop_gradient=True,
        #       [1 , 2 , 3 , 5 , 11, 12, 15, 18, 19])

    .. code-block:: python

        # required: distributed
        # Multi GPU, test_class_center_sample.py
        import paddle
        import paddle.distributed as dist
        strategy = dist.fleet.DistributedStrategy()
        dist.fleet.init(is_collective=True, strategy=strategy)
        batch_size = 10
        num_samples = 6
        rank_id = dist.get_rank()
        # num_classes of each GPU can be different, e.g num_classes_list = [10, 8]
        num_classes_list = [10, 10]
        num_classes = paddle.sum(paddle.to_tensor(num_classes_list))
        label = paddle.randint(low=0, high=num_classes.item(), shape=[batch_size], dtype='int64')
        label_list = []
        dist.all_gather(label_list, label)
        label = paddle.concat(label_list, axis=0)
        remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample(label, num_classes_list[rank_id], num_samples)

        print(label)
        print(remapped_label)
        print(sampled_class_index)

        #python -m paddle.distributed.launch --gpus=0,1 test_class_center_sample.py
        # rank 0 output:
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [10, 17, 15, 11, 9 , 12, 18, 18, 17, 18, 19, 2 , 8 , 13, 11, 13, 9 , 10, 0 , 4 ])
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [6 , 11, 10, 7 , 4 , 8 , 12, 12, 11, 12, 13, 1 , 3 , 9 , 7 , 9 , 4 , 6 , 0 , 2 ])
        #Tensor(shape=[6], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [0, 2, 4, 8, 9, 3])
        
        # rank 1 output:
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
        #       [10, 17, 15, 11, 9 , 12, 18, 18, 17, 18, 19, 2 , 8 , 13, 11, 13, 9 , 10, 0 , 4 ])
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
        #       [6 , 11, 10, 7 , 4 , 8 , 12, 12, 11, 12, 13, 1 , 3 , 9 , 7 , 9 , 4 , 6 , 0 , 2 ])
        #Tensor(shape=[7], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
        #       [0, 1, 2, 3, 5, 7, 8])
    """
    if group is not None and not group.is_member():
        return

    ring_id = 0 if group is None else group.id
    rank = 0
    nranks = 1
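    # resolve the rank id and world size of the (optional) communication
    # group; the op uses them to coordinate sampling across ranks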
    if core.is_compiled_with_dist():
        parallel_env = paddle.distributed.ParallelEnv()
        global_rank = parallel_env.rank
        rank = global_rank if group is None else group.get_group_rank(
            global_rank)
        nranks = parallel_env.world_size if group is None else group.nranks

    if num_samples > num_classes:
        raise ValueError(
            'Expected num_samples less than or equal to {}, got num_samples {}'.
            format(num_classes, num_samples))

    if (seed is None or seed == 0) and default_main_program().random_seed != 0:
        seed = default_main_program().random_seed

    if in_dygraph_mode():
        remapped_label, sampled_class_center = core.ops.class_center_sample(
            label, 'num_classes', num_classes, 'num_samples', num_samples,
            'ring_id', ring_id, 'nranks', nranks, 'rank', rank, 'fix_seed',
            seed is not None, 'seed', seed if seed is not None else 0)
        return remapped_label, sampled_class_center

    check_variable_and_dtype(label, 'label', ['int64', 'int32'],
                             'class_center_sample')
    op_type = 'class_center_sample'
    helper = LayerHelper(op_type, **locals())
    remapped_label = helper.create_variable_for_type_inference(
        dtype=label.dtype)
    sampled_class_center = helper.create_variable_for_type_inference(
        dtype=label.dtype)
    helper.append_op(
        type=op_type,
        inputs={'Label': label},
        outputs={
            'RemappedLabel': remapped_label,
            'SampledLocalClassCenter': sampled_class_center
        },
        attrs={
            'num_classes': num_classes,
            'num_samples': num_samples,
            'ring_id': ring_id,
            'nranks': nranks,
            'rank': rank,
            'fix_seed': seed is not None,
            'seed': seed if seed is not None else 0
        })
    return remapped_label, sampled_class_center