#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
import paddle
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.tensor import fill_constant
from ...tensor import concat
from ...tensor.creation import zeros
from paddle.static import Variable
from ...fluid import dygraph_utils

# TODO: define the common functions to build a neural network
from ...fluid.layers import unfold  # noqa: F401
from ...tensor.manipulation import squeeze
from ...tensor.manipulation import unsqueeze
from ...tensor import clip
from ...tensor import sum
from ...tensor import sqrt
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from ...fluid.framework import _varbase_creator, _in_legacy_dygraph, in_dygraph_mode, _non_static_mode

from ...fluid import layers

from paddle import _C_ops
from paddle.framework import in_dynamic_mode
from paddle.tensor.creation import full
from paddle.framework import core
from paddle.static import default_main_program

__all__ = []


def interpolate(x,
                size=None,
                scale_factor=None,
                mode='nearest',
                align_corners=False,
                align_mode=0,
                data_format='NCHW',
                name=None):
    """

    This op resizes a batch of images.
    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
    or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
    (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    where in_w is the width of the input tensor, in_h is the height of the input
    tensor, and in_d is the depth of the input tensor. The resizing only applies
    to the three dimensions (depth, height and width).

    Supporting resample methods:
        'linear' : Linear interpolation
        'bilinear' : Bilinear interpolation
        'trilinear' : Trilinear interpolation
        'nearest' : Nearest neighbor interpolation
        'bicubic' : Bicubic interpolation
        'area': Area interpolation

    Linear interpolation is the method of using a line connecting two known
    quantities to determine the value of an unknown quantity between them.

    Nearest neighbor interpolation performs nearest neighbor interpolation
    on the input tensor in both the 3rd dimension (the height direction) and
    the 4th dimension (the width direction).

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
    The linear interpolation is performed on three directions.
    align_corners and align_mode are optional parameters; the calculation
    method of interpolation can be selected through them.

    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.

    Area interpolation performs area interpolation on the input tensor in the
    3rd dimension (the height direction), the 4th dimension (the width
    direction) and the 5th dimension (the depth direction). Setting the mode to
    'area' will directly call `paddle.nn.functional.adaptive_avg_pool1d`,
    `paddle.nn.functional.adaptive_avg_pool2d` or
    `paddle.nn.functional.adaptive_avg_pool3d`.

    Example:

    .. code-block:: text

        For scale_factor:
            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
              scale_factor = float(in_size/out_size)

        Linear interpolation:
            if:
                align_corners = False , align_mode = 0
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = W_{in} * scale_{factor}
        
        Nearest neighbor interpolation:

              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})

        Bilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Bicubic interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.

    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.

    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation.

    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation.

    Parameters:
        x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of the image resize
             layer, the shape is (out_w, ) when the input is a 3-D Tensor, (out_h, out_w)
             when the input is a 4-D Tensor and (out_d, out_h, out_w) when the input is a 5-D Tensor.
             Default: None. If a list/tuple, each element can be an integer or a Tensor of shape [1].
             If a Tensor, it should be a 1-D Tensor.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set,
             and :attr:`size` has a higher priority than :attr:`scale_factor`. If it is
             a list, tuple or Tensor, its length has to match the number of spatial
             dimensions of the input.
             Default: None.
        mode (str): The resample method. It supports 'linear', 'area', 'nearest', 'bilinear',
                       'bicubic' and 'trilinear' currently. Default: 'nearest'.
        align_corners(bool) :  An optional bool. If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
                               corner pixels. This only has an effect when mode is 'linear', 'bilinear', 'bicubic' or 'trilinear'.
                               Default: False
        align_mode(int)  :  An optional int for linear/bilinear/trilinear interpolation. Refer to the formula in the example above;
                            it can be \'0\' for src_idx = scale_factor*(dst_index+0.5)-0.5 , or \'1\' for
                            src_idx = scale_factor*dst_index.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for the user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
        a 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or a 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
                    'trilinear', 'bicubic', 'area' or 'nearest' currently.
        ValueError: 'linear' only supports 3-D tensors.
        ValueError: 'bilinear' and 'bicubic' only support 4-D tensors.
        ValueError: 'nearest' only supports 4-D or 5-D tensors.
        ValueError: 'trilinear' only supports 5-D tensors.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 1 for input 3-D tensor.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: size length should be 3 for input 5-D tensor.
        ValueError: scale_factor should be greater than zero.
        TypeError: align_corners should be a bool value.
        ValueError: align_mode can only be '0' or '1'.
        ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            import paddle.nn.functional as F

            # given out size
            input_data = np.random.rand(2,3,6,10).astype("float32")
            x = paddle.to_tensor(input_data)
            output_1 = F.interpolate(x=x, size=[12,12])
            print(output_1.shape)
            # [2L, 3L, 12L, 12L]

            # given scale
            output_2 = F.interpolate(x=x, scale_factor=[2,1])
            print(output_2.shape)
            # [2L, 3L, 12L, 10L]

            # bilinear interp
            output_3 = F.interpolate(x=x, scale_factor=[2,1], mode="bilinear")
            print(output_3.shape)
            # [2L, 3L, 12L, 10L]
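
            # area interpolation (a sketch: per the description above,
            # mode="area" delegates to adaptive_avg_pool2d for 4-D input;
            # output_4 is a hypothetical variable name)
            output_4 = F.interpolate(x=x, size=[3,5], mode="area")
            print(output_4.shape)
            # [2L, 3L, 3L, 5L]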
    """
    data_format = data_format.upper()
    resample = mode.upper()
    resample_type = mode.lower()

    resample_methods = [
        'LINEAR',
        'BILINEAR',
        'TRILINEAR',
        'NEAREST',
        'BICUBIC',
        'AREA',
    ]
    if resample not in resample_methods:
        raise ValueError(
            "The 'mode' of image_resize can only be 'area', 'linear', 'bilinear', 'trilinear', "
            "'bicubic' or 'nearest' currently.")

    if resample in ['LINEAR'] and len(x.shape) != 3:
        raise ValueError("'linear' only supports 3-D tensors.")

    if resample in ['NEAREST'] and len(x.shape) != 4 and len(x.shape) != 5:
        raise ValueError("'nearest' only supports 4-D or 5-D tensors.")

    if resample in ['BILINEAR', 'BICUBIC'] and len(x.shape) != 4:
        raise ValueError("'bilinear' and 'bicubic' only support 4-D tensors.")
    if resample == 'TRILINEAR' and len(x.shape) != 5:
        raise ValueError("'trilinear' only supports 5-D tensors.")

    if size is None and scale_factor is None:
        raise ValueError("One of size and scale_factor must not be None.")

    if not isinstance(align_corners, bool):
        raise TypeError("Attr align_corners should be a bool value")

    if align_mode != 0 and align_mode != 1:
        raise ValueError("align_mode can only be 0 or 1")
    if align_corners != 0 and resample == 'NEAREST':
        raise ValueError(
            "align_corners option can only be set with the interpolating modes: linear | bilinear | bicubic | trilinear"
        )

    if resample == 'AREA':
        if isinstance(size, list) or isinstance(size, tuple) or isinstance(
                size, Variable):
            if len(size) == 0:
                raise ValueError("output size can not be empty")
        if len(x.shape) == 3:
            return paddle.nn.functional.adaptive_avg_pool1d(x, size)
        elif len(x.shape) == 4:
            return paddle.nn.functional.adaptive_avg_pool2d(x, size)
        elif len(x.shape) == 5:
            return paddle.nn.functional.adaptive_avg_pool3d(x, size)

    helper = LayerHelper('{}_interp_v2'.format(resample_type), **locals())
    dtype = helper.input_dtype(input_param_name='x')
    if len(x.shape) == 3 and data_format not in ['NCW', 'NWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCW` or `NWC` supported for 3-D input.")
    elif len(x.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCHW` or `NHWC` supported for 4-D input.")
    elif len(x.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCDHW` or `NDHWC` supported for 5-D input.")

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

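    # The backend kernels only distinguish channel-first from channel-last,
    # so the 1-D and 3-D layouts are mapped onto the NCHW/NHWC flags.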
    if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
        data_layout = 'NCHW'
    if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC':
        data_layout = 'NHWC'

    if resample == 'NEAREST':
        align_corners = False

    inputs = {"X": x}
    attrs = {
        "out_d": -1,
        "out_h": -1,
        "out_w": -1,
        "interp_method": resample_type,
        "align_corners": align_corners,
        "align_mode": align_mode,
        "data_layout": data_layout
    }

    out_shape = size
    scale = scale_factor
    if out_shape is not None and scale is not None:
        raise ValueError("Only one of size or scale_factor should be defined.")
    if out_shape is not None:
        if isinstance(out_shape, Variable) and not in_dynamic_mode():
            out_shape.stop_gradient = True
            inputs['OutSize'] = out_shape
        else:
            if in_dynamic_mode():
                if isinstance(out_shape, Variable):
                    out_shape = list(out_shape.numpy())
                else:
                    out_shape = list(out_shape)
                for i, dim in enumerate(out_shape):
                    if isinstance(dim, Variable):
                        out_shape[i] = dim.numpy()[0]
            if not (_is_list_or_tuple_(out_shape)):
                raise TypeError("size should be a list or tuple or Variable.")
            # Validate the shape
            contain_var = False
            for dim_idx, dim_size in enumerate(out_shape):
                if isinstance(dim_size, Variable):
                    contain_var = True
                    continue
                assert dim_size > 0, (
                    "Each dimension size given in out_shape must be greater than 0."
                )

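            # When any entry of out_shape is a Tensor, every size is passed
            # through the SizeTensor input; Tensor entries are recorded as -1
            # in size_list so the attribute path treats them as dynamic.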
            if contain_var:
                new_size_tensor = []
                size_list = []
                for dim in out_shape:
                    if isinstance(dim, Variable):
                        dim.stop_gradient = True
                        new_size_tensor.append(dim)
                        size_list.append(-1)
                    else:
                        assert (isinstance(dim, int))
                        temp_out = helper.create_variable_for_type_inference(
                            'int32')
                        fill_constant(
                            [1], 'int32', dim, force_cpu=True, out=temp_out)
                        new_size_tensor.append(temp_out)
                        size_list.append(dim)
                inputs['SizeTensor'] = new_size_tensor

            if len(x.shape) == 3:
                if len(out_shape) != 1:
                    raise ValueError(
                        "size length should be 1 for input 3-D tensor")
                if contain_var:
                    attrs['out_w'] = size_list[0]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_w'] = out_shape[0]
            if len(x.shape) == 4:
                if len(out_shape) != 2:
                    raise ValueError("size length should be 2 for "
                                     "input 4-D tensor.")
                if contain_var:
                    attrs['out_h'] = size_list[0]
                    attrs['out_w'] = size_list[1]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_h'] = out_shape[0]
                    attrs['out_w'] = out_shape[1]
            if len(x.shape) == 5:
                if len(out_shape) != 3:
                    raise ValueError("size length should be 3 for "
                                     "input 5-D tensor.")
                if contain_var:
                    attrs['out_d'] = size_list[0]
                    attrs['out_h'] = size_list[1]
                    attrs['out_w'] = size_list[2]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_d'] = out_shape[0]
                    attrs['out_h'] = out_shape[1]
                    attrs['out_w'] = out_shape[2]

    else:
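        # size not given: derive the output extent from scale_factor instead.
        # A float/int applies to every spatial axis; a list/tuple/Tensor must
        # provide one factor per spatial axis.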
        if in_dynamic_mode() and isinstance(scale, Variable):
            scale = list(scale.numpy())
        if isinstance(scale, Variable):
            scale.stop_gradient = True
            inputs["Scale"] = scale
        elif isinstance(scale, float) or isinstance(scale, int):
            if scale <= 0:
                raise ValueError("Attr(scale) should be greater than zero.")
            scale_list = []
            for i in range(len(x.shape) - 2):
                scale_list.append(scale)
            attrs['scale'] = list(map(float, scale_list))
        elif isinstance(scale, list) or isinstance(scale, tuple):
            if len(scale) != len(x.shape) - 2:
                raise ValueError("scale_shape length should be {} for "
                                 "input {}-D tensor.".format(
                                     len(x.shape) - 2, len(x.shape)))
            for value in scale:
                if value <= 0:
                    raise ValueError("Attr(scale) should be greater than zero.")
            attrs['scale'] = list(map(float, scale))
        else:
            raise TypeError(
                "Attr(scale)'s type should be float, int, list, tuple, or Tensor."
            )

    if in_dynamic_mode():
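        # The legacy dynamic-graph ops take attributes as alternating
        # (name, value) positional arguments, so flatten the dict.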
        attr_list = []
        for k, v in attrs.items():
            attr_list.append(k)
            attr_list.append(v)
        dy_attr = tuple(attr_list)

        if resample_type == "linear":
            out = _C_ops.linear_interp_v2(x, *dy_attr)
        elif resample_type == "bilinear":
            out = _C_ops.bilinear_interp_v2(x, *dy_attr)
        elif resample_type == "trilinear":
            out = _C_ops.trilinear_interp_v2(x, *dy_attr)
        elif resample_type == "nearest":
            out = _C_ops.nearest_interp_v2(x, *dy_attr)
        elif resample_type == "bicubic":
            out = _C_ops.bicubic_interp_v2(x, *dy_attr)
        return out
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='{}_interp_v2'.format(resample_type),
        inputs=inputs,
        outputs={"Out": out},
        attrs=attrs)
    return out


def upsample(x,
             size=None,
             scale_factor=None,
             mode='nearest',
             align_corners=False,
             align_mode=0,
             data_format='NCHW',
             name=None):
    """
    This op resizes a batch of images.
    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
    or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
    (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    where in_w is the width of the input tensor, in_h is the height of the input
    tensor, and in_d is the depth of the input tensor. The resizing only applies
    to the three dimensions (depth, height and width).

    Supporting resample methods:
        'linear' : Linear interpolation
        'bilinear' : Bilinear interpolation
        'trilinear' : Trilinear interpolation
        'nearest' : Nearest neighbor interpolation
        'bicubic' : Bicubic interpolation
    Linear interpolation is the method of using a line connecting two known
    quantities to determine the value of an unknown quantity between them.

    Nearest neighbor interpolation performs nearest neighbor interpolation
    on the input tensor in both the 3rd dimension (the height direction) and
    the 4th dimension (the width direction).
    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.
    
    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.

    The linear interpolation is performed on the three directions.
    align_corners and align_mode are optional parameters; the calculation
    method of interpolation can be selected through them.

    Area interpolation performs area interpolation on the input tensor in the
    3rd dimension (the height direction), the 4th dimension (the width
    direction) and the 5th dimension (the depth direction). Setting the mode to
    'area' will directly call `paddle.nn.functional.adaptive_avg_pool1d`,
    `paddle.nn.functional.adaptive_avg_pool2d` or
    `paddle.nn.functional.adaptive_avg_pool3d`.

    Example:

    .. code-block:: text

        For scale_factor:
            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
              scale_factor = float(in_size/out_size)
        Linear interpolation:
            if:
                align_corners = False , align_mode = 0
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = W_{in} * scale_{factor}
        Nearest neighbor interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})
          else:
              align_corners = True
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = round(H_{in} * scale_{factor})
              W_out = round(W_{in} * scale_{factor})
        
        Bilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}
        Bicubic interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}
        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.

    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.

    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation.

    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation.

    Parameters:
        x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w) 
             when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor. 
             Default: None. If a list/tuple, each element can be an integer or a Tensor of shape [1].
             If a Tensor, it should be a 1-D Tensor.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set,
             and :attr:`size` has a higher priority than :attr:`scale_factor`. If it is
             a list, tuple or Tensor, its length has to match the number of spatial
             dimensions of the input.
             Default: None.
        mode (str): The resample method. It supports 'linear', 'nearest', 'bilinear',
                       'bicubic' and 'trilinear' currently. Default: 'nearest'.
        align_corners(bool) :  An optional bool. If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
                               corner pixels.
                               Default: False
        align_mode(int)  :  An optional int for linear/bilinear/trilinear interpolation. Refer to the formula in the example above;
                            it can be \'0\' for src_idx = scale_factor*(dst_index+0.5)-0.5 , or \'1\' for
                            src_idx = scale_factor*dst_index.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for the user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
        a 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or a 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
                    'trilinear', 'bicubic', or 'nearest' currently.
        ValueError: 'linear' only supports 3-D tensors.
        ValueError: 'bilinear', 'bicubic' and 'nearest' only support 4-D tensors.
        ValueError: 'trilinear' only supports 5-D tensors.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 1 for input 3-D tensor.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: size length should be 3 for input 5-D tensor.
        ValueError: scale_factor should be greater than zero.
        TypeError: align_corners should be a bool value.
        ValueError: align_mode can only be '0' or '1'.
        ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            import paddle.nn.functional as F

            input_data = np.random.rand(2,3,6,10).astype("float32")
            input = paddle.to_tensor(input_data)
            output = F.upsample(x=input, size=[12,12])
            print(output.shape)
            # [2L, 3L, 12L, 12L]

    """
    return interpolate(x, size, scale_factor, mode, align_corners, align_mode,
                       data_format)


def bilinear(x1, x2, weight, bias=None, name=None):
    """

    This layer performs a bilinear tensor product on two inputs.
    See :ref:`api_nn_Bilinear` for details and output shape.
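
    For each output feature :math:`i`, the result is, roughly,
    :math:`out_{i} = x1 * W_{i} * {x2}^{T} + b_{i}` (a shorthand; see
    :ref:`api_nn_Bilinear` for the exact definition).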

    Parameters:
       x1 (Tensor): the first input tensor, its data type should be float32 or float64.
       x2 (Tensor): the second input tensor, its data type should be float32 or float64.
       weight (Parameter): The learnable weights of this layer, shape is [out_features, in1_features, in2_features].
       bias (Parameter, optional): The learnable bias(Bias) of this layer, shape is [1, out_features]. If it is set to None, no bias will be added to the output units. The default value is None.
       name (str, optional): The default value is None. Normally there is no need for user
           to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Returns:
       Tensor: A 2-D Tensor of shape [batch_size, out_features].

    Examples:
       .. code-block:: python

        import paddle
        import numpy
        import paddle.nn.functional as F

        x1 = numpy.random.random((5, 5)).astype('float32')
        x2 = numpy.random.random((5, 4)).astype('float32')
        w = numpy.random.random((1000, 5, 4)).astype('float32')
        b = numpy.random.random((1, 1000)).astype('float32')

        result = F.bilinear(paddle.to_tensor(x1), paddle.to_tensor(x2), paddle.to_tensor(w), paddle.to_tensor(b))           # result shape [5, 1000]

    """

    if in_dynamic_mode():
        return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)

    check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
    check_variable_and_dtype(x2, 'x2', ['float32', 'float64'], 'bilinear')

    inputs = {"X": x1, "Y": x2, "Weight": weight}
    if bias is not None:
        inputs["Bias"] = bias

    helper = LayerHelper("bilinear", **locals())
    out = helper.create_variable_for_type_inference(dtype=x1.dtype)

    helper.append_op(
        type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out})

    return out


def dropout(x,
            p=0.5,
            axis=None,
            training=True,
            mode="upscale_in_train",
            name=None):
    """
    Dropout is a regularization technique for reducing overfitting by preventing
    neuron co-adaptation during training. The dropout operator randomly sets the
    outputs of some units to zero, while upscaling the others according to the given
    dropout probability.

    Args:
        x (Tensor): The input tensor. The data type is float32 or float64.
        p (float|int): Probability of setting units to zero. Default 0.5.
        axis (int|list|tuple): The axis along which the dropout is performed. Default None.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        mode(str): ['upscale_in_train'(default) | 'downscale_in_infer'].

                           1. upscale_in_train(default), upscale the output at training time

                              - train: out = input * mask / ( 1.0 - dropout_prob )
                              - inference: out = input

                           2. downscale_in_infer, downscale the output at inference

                              - train: out = input * mask
                              - inference: out = input * (1.0 - dropout_prob)
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout, has same shape and data type as `x` .


    Examples:
        We use ``p=0.5`` in the following description for simplicity.

        1. When ``axis=None`` , this is commonly used dropout, which dropout each element of x randomly.

        ..  code-block:: text

            Let's see a simple case when x is a 2d tensor with shape 2*3:
            [[1 2 3]
             [4 5 6]]
            we generate mask with the same shape as x, which is 2*3. The value of mask is
            sampled from a Bernoulli distribution randomly. For example, we may get such mask:
            [[0 1 0]
             [1 0 1]]
            So the output is obtained from elementwise multiply of x and mask:
            [[0 2 0]
             [4 0 6]]
            Using default setting, i.e. ``mode='upscale_in_train'`` ,
            if in training phase, the final upscale output is:
            [[0 4 0 ]
             [8 0 12]]
            if in test phase, the output is the same as input:
            [[1 2 3]
             [4 5 6]]
            we can also set ``mode='downscale_in_infer'`` , then
            if in training phase, the final output is:
            [[0 2 0]
             [4 0 6]]
            if in test phase, the scale output is:
            [[0.5 1.  1.5]
             [2.  2.5 3. ]]



        2. When ``axis!=None`` , this is useful for dropping whole channels from an image or sequence.

        ..  code-block:: text

            Let's see the simple case when x is a 2d tensor with shape 2*3 again:
            [[1 2 3]
             [4 5 6]]
            (1) If ``axis=0`` , this means the dropout is only performed in axis `0` .
                we generate mask with the shape 2*1. Only in axis `0` the value is randomly selected.
                For example, we may get such mask:
                [[1]
                 [0]]
                The output is obtained from elementwise multiply of x and mask. Doing that the mask will be
                broadcast from 2*1 to 2*3:
                [[1 1 1]
                 [0 0 0]]
                and the result after elementwise multiply is:
                [[1 2 3]
                 [0 0 0]]
                then we can do upscale or downscale according to the setting of other arguments.
            (2) If ``axis=1`` , this means the dropout is only performed in axis `1` .
                we generate mask with the shape 1*3. Only in axis `1` the value is randomly selected.
                For example, we may get such mask:
                [[1 0 1]]
                Doing elementwise multiply the mask will be broadcast from 1*3 to 2*3:
                [[1 0 1]
                 [1 0 1]]
                and the result after elementwise multiply is:
                [[1 0 3]
                 [4 0 6]]
            (3) What about ``axis=[0, 1]`` ? This means the dropout is performed in all axes of x,
                which is the same case as default setting ``axis=None`` .
            (4) You may note that, logically, `axis=None` could also mean the dropout is performed on no axis of x:
                We generate mask with the shape 1*1. Whole input is randomly selected or dropped.
                For example, we may get such mask:
                [[0]]
                Doing elementwise multiply the mask will be broadcast from 1*1 to 2*3:
                [[0 0 0]
                 [0 0 0]]
                and the result after elementwise multiply is:
                [[0 0 0]
                 [0 0 0]]
                Actually this is not what we want, because all elements may be set to zero.

        When x is a 4d tensor with shape `NCHW`, we can set ``axis=[0,1]`` and the dropout will be performed in channels `N` and `C`; `H` and `W` are tied, i.e. paddle.nn.dropout(x, p, axis=[0,1]) . Please refer to ``paddle.nn.functional.dropout2d`` for more details.
        Similarly, when x is a 5d tensor with shape `NCDHW`, we can set ``axis=[0,1]`` to perform dropout3d. Please refer to ``paddle.nn.functional.dropout3d`` for more details.

        .. code-block:: python

            import paddle
            import numpy as np

            x = np.array([[1,2,3], [4,5,6]]).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.dropout(x, 0.5)
            y_test = paddle.nn.functional.dropout(x, 0.5, training=False) 
            y_0 = paddle.nn.functional.dropout(x, axis=0)
            y_1 = paddle.nn.functional.dropout(x, axis=1)
            y_01 = paddle.nn.functional.dropout(x, axis=[0,1])
            print(x)
            print(y_train)
            print(y_test)
            print(y_0)
            print(y_1)
            print(y_01)
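
            # a deterministic check of 'downscale_in_infer' (see the text
            # example above): at test time the output is x * (1 - p);
            # y_infer is a hypothetical variable name
            y_infer = paddle.nn.functional.dropout(
                x, 0.5, mode='downscale_in_infer', training=False)
            print(y_infer)
            # [[0.5 1.  1.5]
            #  [2.  2.5 3. ]]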

    """
    # fast return for p == 0
    if p == 0:
        return x

    if not isinstance(p, (float, int)):
        raise TypeError("p argument should be a number")
    if p < 0 or p > 1:
        raise ValueError("p argument should be between 0 and 1")
    if mode not in ('downscale_in_infer', 'upscale_in_train'):
        raise ValueError(
            "mode argument should be 'downscale_in_infer' or 'upscale_in_train'")
    if axis and not isinstance(axis, (int, list, tuple)):
        raise TypeError("datatype of axis argument should be int, list or tuple")

    if axis is None:  # commonly used dropout
        seed = None
        mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  # map to the legacy attribute name

        if _non_static_mode():
            if default_main_program().random_seed != 0:
                seed = default_main_program().random_seed

            if in_dygraph_mode():
                out, mask = _C_ops.final_state_dropout( x, None, p, not training, mode, \
                    seed if seed is not None else 0, seed is not None)

                return out
            out, mask = _C_ops.dropout(
                x, 'dropout_prob', p, 'is_test', not training, 'fix_seed',
                seed is not None, 'seed', seed
                if seed is not None else 0, 'dropout_implementation', mode)
            return out

        helper = LayerHelper('dropout', **locals())
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'dropout')

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        mask = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)

        def get_attrs(prog, dropout_prob, is_test, seed):
            if (seed is None or seed == 0) and prog.random_seed != 0:
                seed = prog.random_seed
            attrs = {
                'dropout_prob': dropout_prob,
                'is_test': is_test,
                'fix_seed': seed is not None,
                'seed': seed if seed is not None else 0,
                'dropout_implementation': mode,
            }
            return attrs

        attrs = get_attrs(helper.main_program, p, not training, seed)

        helper.append_op(
            type='dropout',
            inputs={'X': [x]},
            outputs={'Out': [out],
                     'Mask': [mask]},
            attrs=attrs)
        return out
    else:  # sometimes called dropout_nd  # TODO: optimize with C++
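        # Build a mask that broadcasts against x: axes listed in `axis` keep
        # their true extent, every other axis gets size 1, so a single
        # Bernoulli draw is shared across the non-dropped axes.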
        if not in_dynamic_mode():
            check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'dropout')
        dtype = x.dtype
        keep_prob = 1 - p
        if training:
            if p == 1.:
                return paddle.scale(x, scale=0.)

            scale_input = paddle.scale(
                x, scale=1 / keep_prob) if mode == 'upscale_in_train' else x

            #get mask shape
            input_shape = x.shape
            if not in_dynamic_mode():
                input_shape_tensor = paddle.shape(x)
            drop_axes = [axis] if isinstance(axis, int) else list(axis)
            if min(drop_axes) < 0 or max(drop_axes) > len(input_shape) - 1:
                raise ValueError("axis value should be greater than or equal to 0 and less than dimensions of x:{}, but got axis value:{} " \
                                 .format(len(input_shape), max(drop_axes)))
            if len(drop_axes) > len(input_shape):
                raise ValueError(
                    "length of axis should not be greater than dimensions of x:{}, but got length of axis: {}".
                    format(len(input_shape), len(drop_axes)))
            mask_shape = [1] * len(input_shape)
            if not in_dynamic_mode():
                for i in drop_axes:
                    mask_shape[i] = input_shape_tensor[i]
            else:
                for i in drop_axes:
                    mask_shape[i] = input_shape[i]

            #get mask
            random_tensor = paddle.uniform(
                mask_shape, dtype='float32', min=0., max=1.0)
            p = full(shape=[1], fill_value=p, dtype='float32')
            keep_mask = paddle.greater_equal(random_tensor, p)

            scale_input = paddle.cast(scale_input, dtype)
            keep_mask = paddle.cast(keep_mask, dtype)
            ret = paddle.multiply(scale_input, keep_mask, name=name)
            return ret
        else:  # test
            ret = paddle.scale(
                x, scale=keep_prob) if mode == 'downscale_in_infer' else x
            return ret


def dropout2d(x, p=0.5, training=True, data_format='NCHW', name=None):
    """
    Randomly zero out entire channels (in the batched input 4d tensor with the shape `NCHW` ,
    a channel is a 2D feature map with the shape `HW` ). Each channel will be zeroed out independently
    on every forward call with probability `p` using samples from a Bernoulli distribution.

    See ``paddle.nn.functional.dropout`` for more details.

    Args:
        x (Tensor):  The input is 4-D Tensor with shape [N, C, H, W] or [N, H, W, C].
                     The data type is float32 or float64.
        p (float): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from `NCHW` or `NHWC` . The default is `NCHW` . When it is `NCHW` , the data is stored in the order of: [batch_size, input_channels, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout2d, has same shape and data type as `x` .


    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.dropout2d(x)  #train
            y_test = paddle.nn.functional.dropout2d(x, training=False) #test
            for i in range(2):
                for j in range(3):
                    print(x.numpy()[i,j,:,:])
                    print(y_train.numpy()[i,j,:,:]) # may all 0
                    print(y_test.numpy()[i,j,:,:])
    """
    input_shape = x.shape
    if len(input_shape) != 4:
        raise ValueError("dimensions of x should be 4, but received {} != 4"\
        .format(len(input_shape)))

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    return dropout(
        x,
        p=p,
        axis=[0, 1] if data_format == 'NCHW' else [0, 3],
        training=training,
        mode="upscale_in_train",
        name=name)


def dropout3d(x, p=0.5, training=True, data_format='NCDHW', name=None):
    """
    Randomly zero out entire channels (in the batched input 5d tensor with the shape `NCDHW` ,
    a channel is a 3D feature map with the shape `DHW` ). Each channel will be zeroed out independently
    on every forward call with probability `p` using samples from a Bernoulli distribution.

    See ``paddle.nn.functional.dropout`` for more details.

    Args:
        x (Tensor):  The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C].
                     The data type is float32 or float64.
        p (float): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from ``NCDHW`` or ``NDHWC``. The default is ``NCDHW`` . When it is ``NCDHW`` , the data is stored in the order of: [batch_size, input_channels, input_depth, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout3d, has same shape and data type as `x` .


    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.dropout3d(x)  #train
            y_test = paddle.nn.functional.dropout3d(x, training=False) #test
            print(x.numpy()[0,0,:,:,:])
            print(y_train.numpy()[0,0,:,:,:]) # may all 0
            print(y_test.numpy()[0,0,:,:,:])
    """

    input_shape = x.shape
    if len(input_shape) != 5:
        raise ValueError("dimensions of x should be 5, but received {} != 5" \
        .format(len(input_shape)))

    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    return dropout(
        x,
        p=p,
        axis=[0, 1] if data_format == 'NCDHW' else [0, 4],
        training=training,
        mode="upscale_in_train",
        name=name)


def alpha_dropout(x, p=0.5, training=True, name=None):
    """
    Alpha Dropout is a type of Dropout that maintains the self-normalizing property.
    For an input with zero mean and unit standard deviation, the output of Alpha Dropout
    maintains the original mean and standard deviation of the input.
    Alpha Dropout fits well with the SELU activation function by randomly setting activations to the negative saturation value.

    Args:
        x (Tensor): The input tensor. The data type is float32 or float64.
        p (float | int): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor representing the dropout, has same shape and data type as `x`.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.array([[-1, 1], [-1, 1]]).astype('float32')
            x = paddle.to_tensor(x)
            y_train = paddle.nn.functional.alpha_dropout(x, 0.5)
            y_test = paddle.nn.functional.alpha_dropout(x, 0.5, training=False)
            print(x)
            print(y_train)
            # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly)
            print(y_test)
    """
    if not isinstance(p, (float, int)):
        raise TypeError("p argument should be a float or int")
    if p < 0 or p > 1:
        raise ValueError("p argument should between 0 and 1")

    if not in_dynamic_mode():
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'alpha_dropout')

    if training:
        if p == 1:
            return paddle.scale(x, scale=0.)
        # get transformation params
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        alpha_p = -alpha * scale
        a = ((1 - p) * (1 + p * alpha_p**2))**-0.5
        b = -a * alpha_p * p
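        # a and b define the affine correction a * y + b applied after masking;
        # for zero-mean, unit-variance input they are chosen so that setting
        # dropped units to alpha_p leaves the output mean and variance unchanged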

        dtype = x.dtype
        input_shape = x.shape

        # get mask
        random_tensor = paddle.uniform(
            input_shape, dtype='float32', min=0., max=1.0)
        p = full(shape=[1], fill_value=p, dtype='float32')
        keep_mask = paddle.greater_equal(random_tensor, p)
        keep_mask = paddle.cast(keep_mask, dtype)
        drop_mask = paddle.subtract(
            full(
                shape=input_shape, fill_value=1., dtype=dtype), keep_mask)

        # apply mask
        b = full(shape=[1], fill_value=b, dtype=dtype)
        y = paddle.add(paddle.multiply(x, keep_mask),
                       paddle.scale(
                           drop_mask, scale=alpha_p))
        res = paddle.add(paddle.scale(y, scale=a), b, name=name)
        return res
    else:  # test
        return x


def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
    """
    Pad tensor according to 'pad' and 'mode'.
    If mode is 'constant' and the length of pad is twice the number of x's dimensions,
    then x will be padded from the first dimension to the last dimension
    according to 'pad' and 'value'.
    If mode is 'reflect', pad[0] and pad[1] must be no greater
    than width-1. The height and depth dimensions have the same condition.

    Parameters:
        x (Tensor): The input tensor with data type float32/float64/int32/int64.
        pad (Tensor | List[int] | Tuple[int]): The padding size with data type int.
            If mode is 'constant' and the length of pad is twice the number of x's dimensions, then x will
            be padded from the first dimension to the last dimension.
            Else: 1. If input dimension is 3, then the pad has the form (pad_left,
            pad_right). 2. If the input dimension is 4, then the pad has the form (pad_left, pad_right, 
            pad_top, pad_bottom). 3. If the input dimension is 5, then the pad has the form 
            (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'replicate' mode, uses input boundaries to pad the input tensor.
            When in 'circular' mode, uses circular input to pad the input tensor.
            Default is 'constant'.
        value (float32): The value to fill the padded areas in 'constant' mode. Default is 0.0.
        data_format (str): A string from: "NCL", "NLC", "NHWC", "NCHW", "NCDHW", "NDHWC". Specify the data format of
           the input data. Default is "NCHW".
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
                    
    Returns: a Tensor padded according to pad and mode; its data type is the same as the input's.
    Return Type: Tensor

    Examples:
        .. code-block:: text

            x = [[[[[1., 2., 3.],
                    [4., 5., 6.]]]]]

            Case 0:
                pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
                mode = 'constant'
                value = 0
                Out = [[[[[0., 0., 0.],
                          [1., 2., 3.],
                          [4., 5., 6.],
                          [0., 0., 0.]]]]]

            Case 1:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'constant'
                value = 0
                Out = [[[[[0. 0. 0. 0. 0. 0. 0.]
                          [0. 0. 1. 2. 3. 0. 0.]
                          [0. 0. 4. 5. 6. 0. 0.]
                          [0. 0. 0. 0. 0. 0. 0.]]]]]

            Case 2:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'reflect'
                Out = [[[[[6. 5. 4. 5. 6. 5. 4.]
                          [3. 2. 1. 2. 3. 2. 1.]
                          [6. 5. 4. 5. 6. 5. 4.]
                          [3. 2. 1. 2. 3. 2. 1.]]]]]

            Case 3:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'replicate'
                Out = [[[[[1. 1. 1. 2. 3. 3. 3.]
                          [1. 1. 1. 2. 3. 3. 3.]
                          [4. 4. 4. 5. 6. 6. 6.]
                          [4. 4. 4. 5. 6. 6. 6.]]]]]

            Case 4:
                pad = [2, 2, 1, 1, 0, 0],
                mode = 'circular'
                Out = [[[[[5. 6. 4. 5. 6. 4. 5.]
                          [2. 3. 1. 2. 3. 1. 2.]
                          [5. 6. 4. 5. 6. 4. 5.]
                          [2. 3. 1. 2. 3. 1. 2.]]]]]

    Code Examples:
        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn.functional as F
            
            # example 1
            x_shape = (1, 1, 3)
            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
            y = F.pad(x, [0, 0, 0, 0, 2, 3], value=1, mode='constant', data_format="NCL")
            print(y)
            # [[[1. 1. 1. 2. 3. 1. 1. 1.]]]
            
            # example 2
            x_shape = (1, 1, 3)
            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
            y = F.pad(x, [2, 3], value=1, mode='constant', data_format="NCL")
            print(y)
            # [[[1. 1. 1. 2. 3. 1. 1. 1.]]]
            
            # example 3
            x_shape = (1, 1, 2, 3)
            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
            y = F.pad(x, [1, 2, 1, 1], value=1, mode='circular')
            print(y)
            # [[[[6. 4. 5. 6. 4. 5.]
            #    [3. 1. 2. 3. 1. 2.]
            #    [6. 4. 5. 6. 4. 5.]
            #    [3. 1. 2. 3. 1. 2.]]]]
    """
    assert mode in ['reflect', 'replicate', 'constant', 'circular'], \
            "mode should be one of constant, reflect, replicate, circular, but got {}.".format(mode)

    data_format = data_format.upper()
    assert data_format in ["NCL", "NCHW", "NCDHW", "NLC", "NHWC", "NDHWC"], \
        "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " \
        "but got {}".format(data_format)

    x_dim = len(x.shape)

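    # fast path: constant-mode padding that covers every dimension of x maps
    # directly to the generic pad op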
    if mode == "constant" and isinstance(pad, (
            list, tuple)) and len(pad) == x_dim * 2:
        return layers.pad(x, pad, pad_value=value)

    assert x_dim in [
        3, 4, 5
    ], "input tensor dimension must be in [3, 4, 5] but got {}".format(x_dim)

    supported_format_map = {
        3: ["NCL", "NLC"],
        4: ["NCHW", "NHWC"],
        5: ["NCDHW", "NDHWC"],
    }
    assert data_format in supported_format_map[x_dim], \
    "input tensor dimension is {}, its data format should be in {} but got {}".format(
        x_dim, supported_format_map[x_dim], data_format)

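    # the underlying pad3d op only supports 5-D input: 3-D and 4-D inputs are
    # unsqueezed to 5-D here (and squeezed back after padding), with the pad
    # list extended by zeros for the inserted dimensions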
    unsqueezed_dim = []

    if isinstance(pad, Variable):
        if data_format in ["NCL", "NCHW", "NCDHW"]:
            data_format = "NCDHW"
            if x_dim == 3:
                pad = concat([zeros((4, ), dtype="int32"), pad], axis=0)
                unsqueezed_dim = [3, 4]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = concat([pad, zeros((2, ), dtype="int32")], axis=0)
                unsqueezed_dim = [2]
                x = unsqueeze(x, axis=unsqueezed_dim)
        elif data_format in ["NLC", "NHWC", "NDHWC"]:
            data_format = "NDHWC"
            if x_dim == 3:
                pad = concat([zeros((4, ), dtype="int32"), pad], axis=0)
                unsqueezed_dim = [2, 3]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = concat([pad, zeros((2, ), dtype="int32")], axis=0)
                unsqueezed_dim = [1]
                x = unsqueeze(x, axis=unsqueezed_dim)
    else:
        pad = list(pad)
        if data_format in ["NCL", "NCHW", "NCDHW"]:
            data_format = "NCDHW"
            if x_dim == 3:
                pad = [0, 0, 0, 0] + pad
                unsqueezed_dim = [3, 4]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = pad + [0, 0]
                unsqueezed_dim = [2]
                x = unsqueeze(x, axis=unsqueezed_dim)
        elif data_format in ["NLC", "NHWC", "NDHWC"]:
            data_format = "NDHWC"
            if x_dim == 3:
                pad = [0, 0, 0, 0] + pad
                unsqueezed_dim = [2, 3]
                x = unsqueeze(x, axis=unsqueezed_dim)
            elif x_dim == 4:
                pad = pad + [0, 0]
                unsqueezed_dim = [1]
                x = unsqueeze(x, axis=unsqueezed_dim)

    if in_dygraph_mode():
        if isinstance(pad, Variable):
            pad = pad.numpy().tolist()
        out = _C_ops.final_state_pad3d(x, pad, mode, value, data_format)
    else:
        if _in_legacy_dygraph():
            if isinstance(pad, Variable):
                pad = pad.numpy().tolist()
            out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
                               "data_format", data_format, "name", name)
        else:
            attrs = {'mode': mode, 'value': value, 'data_format': data_format}
            inputs = {'X': [x]}
            if isinstance(pad, Variable):
                inputs['Paddings'] = [pad]
                attrs['paddings'] = []
            else:
                attrs['paddings'] = pad

            helper = LayerHelper('pad3d', **locals())

            dtype = helper.input_dtype(input_param_name='input')
            out = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs)

    if len(unsqueezed_dim) != 0:
        out = squeeze(out, axis=unsqueezed_dim)

    return out


def zeropad2d(x, padding, data_format="NCHW", name=None):
    """
    Pads the input tensor boundaries with zero according to 'padding'.

    Args:
        x(Tensor): The input tensor with data type float16/float32/float64/int32/int64.
        padding(int | Tensor | List[int] | Tuple[int]): The padding size with data type int.
            The input dimension should be 4 and padding has the form (pad_left, pad_right,
            pad_top, pad_bottom).
        data_format(str): A string from: "NHWC", "NCHW". Specify the data format of
            the input data. Default: "NCHW".
        name(str, optional): The default value is None. Normally there is no need for user
            to set this property.

    Returns: Tensor, padded with 0 according to 'padding'; the data type is the same as the input's.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            import paddle.nn.functional as F

            x_shape = (1, 1, 2, 3)
            x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
            y = F.zeropad2d(x, [1, 2, 1, 1])
            # [[[[0. 0. 0. 0. 0. 0.]
            #    [0. 1. 2. 3. 0. 0.]
            #    [0. 4. 5. 6. 0. 0.]
            #    [0. 0. 0. 0. 0. 0.]]]]
    """

    return pad(x,
               pad=padding,
               mode='constant',
               value=0,
               data_format=data_format,
               name=name)


def cosine_similarity(x1, x2, axis=1, eps=1e-8):
    """
    Compute cosine similarity between x1 and x2 along axis.

    Parameters:
        x1 (Tensor): First input. The data type is float32 or float64.
        x2 (Tensor): Second input. The data type is float32 or float64.
        axis (int): Dimension of vectors to compute cosine similarity. Default is 1.
        eps(float): Small value to avoid division by zero. Default is 1e-8.
                    
    Returns: a Tensor representing cosine similarity between x1 and x2 along axis.
    Return Type: Tensor

    Examples:
        .. code-block:: text

            Case 0:
                x1 = [[0.8024077  0.9927354  0.27238318 0.8344984 ]
                     [0.48949873 0.5797396  0.65444374 0.66510963]
                     [0.1031398  0.9614342  0.08365563 0.6796464 ]
                     [0.10760343 0.7461209  0.7726148  0.5801006 ]]
                x2 = [[0.62913156 0.1536727  0.9847992  0.04591406]
                     [0.9098952  0.15715368 0.8671125  0.3156102 ]
                     [0.4427798  0.54136837 0.5276275  0.32394758]
                     [0.3769419  0.8535014  0.48041078 0.9256797 ]]
                axis = 1
                eps = 1e-8
                Out: [0.5275037  0.8368967  0.75037485 0.9245899]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np

            np.random.seed(0)
            x1 = np.random.rand(2,3)
            x2 = np.random.rand(2,3)
            x1 = paddle.to_tensor(x1)
            x2 = paddle.to_tensor(x2)
            result = paddle.nn.functional.cosine_similarity(x1, x2, axis=0)
            print(result)
            # [0.99806249 0.9817672  0.94987036]
            
    """
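    # cos(x1, x2) = <x1, x2> / max(||x1|| * ||x2||, eps)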
    w12 = sum(paddle.multiply(x1, x2), axis=axis)
    w1 = sum(paddle.multiply(x1, x1), axis=axis)
    w2 = sum(paddle.multiply(x2, x2), axis=axis)
    n12 = sqrt(clip(w1 * w2, min=eps * eps))
    cos_sim = w12 / n12
    return cos_sim


def linear(x, weight, bias=None, name=None):
    r"""

    Fully-connected linear transformation operator. For each input :math:`X` ,
    the equation is:

    .. math::

        Out = XW + b

    where :math:`W` is the weight and :math:`b` is the bias.

    If the weight is a 2-D tensor of shape :math:`[in\_features, out\_features]` ,
    input should be a multi-dimensional tensor of shape
    :math:`[batch\_size, *, in\_features]` , where :math:`*` means any number of
    additional dimensions. The linear operator multiplies the input tensor with
    the weight and produces an output tensor of shape :math:`[batch\_size, *, out\_features]` .
    If :math:`bias` is not None, the bias should be a 1-D tensor of shape
    :math:`[out\_features]` and will be added to the output.

    Parameters:
        x (Tensor): Input tensor. The data type should be float16, float32 or float64.
        weight (Tensor): Weight tensor. The data type should be float16, float32 or float64.
        bias (Tensor, optional): Bias tensor. The data type should be float16, float32 or float64.
                                 If it is set to None, no bias will be added to the output units.
        name (str, optional): Normally there is no need for user to set this parameter.
                              For detailed information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor, the shape is :math:`[batch\_size, *, out\_features]` and the
        data type is the same with input :math:`x` .

    Examples:
        .. code-block:: python
          
          import paddle
          
          x = paddle.randn((3, 2), dtype="float32")
          # x: [[-0.32342386 -1.200079  ]
          #     [ 0.7979031  -0.90978354]
          #     [ 0.40597573  1.8095392 ]]
          weight = paddle.full(shape=[2, 4], fill_value="0.5", dtype="float32", name="weight")
          # weight: [[0.5 0.5 0.5 0.5]
          #          [0.5 0.5 0.5 0.5]]
          bias = paddle.ones(shape=[4], dtype="float32", name="bias")
          # bias: [1. 1. 1. 1.]
          y = paddle.nn.functional.linear(x, weight, bias)
          # y: [[0.23824859 0.23824859 0.23824859 0.23824859]
          #     [0.9440598  0.9440598  0.9440598  0.9440598 ]
          #     [2.1077576  2.1077576  2.1077576  2.1077576 ]]
    """
    if in_dygraph_mode():
        pre_bias = _C_ops.final_state_matmul(x, weight, False, False)

        if bias is None:
            return pre_bias

        return _C_ops.final_state_add(pre_bias, bias)
    else:
        if _in_legacy_dygraph():
            pre_bias = _C_ops.matmul_v2(x, weight, 'trans_x', False, 'trans_y',
                                        False)

            if bias is None:
                return pre_bias

            return _C_ops.elementwise_add(pre_bias, bias)
        else:
            helper = LayerHelper('linear', **locals())
            dtype = x.dtype

            check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                     'linear')
            check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                        'linear')

            inputs = {'X': [x], 'Y': [weight]}
            attrs = {'trans_x': False, 'trans_y': False}
            tmp = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='matmul_v2',
                inputs=inputs,
                outputs={'Out': tmp},
                attrs=attrs)
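            # broadcast the 1-D bias across the last dimension of the
            # matmul output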
            if bias is not None:
                res = helper.create_variable_for_type_inference(dtype)
                helper.append_op(
                    type='elementwise_add',
                    inputs={'X': [tmp],
                            'Y': [bias]},
                    outputs={'Out': [res]},
                    attrs={'axis': len(x.shape) - 1})
            else:
                res = tmp
            return res


def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
    r"""
    Label smoothing is a mechanism to regularize the classifier layer and is called
    label-smoothing regularization (LSR).

    Label smoothing is proposed to encourage the model to be less confident,
    since optimizing the log-likelihood of the correct label directly may
    cause overfitting and reduce the ability of the model to adapt. Label
    smoothing replaces the ground-truth label :math:`y` with the weighted sum
    of itself and some fixed distribution :math:`\mu`. For class :math:`k`,
    i.e.

    .. math::

        \tilde{y}_k = (1 - \epsilon) * y_k + \epsilon * \mu_k,

    where :math:`1 - \epsilon` and :math:`\epsilon` are the weights
    respectively, and :math:`\tilde{y}_k` is the smoothed label. Usually
    uniform distribution is used for :math:`\mu`.

    See more details about label smoothing in https://arxiv.org/abs/1512.00567.

    Parameters:
        label(Tensor): The input variable containing the label data. The
                        label data should use one-hot representation. It's
                        a multidimensional tensor with a shape of
                        :math:`[N_1, ..., Depth]`, where Depth is class number. The dtype can be "float32" and "float64".
        prior_dist(Tensor, optional): The prior distribution to be used to smooth
                        labels. If not provided, a uniform distribution
                        is used. It's a multidimensional tensor with a shape of
                        :math:`[1, class\_num]` . The default value is None.
        epsilon(float, optional): The weight used to mix up the original ground-truth
                        distribution and the fixed distribution. The default value is
                        0.1.
        name(str, optional): The default value is None. Normally there is no need for user
                        to set this property. For more information, please refer to
                        :ref:`api_guide_Name`.

    Returns:
        Tensor: The tensor containing the smoothed labels.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            
            x_data = np.array([[[0, 1, 0],
                                [ 1,  0, 1]]]).astype("float32")
            print(x_data.shape)
            paddle.disable_static()
            x = paddle.to_tensor(x_data, stop_gradient=False)
            output = paddle.nn.functional.label_smooth(x)
            print(output)
            
            #[[[0.03333334 0.93333334 0.03333334]
            #  [0.93333334 0.03333334 0.93333334]]]
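            # i.e. (1 - 0.1) * y + 0.1 / 3 for each of the 3 classes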
    """
    if in_dygraph_mode():
        return _C_ops.final_state_label_smooth(label, prior_dist,
                                               float(epsilon))

    if epsilon > 1. or epsilon < 0.:
        raise ValueError("The value of epsilon must be between 0 and 1.")

    if paddle.in_dynamic_mode():
        return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon))

    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                             'label_smooth')

    helper = LayerHelper("label_smooth", **locals())
    label.stop_gradient = True
    smooth_label = helper.create_variable_for_type_inference(label.dtype)
    helper.append_op(
        type="label_smooth",
        inputs={"X": label,
                "PriorDist": prior_dist} if prior_dist else {"X": label},
        outputs={"Out": smooth_label},
        attrs={"epsilon": float(epsilon)})
    return smooth_label


def class_center_sample(label, num_classes, num_samples, group=None):
    """
    Class center sample method is proposed from the paper PartialFC that samples only a subset of the class centers.
    The process of sampling the subset of class centers is straightforward: 

    1. First select the positive class centers;
    2. Then randomly sample negative class centers.

    Specifically, given a label tensor, shape [batch_size], select all the positive class centers and randomly 
    sample negative class centers, then remap the input label tensor using the sampled class centers.

    For more information, see Partial FC: Training 10 Million Identities on a Single Machine,
    arxiv: https://arxiv.org/abs/2010.05222
    
    .. hint::
        If the number of the positive class centers is greater than the input num_samples, it keeps all the positive 
        class centers and the shape of sampled_class_center will be [num_positive_class_centers].

        The API supports CPU, single GPU and multi GPU.

        For data parallel mode, set ``group=False``.

        For model parallel mode, set ``group=None`` or the group instance return by paddle.distributed.new_group.

    Args:
        label (Tensor): 1-D tensor with shape [N], each label in [0, num_classes)
        num_classes (int): A positive integer to specify the number of classes at local rank.
            Note that num_classes of each GPU can be different.
        num_samples (int): A positive integer to specify the number of class center to sample.
        group (Group, optional): The group instance return by paddle.distributed.new_group 
            or ``None`` for global default group or ``False`` for data parallel (do not communication cross ranks).
            Default is ``None``.

    Returns:
        Tuple of two ``Tensor`` : (remapped_label, sampled_class_center), remapped label using sampled class center,
        sampled class center from [0, num_classes).

    Examples:

    .. code-block:: python
        :name: code-example1

        # CPU or single GPU
        import paddle
        num_classes = 20
        batch_size = 10
        num_samples = 6
        label = paddle.randint(low=0, high=num_classes, shape=[batch_size], dtype='int64')
        remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample(label, num_classes, num_samples)

        print(label)
        print(remapped_label)
        print(sampled_class_index)

        # the output is
        #Tensor(shape=[10], dtype=int64, place=CPUPlace, stop_gradient=True,
        #       [11, 5 , 1 , 3 , 12, 2 , 15, 19, 18, 19])
        #Tensor(shape=[10], dtype=int64, place=CPUPlace, stop_gradient=True,
        #       [4, 3, 0, 2, 5, 1, 6, 8, 7, 8])
        #Tensor(shape=[9], dtype=int64, place=CPUPlace, stop_gradient=True,
        #       [1 , 2 , 3 , 5 , 11, 12, 15, 18, 19])

    .. code-block:: python
        :name: code-example2

        # required: distributed
        # Multi GPU, test_class_center_sample.py
        import paddle
        import paddle.distributed as dist
        strategy = dist.fleet.DistributedStrategy()
        dist.fleet.init(is_collective=True, strategy=strategy)
        batch_size = 10
        num_samples = 6
        rank_id = dist.get_rank()
        # num_classes of each GPU can be different, e.g num_classes_list = [10, 8]
        num_classes_list = [10, 10]
        num_classes = paddle.sum(paddle.to_tensor(num_classes_list))
        label = paddle.randint(low=0, high=num_classes.item(), shape=[batch_size], dtype='int64')
        label_list = []
        dist.all_gather(label_list, label)
        label = paddle.concat(label_list, axis=0)
        remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample(label, num_classes_list[rank_id], num_samples)

        print(label)
        print(remapped_label)
        print(sampled_class_index)

        #python -m paddle.distributed.launch --gpus=0,1 test_class_center_sample.py
        # rank 0 output:
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [10, 17, 15, 11, 9 , 12, 18, 18, 17, 18, 19, 2 , 8 , 13, 11, 13, 9 , 10, 0 , 4 ])
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [6 , 11, 10, 7 , 4 , 8 , 12, 12, 11, 12, 13, 1 , 3 , 9 , 7 , 9 , 4 , 6 , 0 , 2 ])
        #Tensor(shape=[6], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [0, 2, 4, 8, 9, 3])
        
        # rank 1 output:
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
        #       [10, 17, 15, 11, 9 , 12, 18, 18, 17, 18, 19, 2 , 8 , 13, 11, 13, 9 , 10, 0 , 4 ])
        #Tensor(shape=[20], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
        #       [6 , 11, 10, 7 , 4 , 8 , 12, 12, 11, 12, 13, 1 , 3 , 9 , 7 , 9 , 4 , 6 , 0 , 2 ])
        #Tensor(shape=[7], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
        #       [0, 1, 2, 3, 5, 7, 8])
    """
    if not (group == False or group is None or hasattr(group, 'is_member')):
        raise ValueError(
            'Expected group to be False, None or an instance of paddle.distributed.collective.Group \
             (got group: {})'.format(group))

    if hasattr(group, 'is_member') and not group.is_member():
        return

    ring_id = 0
    rank = 0
    nranks = 1
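    # resolve rank / world size from the given group; with group=False (data
    # parallel) the defaults above are kept and no cross-rank communication happens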
    if group != False:
        if core.is_compiled_with_dist():
            parallel_env = paddle.distributed.ParallelEnv()
            global_rank = parallel_env.rank
            rank = global_rank if group is None else group.get_group_rank(
                global_rank)
            nranks = parallel_env.world_size if group is None else group.nranks
1788 1789 1790 1791 1792 1793

    if num_samples > num_classes:
        raise ValueError(
            'Expected num_samples less than or equal to {}, got num_samples {}'.
            format(num_classes, num_samples))

    label_size = 1
    for dim in list(label.shape):
        label_size *= dim
    if label_size != -1 and label_size < 1:
        raise ValueError('Expected label_size > 0 \
             (got label_size: {})'.format(label_size))

    label_dims = len(list(label.shape))
    if label_dims != 1:
        raise ValueError('Expected label_dims == 1 \
             (got label_dims: {})'.format(label_dims))

    seed = None
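    # fall back to the program-level random seed when the user has set one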
    if (seed is None or seed == 0) and default_main_program().random_seed != 0:
        seed = default_main_program().random_seed

    if in_dynamic_mode():
        remapped_label, sampled_class_center = _C_ops.class_center_sample(
            label, 'num_classes', num_classes, 'num_samples', num_samples,
            'ring_id', ring_id, 'nranks', nranks, 'rank', rank, 'fix_seed',
            seed is not None, 'seed', seed if seed is not None else 0)
        return remapped_label, sampled_class_center

    check_variable_and_dtype(label, 'label', ['int64', 'int32'],
                             'class_center_sample')
    op_type = 'class_center_sample'
    helper = LayerHelper(op_type, **locals())
    remapped_label = helper.create_variable_for_type_inference(
        dtype=label.dtype)
    sampled_class_center = helper.create_variable_for_type_inference(
        dtype=label.dtype)
    helper.append_op(
        type=op_type,
        inputs={'Label': label},
        outputs={
            'RemappedLabel': remapped_label,
            'SampledLocalClassCenter': sampled_class_center
        },
        attrs={
            'num_classes': num_classes,
            'num_samples': num_samples,
            'ring_id': ring_id,
            'nranks': nranks,
            'rank': rank,
            'fix_seed': seed is not None,
            'seed': seed if seed is not None else 0
        })
    return remapped_label, sampled_class_center


def fold(x,
         output_sizes,
         kernel_sizes,
         strides=1,
         paddings=0,
         dilations=1,
         name=None):
    r"""
    
    This op combines an array of sliding local blocks into a large containing
    tensor, also known as col2im when operated on a batched 2D image tensor. Fold calculates each
    combined value in the resulting large tensor by summing all values from all containing blocks.


    For each input :math:`x` with shape [N, C_in, L], the output shape [N, C_out, H_out, W_out]
    can be calculated as follows.

    .. math::

        H_{out} &= output\_sizes[0] \\
        W_{out} &= output\_sizes[1] \\
        C_{out} &= \frac{C_{in}}{kernel\_sizes[0] \times kernel\_sizes[1]}

    Parameters:
        x(Tensor):                3-D Tensor, input tensor of format [N, C, L],
                                  data type can be float32 or float64
        output_sizes(int|list|tuple):       The size of output size, should be [output_size_h, output_size_w]
                                  or an integer o treated as [o, o].
        kernel_sizes(int|list|tuple):   The size of convolution kernel, should be [k_h, k_w]
                                  or an integer k treated as [k, k].
        strides(int|list|tuple):        The strides, should be [stride_h, stride_w]
                                  or an integer stride treated as [stride, stride].
                                  For default, strides will be [1, 1].
        paddings(int|list|tuple):       The paddings of each dimension, should be
                                  [padding_top, padding_left, padding_bottom, padding_right]
                                  or [padding_h, padding_w] or an integer padding.
                                  If [padding_h, padding_w] was given, it will be expanded to
                                  [padding_h, padding_w, padding_h, padding_w]. If an integer
                                  padding was given, [padding, padding, padding, padding] will
                                  be used. For default, paddings will be [0, 0, 0, 0]
        dilations(int|list|tuple):      the dilations of convolution kernel, should be
                                  [dilation_h, dilation_w], or an integer dilation treated as
                                  [dilation, dilation]. For default, it will be [1, 1].
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`


    Returns:
        The tensor formed by combining a group of sliding local blocks.
        The output shape is [N, Cout, H, W] as described above.

    Examples:

        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.randn([2,3*2*2,12])
            y = F.fold(x, output_sizes=[4, 5], kernel_sizes=2)
            # y.shape = [2,3,4,5]
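            # here L = 12 = ((4 - 2) + 1) * ((5 - 2) + 1) sliding blocks, and
            # C_out = 3*2*2 / (2 * 2) = 3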

    """

    helper = LayerHelper("fold", **locals())

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')

    assert len(x.shape) == 3, \
            "input should be of the format [N, C, L]"

    def _is_list_or_tuple_(data):
        return isinstance(data, (list, tuple))

    if isinstance(output_sizes, int):
        output_sizes = [output_sizes, output_sizes]
    else:
        assert _is_list_or_tuple_(output_sizes) and (len(output_sizes) == 2), \
            "output_sizes should either be an integer or a list/tuple of two integers"

    if isinstance(kernel_sizes, int):
        kernel_sizes = [kernel_sizes, kernel_sizes]
    else:
        assert _is_list_or_tuple_(kernel_sizes) and (len(kernel_sizes) == 2), \
            "kernel_sizes should either be an integer or a list/tuple of two integers"

    if isinstance(strides, int):
        strides = [strides, strides]
    else:
        assert _is_list_or_tuple_(strides) and (len(strides) == 2), \
            "strides should either be an integer or a list/tuple of two integers"

    if isinstance(dilations, int):
        dilations = [dilations, dilations]
    else:
        assert _is_list_or_tuple_(dilations) and (len(dilations) == 2), \
            "dilations should either be an integer or a list/tuple of two integers"

    if isinstance(paddings, int):
        paddings = [paddings] * 4
    elif isinstance(paddings, list):
        if len(paddings) == 2:
            paddings = paddings * 2
        elif len(paddings) == 4:
            pass
        else:
            raise ValueError(
                "paddings should either be an integer or a list of 2 or 4 integers"
            )
    else:
        raise ValueError(
            "Unexpected type of paddings, it should be either an integer or a list "
            "of 2 or 4 integers")

    if in_dynamic_mode():
        out = _C_ops.fold(x, "output_sizes", output_sizes, "kernel_sizes",
                          kernel_sizes, "strides", strides, "paddings",
                          paddings, "dilations", dilations)
    else:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type="fold",
            inputs={"X": x},
            outputs={"Y": out},
            attrs={
                "output_sizes": output_sizes,
                "kernel_sizes": kernel_sizes,
                "strides": strides,
                "paddings": paddings,
                "dilations": dilations
            })
    return out