#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define the common classes to build a neural network
import paddle
from ...fluid.dygraph import Flatten  #DEFINE_ALIAS
from ...fluid.dygraph import layers
from ...fluid.framework import in_dygraph_mode
from .. import functional as F
from ...fluid.framework import _dygraph_tracer

__all__ = [
    'Embedding',
    'Linear',
    'Upsample',
    'Pad1D',
    'Pad2D',
    'Pad3D',
    'UpsamplingNearest2D',
    'UpsamplingBilinear2D',
    'CosineSimilarity',
    'Dropout',
    'Dropout2D',
    'Dropout3D',
    'Bilinear',
    'AlphaDropout',
    'Unfold',
]


def _npairs(x, n):
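    # Normalize a padding spec: a single int is expanded to n * 2 entries
    # (e.g. _npairs(1, 2) -> [1, 1, 1, 1]); Tensor and list inputs pass through unchanged.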
    if isinstance(x, (paddle.Tensor, list)):
        return x
    x = [x] * (n * 2)
    return x


class Linear(layers.Layer):
    r"""

    Fully-connected linear transformation layer. For each input :math:`X` ,
    the equation is:

    .. math::

        Out = XW + b

    where :math:`W` is the weight and :math:`b` is the bias.

    Linear layer takes only one multi-dimensional tensor as input with the
    shape :math:`[batch\_size, *, in\_features]` , where :math:`*` means any
    number of additional dimensions. It multiplies input tensor with the weight
    (a 2-D tensor of shape :math:`[in\_features, out\_features]` ) and produces
    an output tensor of shape :math:`[batch\_size, *, out\_features]` .
    If :math:`bias\_attr` is not False, the bias (a 1-D tensor of
    shape :math:`[out\_features]` ) will be created and added to the output.

    Parameters:
        in_features (int): The number of input units.
        out_features (int): The number of output units.
        weight_attr (ParamAttr, optional): The attribute for the learnable
            weight of this layer. The default value is None and the weight will be
            initialized to zero. For detailed information, please refer to
            paddle.ParamAttr.
        bias_attr (ParamAttr|bool, optional): The attribute for the learnable bias
            of this layer. If it is set to False, no bias will be added to the output.
            If it is set to None or one kind of ParamAttr, a bias parameter will
            be created according to ParamAttr. For detailed information, please refer
            to paddle.ParamAttr. The default value is None and the bias will be
            initialized to zero.
        name (str, optional): Normally there is no need for the user to set this parameter.
            For detailed information, please refer to :ref:`api_guide_Name` .

    Attribute:
        **weight** (Parameter): the learnable weight of this layer.

        **bias** (Parameter): the learnable bias of this layer.

    Shape:
        - input: Multi-dimensional tensor with shape :math:`[batch\_size, *, in\_features]` .
        - output: Multi-dimensional tensor with shape :math:`[batch\_size, *, out\_features]` .

    Examples:
        .. code-block:: python

          import paddle

          # Define the linear layer.
          weight_attr = paddle.ParamAttr(
              name="weight",
              initializer=paddle.nn.initializer.Constant(value=0.5))
          bias_attr = paddle.ParamAttr(
              name="bias",
              initializer=paddle.nn.initializer.Constant(value=1.0))
          linear = paddle.nn.Linear(2, 4, weight_attr=weight_attr, bias_attr=bias_attr)
          # linear.weight: [[0.5 0.5 0.5 0.5]
          #                 [0.5 0.5 0.5 0.5]]
          # linear.bias: [1. 1. 1. 1.]

          x = paddle.randn((3, 2), dtype="float32")
          # x: [[-0.32342386 -1.200079  ]
          #     [ 0.7979031  -0.90978354]
          #     [ 0.40597573  1.8095392 ]]
          y = linear(x)
          # y: [[0.23824859 0.23824859 0.23824859 0.23824859]
          #     [0.9440598  0.9440598  0.9440598  0.9440598 ]
          #     [2.1077576  2.1077576  2.1077576  2.1077576 ]]
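          # e.g. the first row is 0.5*(-0.32342386) + 0.5*(-1.200079) + 1.0
          # = 0.23824859 (up to float32 rounding)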
    """

    def __init__(self,
                 in_features,
                 out_features,
                 weight_attr=None,
                 bias_attr=None,
                 name=None):
        super(Linear, self).__init__()
        self._dtype = self._helper.get_default_dtype()
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self.weight = self.create_parameter(
            shape=[in_features, out_features],
            attr=self._weight_attr,
            dtype=self._dtype,
            is_bias=False)
        self.bias = self.create_parameter(
            shape=[out_features],
            attr=self._bias_attr,
            dtype=self._dtype,
            is_bias=True)
        self.name = name

    def forward(self, input):
        out = F.linear(
            x=input, weight=self.weight, bias=self.bias, name=self.name)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'in_features={}, out_features={}, dtype={}{}'.format(
            self.weight.shape[0], self.weight.shape[1], self._dtype, name_str)


class Upsample(layers.Layer):
    """
    This op resizes a batch of images.

    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w),
    a 4-D Tensor of the shape (num_batches, channels, in_h, in_w), or a 5-D Tensor
    of the shape (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    where in_w is the width of the input tensor, in_h is the height of the input tensor,
    and in_d is the depth of the input tensor. The resizing only applies to the
    three dimensions (depth, height and width).

    Supported resample methods:
        'linear' : Linear interpolation
        'bilinear' : Bilinear interpolation
        'trilinear' : Trilinear interpolation
        'nearest' : Nearest neighbor interpolation
        'bicubic' : Bicubic interpolation

    Linear interpolation is the method of using a line connecting two known quantities
    to determine the value of an unknown quantity between the two known quantities.

    Nearest neighbor interpolation performs nearest neighbor interpolation
    in both the 3rd dimension (the height direction) and the 4th dimension (the width
    direction) of the input tensor.

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
    The linear interpolation is performed on three directions.
    align_corners and align_mode are optional parameters, which select the
    calculation method of interpolation.

    Area interpolation performs area interpolation in the 3rd dimension (the height
    direction), the 4th dimension (the width direction) and the 5th dimension (the depth
    direction) of the input tensor. Setting the mode to 'area' directly calls
    `paddle.nn.functional.adaptive_avg_pool1d`, `paddle.nn.functional.adaptive_avg_pool2d`
    or `paddle.nn.functional.adaptive_avg_pool3d`.

    Example:

    .. code-block:: text

        For scale_factor:
            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
              scale_factor = float(in_size/out_size)

        Linear interpolation:
            if:
                align_corners = False , align_mode = 0
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = W_{in} * scale_{factor}

        Nearest neighbor interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})
          else:
              align_corners = True
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = round(H_{in} * scale_{factor})
              W_out = round(W_{in} * scale_{factor})

        Bilinear interpolation:
          if:
              align_corners = False , align_mode = 0

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Bicubic interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5

          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.

    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.

    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation

    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation.

    Parameters:
        x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w)
             when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
             Default: None. If a list, each element can be an integer or a Tensor of shape: [1].
             If a Tensor, its dimension size should be 1.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`. Has to match input size if it is either a list or a tuple or a Tensor.
             Default: None.
        mode (str): The resample method. It supports 'linear', 'nearest', 'bilinear',
                       'bicubic' and 'trilinear' currently. Default: 'nearest'
        align_corners(bool) :  An optional bool. If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
                               corner pixels.
                               Default: False
        align_mode(int)  :  An optional int for linear/bilinear/trilinear interpolation. Refer to the formula in the example above;
                            it can be '0' for src_idx = scale_factor*(dst_index+0.5)-0.5 , or '1' for
                            src_idx = scale_factor*dst_index.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for the user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or a 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
                    'trilinear', 'bicubic', or 'nearest' currently.
        ValueError: 'linear' only supports 3-D tensor.
        ValueError: 'bilinear', 'bicubic' and 'nearest' only support 4-D tensor.
        ValueError: 'trilinear' only supports 5-D tensor.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 1 for input 3-D tensor.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: size length should be 3 for input 5-D tensor.
        ValueError: scale_factor should be greater than zero.
        TypeError: align_corners should be a bool value
        ValueError: align_mode can only be '0' or '1'
        ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np

            input_data = np.random.rand(2,3,6,10).astype("float32")
            upsample_out  = paddle.nn.Upsample(size=[12,12])

            input = paddle.to_tensor(input_data)
            output = upsample_out(x=input)
            print(output.shape)
            # [2L, 3L, 12L, 12L]
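
            # A hedged extra step (reusing the same input): resize by a scale factor
            # instead of a fixed size; here 6x10 grows to 12x20.
            upsample_scale = paddle.nn.Upsample(scale_factor=2.0, mode='bilinear')
            output_scaled = upsample_scale(x=input)
            print(output_scaled.shape)
            # [2L, 3L, 12L, 20L]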

    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 mode='nearest',
                 align_corners=False,
                 align_mode=0,
                 data_format='NCHW',
                 name=None):
        super(Upsample, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode.lower()
        self.align_corners = align_corners
        self.align_mode = align_mode
        self.data_format = data_format
        self.name = name

    def forward(self, x):
        out = F.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners,
            align_mode=self.align_mode,
            data_format=self.data_format,
            name=self.name)

        return out

    def extra_repr(self):
        if self.scale_factor is not None:
            main_str = 'scale_factor={}'.format(self.scale_factor)
        else:
            main_str = 'size={}'.format(self.size)
        name_str = ', name={}'.format(self.name) if self.name else ''
        return '{}, mode={}, align_corners={}, align_mode={}, data_format={}{}'.format(
            main_str, self.mode, self.align_corners, self.align_mode,
            self.data_format, name_str)


class UpsamplingNearest2D(layers.Layer):
    """
    This op upsamples a batch of images, using nearest neighbours' pixel values.
    The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w),
    where in_w is width of the input tensor, in_h is the height of the input tensor.
    And the upsampling only applies on the two dimensions(height and width).
    Nearest neighbor interpolation is to perform nearest neighbor interpolation
    in both the 3rd dimension(in height direction) and the 4th dimension(in width
    direction) on input tensor.

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.

    Parameters:
        x (Tensor): 4-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_h, out_w) when input is a 4-D Tensor.
             Default: None. If a list, each element can be an integer or a Tensor of shape: [1].
             If a Tensor, its dimension size should be 1.
        scale_factor (float|int|list|tuple|Tensor|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`.
             Has to match input size if it is either a list or a tuple or a Tensor.
             Default: None.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),


    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            input_data = paddle.rand(shape=(2,3,6,10)).astype("float32")
            upsample_out  = paddle.nn.UpsamplingNearest2D(size=[12,12])
            input = paddle.to_tensor(input_data)
            output = upsample_out(x=input)
            print(output.shape)
            # [2L, 3L, 12L, 12L]
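            # Equivalent to paddle.nn.Upsample(size=[12,12], mode='nearest') with its
            # default align settings; the forward below simply calls F.interpolate.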
    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 data_format='NCHW',
                 name=None):
        super(UpsamplingNearest2D, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.data_format = data_format
        self.name = name

    def forward(self, x):
        out = F.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode='nearest',
            align_corners=False,
            align_mode=0,
            data_format=self.data_format,
            name=self.name)

        return out

    def extra_repr(self):
        if self.scale_factor is not None:
            main_str = 'scale_factor={}'.format(self.scale_factor)
        else:
            main_str = 'size={}'.format(self.size)
        name_str = ', name={}'.format(self.name) if self.name else ''
        return '{}, data_format={}{}'.format(main_str, self.data_format,
                                             name_str)


class UpsamplingBilinear2D(layers.Layer):
    """
    This op upsamples a batch of images, using bilinear interpolation.
    The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w),
    where in_w is width of the input tensor, in_h is the height of the input tensor.
    And the upsampling only applies on the two dimensions(height and width).
    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.

    Parameters:
        x (Tensor): 4-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_h, out_w) when input is a 4-D Tensor.
             Default: None. If a list, each element can be an integer or a Tensor of shape: [1].
             If a Tensor, its dimension size should be 1.
        scale_factor (float|int|list|tuple|Tensor|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`.
             Has to match input size if it is either a list or a tuple or a Tensor.
             Default: None.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            input_data = paddle.rand(shape=(2,3,6,10)).astype("float32")
            upsample_out  = paddle.nn.UpsamplingBilinear2D(size=[12,12])
            input = paddle.to_tensor(input_data)
            output = upsample_out(x=input)
            print(output.shape)
            # [2L, 3L, 12L, 12L]
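            # Equivalent to paddle.nn.Upsample(size=[12,12], mode='bilinear',
            # align_corners=True); the forward below simply calls F.interpolate.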
    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 data_format='NCHW',
                 name=None):
        super(UpsamplingBilinear2D, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.data_format = data_format
        self.name = name

    def forward(self, x):
        out = F.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode='bilinear',
            align_corners=True,
            align_mode=0,
            data_format=self.data_format,
            name=self.name)

        return out

    def extra_repr(self):
        if self.scale_factor is not None:
            main_str = 'scale_factor={}'.format(self.scale_factor)
        else:
            main_str = 'size={}'.format(self.size)
        name_str = ', name={}'.format(self.name) if self.name else ''
        return '{}, data_format={}{}'.format(main_str, self.data_format,
                                             name_str)


class Bilinear(layers.Layer):
    r"""

    This layer performs a bilinear transformation on two inputs.

    .. math::

      out_{i} = x1 * W_{i} * {x2^\mathrm{T}}, i=0,1,...,out\_features-1

      out = out + b

    In this formula:
     - :math:`x1`: the first input contains in1_features elements, shape is [batch_size, in1_features].
     - :math:`x2`: the second input contains in2_features elements, shape is [batch_size, in2_features].
     - :math:`W_{i}`: the i-th learned weight, shape is [in1_features, in2_features], and learned weight's shape is [out_features, in1_features, in2_features].
     - :math:`out_{i}`: the i-th element of out, shape is [batch_size], and out's shape is [batch_size, out_features].
     - :math:`b`: the learned bias, shape is [1, out_features].
     - :math:`x2^\mathrm{T}`: the transpose of :math:`x2`.

    Parameters:
       in1_features (int): The dimension of each first input(`x1`).
       in2_features (int): The dimension of each second input(`x2`).
       out_features (int): The dimension of output of this layer.
       weight_attr (ParamAttr, optional): The parameter attribute for the learnable
           weight of this layer. The default value is None.
       bias_attr (ParamAttr, optional): The parameter attribute for the bias
           of this layer. If it is set to False, no bias will be added to the output units.
           If it is set to None, the bias is initialized zero. The default value is None.
       name (str, optional): The default value is None. Normally there is no need for user
           to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Attribute:
        **weight** (Parameter): the learnable weights of this layer.

        **bias** (Parameter): the learnable bias of this layer.

    Returns:
       Tensor: A 2-D Tensor of shape [batch_size, out_features].

    Examples:
       .. code-block:: python

        import paddle
        import numpy

        layer1 = numpy.random.random((5, 5)).astype('float32')
        layer2 = numpy.random.random((5, 4)).astype('float32')
        bilinear = paddle.nn.Bilinear(
            in1_features=5, in2_features=4, out_features=1000)
        result = bilinear(paddle.to_tensor(layer1),
                        paddle.to_tensor(layer2))     # result shape [5, 1000]
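
        # A rough conceptual check (a sketch, not the layer's implementation): the i-th
        # output equals x1 @ weight[i] @ x2.T taken row-wise, plus the bias, roughly
        # numpy.einsum('bm,omn,bn->bo', layer1, bilinear.weight.numpy(), layer2) + bilinear.bias.numpy()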

    """

    def __init__(self,
                 in1_features,
                 in2_features,
                 out_features,
                 weight_attr=None,
                 bias_attr=None,
                 name=None):
        super(Bilinear, self).__init__()
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._name = name
        self._in1_features = in1_features
        self._in2_features = in2_features
        self._out_features = out_features
        self._dtype = self._helper.get_default_dtype()

        weight_shape = [
            self._out_features, self._in1_features, self._in2_features
        ]
        self.weight = self.create_parameter(
            attr=self._weight_attr,
            shape=weight_shape,
            dtype=self._dtype,
            is_bias=False)
        bias_shape = [1, self._out_features]
        self.bias = self.create_parameter(
            attr=self._bias_attr,
            shape=bias_shape,
            dtype=self._dtype,
            is_bias=True)

    def forward(self, x1, x2):
        return F.bilinear(x1, x2, self.weight, self.bias, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'in1_features={}, in2_features={}, out_features={}, dtype={}{}'.format(
            self._in1_features, self._in2_features, self._out_features,
            self._dtype, name_str)


class Dropout(layers.Layer):
    """
    Dropout is a regularization technique for reducing overfitting by preventing
    neuron co-adaptation during training, as described in the paper:
    `Improving neural networks by preventing co-adaptation of feature detectors <https://arxiv.org/abs/1207.0580>`_
    The dropout operator randomly sets the outputs of some units to zero, while scaling
    up the remaining outputs according to the given dropout probability.

    See ``paddle.nn.functional.dropout`` for more details.

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float | int): Probability of setting units to zero. Default: 0.5
        axis (int | list): The axis along which the dropout is performed. Default None.
        mode(str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: N-D tensor.
        - output: N-D tensor, the same shape as input.


    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.array([[1,2,3], [4,5,6]]).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Dropout(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x)
            print(y_train)
            print(y_test)
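            # With the default 'upscale_in_train' mode, kept entries of y_train are
            # scaled by 1/(1-p) = 2 and the others are zero (random per run), while
            # y_test equals x unchanged.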
   """

    def __init__(self, p=0.5, axis=None, mode="upscale_in_train", name=None):
        super(Dropout, self).__init__()

        self.p = p
        self.axis = axis
        self.mode = mode
        self.name = name

    def forward(self, input):
        out = F.dropout(
            input,
            p=self.p,
            axis=self.axis,
            training=self.training,
            mode=self.mode,
            name=self.name)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'p={}, axis={}, mode={}{}'.format(self.p, self.axis, self.mode,
                                                 name_str)


class Dropout2D(layers.Layer):
    """
    Randomly zero out entire channels (in the batched input 4d tensor with the shape `NCHW` ,
    a channel is a 2D feature map with the shape `HW`). Each channel will be zeroed out independently
    on every forward call with probability `p` using samples from a Bernoulli distribution.
    Dropout2D will help promote independence between feature maps as described in the paper:
    `Efficient Object Localization Using Convolutional Networks <https://arxiv.org/abs/1411.4280>`_

    See ``paddle.nn.functional.dropout2d`` for more details.

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float, optional): Probability of setting units to zero. Default: 0.5
        data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from `NCHW` or `NHWC`. The default is `NCHW`. When it is `NCHW`, the data is stored in the order of: [batch_size, input_channels, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: 4-D tensor.
        - output: 4-D tensor, the same shape as input.


    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Dropout2D(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x)
            print(y_train)
            print(y_test)
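            # y_train randomly zeroes whole 4x5 feature maps (per channel) and, in the
            # default upscale mode, rescales the kept ones by 1/(1-p); y_test returns
            # the input unchanged.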
   """

    def __init__(self, p=0.5, data_format='NCHW', name=None):
        super(Dropout2D, self).__init__()

        self.p = p
        self.data_format = data_format
        self.name = name

    def forward(self, input):
        out = F.dropout2d(
            input,
            p=self.p,
            training=self.training,
            data_format=self.data_format,
            name=self.name)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'p={}, data_format={}{}'.format(self.p, self.data_format,
                                               name_str)


class Dropout3D(layers.Layer):
    """
    Randomly zero out entire channels (in the batched input 5d tensor with the shape `NCDHW` ,
    a channel is a 3D feature map with the shape `DHW` ). Each channel will be zeroed out independently
    on every forward call with probability `p` using samples from a Bernoulli distribution.
    Dropout3D will help promote independence between feature maps as described in the paper:
    `Efficient Object Localization Using Convolutional Networks <https://arxiv.org/abs/1411.4280>`_

    See ``paddle.nn.functional.dropout3d`` for more details.

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float | int): Probability of setting units to zero. Default: 0.5
        data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from `NCDHW` or `NDHWC`. The default is `NCDHW`. When it is `NCDHW`, the data is stored in the order of: [batch_size, input_channels, input_depth, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: 5-D tensor.
        - output: 5-D tensor, the same shape as input.


    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Dropout3D(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x)
            print(y_train)
            print(y_test)
   """

    def __init__(self, p=0.5, data_format='NCDHW', name=None):
        super(Dropout3D, self).__init__()

        self.p = p
        self.data_format = data_format
        self.name = name

    def forward(self, input):
        out = F.dropout3d(
            input,
            p=self.p,
            training=self.training,
            data_format=self.data_format,
            name=self.name)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'p={}, data_format={}{}'.format(self.p, self.data_format,
                                               name_str)


class AlphaDropout(layers.Layer):
    """
    Alpha Dropout is a type of Dropout that maintains the self-normalizing property. For an input with
    zero mean and unit standard deviation, the output of Alpha Dropout maintains the original mean and
    standard deviation of the input. Alpha Dropout fits well with the SELU activation function by randomly setting
    activations to the negative saturation value.

    For more information, please refer to:
    `Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float | int): Probability of setting units to zero. Default: 0.5
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: N-D tensor.
        - output: N-D tensor, the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.array([[-1, 1], [-1, 1]]).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.AlphaDropout(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x)
            print(y_train)
            # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly)
            print(y_test)
   """

    def __init__(self, p=0.5, name=None):
        super(AlphaDropout, self).__init__()
        self.p = p
        self.name = name

    def forward(self, input):
        out = F.alpha_dropout(
            input, p=self.p, training=self.training, name=self.name)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'p={}{}'.format(self.p, name_str)


class Pad1D(layers.Layer):
    """
    This interface is used to construct a callable object of the ``Pad1D`` class.
    Pad tensor according to ``padding``, ``mode`` and ``value``.
    If mode is 'reflect', pad[0] and pad[1] must be no greater than width-1.

    Parameters:
        padding (Tensor | List[int] | int): The padding size with data type int. If it is an int,
            the same padding is used on both sides; otherwise [len(padding)/2] dimensions
            of the input will be padded. The pad has the form (pad_left, pad_right).
        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'replicate' mode, uses input boundaries to pad the input tensor.
            When in 'circular' mode, uses circular input to pad the input tensor.
            Default is 'constant'.
        value (float32): The value to fill the padded areas. Default is 0.0.
        data_format (str): A string from: "NCL", "NLC". Specify the data format of the input data.
           Default is "NCL".
        name (str, optional) : The default value is None. Normally there is no need for
            the user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[1., 2., 3.],
                  [4., 5., 6.]]]
            padding = [1, 2],
            mode = "constant"
            value = 0.0
            Out = [[[0. 1. 2. 3. 0. 0.]
                    [0. 4. 5. 6. 0. 0.]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np

            input_shape = (1, 2, 3)
            pad = [1, 2]
            mode = "constant"
            data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1
            my_pad = nn.Pad1D(padding=pad, mode=mode)
            result = my_pad(data)
            print(result)
            # [[[0. 1. 2. 3. 0. 0.]
            #   [0. 4. 5. 6. 0. 0.]]]
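
            # A hedged comparison: "reflect" mode mirrors interior values instead of
            # padding with zeros (each pad must stay below the input width).
            result_reflect = nn.Pad1D(padding=pad, mode="reflect")(data)
            print(result_reflect)
            # [[[2. 1. 2. 3. 2. 1.]
            #   [5. 4. 5. 6. 5. 4.]]]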
    """

    def __init__(self,
                 padding,
                 mode='constant',
                 value=0.0,
                 data_format="NCL",
                 name=None):
        super(Pad1D, self).__init__()
        self._pad = _npairs(padding, 1)
        self._mode = mode
        self._value = value
        self._data_format = data_format
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     value=self._value,
                     data_format=self._data_format,
                     name=self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'padding={}, mode={}, value={}, data_format={}{}'.format(
            self._pad, self._mode, self._value, self._data_format, name_str)


class Pad2D(layers.Layer):
    """
    This interface is used to construct a callable object of the ``Pad2D`` class.
    Pad tensor according to ``padding``, ``mode`` and ``value``.
    If mode is 'reflect', pad[0] and pad[1] must be no greater
    than width-1. The height dimension has the same condition.

    Parameters:
        padding (Tensor | List[int] | int): The padding size with data type int. If it is an int,
            the same padding is used in all dimensions; otherwise [len(padding)/2] dimensions
            of the input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom).
        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'replicate' mode, uses input boundaries to pad the input tensor.
            When in 'circular' mode, uses circular input to pad the input tensor.
            Default is 'constant'.
        value (float32): The value to fill the padded areas. Default is 0.0.
        data_format (str): A string from: "NCHW", "NHWC". Specify the data format of the input data.
           Default is "NCHW".
        name (str, optional) : The default value is None. Normally there is no need for
            the user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[[1., 2., 3.],
                   [4., 5., 6.]]]]
            padding = [1, 1, 0, 0]
            mode = "constant"
            value = 0.0
            Out = [[[[0. 1. 2. 3. 0.]
                     [0. 4. 5. 6. 0.]]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            input_shape = (1, 1, 2, 3)
            pad = [1, 0, 1, 2]
            mode = "constant"
            data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1
            my_pad = nn.Pad2D(padding=pad, mode=mode)
            result = my_pad(data)
            print(result)
            # [[[[0. 0. 0. 0.]
            #    [0. 1. 2. 3.]
            #    [0. 4. 5. 6.]
            #    [0. 0. 0. 0.]
            #    [0. 0. 0. 0.]]]]
    """

    def __init__(self,
                 padding,
                 mode='constant',
                 value=0.0,
                 data_format="NCHW",
                 name=None):
        super(Pad2D, self).__init__()
        self._pad = _npairs(padding, 2)
        self._mode = mode
        self._value = value
        self._data_format = data_format
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     value=self._value,
                     data_format=self._data_format,
                     name=self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'padding={}, mode={}, value={}, data_format={}{}'.format(
            self._pad, self._mode, self._value, self._data_format, name_str)


class Pad3D(layers.Layer):
    """
    This interface is used to construct a callable object of the ``Pad3D`` class.
    Pad tensor according to ``padding``, ``mode`` and ``value``.
    If mode is 'reflect', pad[0] and pad[1] must be no greater
    than width-1. The height and depth dimensions have the same condition.

    Parameters:
        padding (Tensor | List[int] | int): The padding size with data type int. If it is an int,
            the same padding is used in all dimensions; otherwise [len(padding)/2] dimensions
            of the input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'replicate' mode, uses input boundaries to pad the input tensor.
            When in 'circular' mode, uses circular input to pad the input tensor.
            Default is 'constant'.
        value (float32): The value to fill the padded areas. Default is 0.0.
        data_format (str): A string from: "NCDHW", "NDHWC". Specify the data format of the input data.
           Default is "NCDHW".
        name (str, optional) : The default value is None. Normally there is no need for
            the user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[[[1., 2., 3.],
                    [4., 5., 6.]]]]]
            padding = [1, 2, 0, 0, 0, 0]
            mode = "constant"
            value = 0.0
            Out = [[[[[0. 1. 2. 3. 0. 0.]
                      [0. 4. 5. 6. 0. 0.]]]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            input_shape = (1, 1, 1, 2, 3)
            pad = [1, 0, 1, 2, 0, 0]
            mode = "constant"
            data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1
            my_pad = nn.Pad3D(padding=pad, mode=mode)
            result = my_pad(data)
            print(result)
            # [[[[[0. 0. 0. 0.]
            #     [0. 1. 2. 3.]
            #     [0. 4. 5. 6.]
            #     [0. 0. 0. 0.]
            #     [0. 0. 0. 0.]]]]]
    """

    def __init__(self,
                 padding,
                 mode='constant',
                 value=0.0,
                 data_format="NCDHW",
                 name=None):
        super(Pad3D, self).__init__()
        self._pad = _npairs(padding, 3)
        self._mode = mode
        self._value = value
        self._data_format = data_format
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     value=self._value,
                     data_format=self._data_format,
                     name=self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'padding={}, mode={}, value={}, data_format={}{}'.format(
            self._pad, self._mode, self._value, self._data_format, name_str)


class CosineSimilarity(layers.Layer):
    """
    This interface is used to compute cosine similarity between x1 and x2 along axis.

    Parameters:
        axis (int): Dimension of vectors to compute cosine similarity. Default is 1.
        eps(float): Small value to avoid division by zero. Default is 1e-8.
    Returns:
        None

    Examples:
        .. code-block:: text

            Case 0:
                x1 = [[0.8024077  0.9927354  0.27238318 0.8344984 ]
                     [0.48949873 0.5797396  0.65444374 0.66510963]
                     [0.1031398  0.9614342  0.08365563 0.6796464 ]
                     [0.10760343 0.7461209  0.7726148  0.5801006 ]]
                x2 = [[0.62913156 0.1536727  0.9847992  0.04591406]
                     [0.9098952  0.15715368 0.8671125  0.3156102 ]
                     [0.4427798  0.54136837 0.5276275  0.32394758]
                     [0.3769419  0.8535014  0.48041078 0.9256797 ]]
                axis = 1
                eps = 1e-8
                Out: [0.5275037  0.8368967  0.75037485 0.9245899]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np

            np.random.seed(0)
            x1 = np.random.rand(2,3)
            x2 = np.random.rand(2,3)
            x1 = paddle.to_tensor(x1)
            x2 = paddle.to_tensor(x2)

            cos_sim_func = nn.CosineSimilarity(axis=0)
            result = cos_sim_func(x1, x2)
            print(result)
            # [0.99806249 0.9817672  0.94987036]
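
            # a manual cross-check (illustrative sketch, not the exact internal formula):
            # cos(x1, x2) = sum(x1 * x2) / (||x1|| * ||x2||)
            manual = paddle.sum(x1 * x2, axis=0) / (
                paddle.sqrt(paddle.sum(x1 * x1, axis=0)) *
                paddle.sqrt(paddle.sum(x2 * x2, axis=0)))
            print(manual)  # close to the result printed above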
    """

    def __init__(self, axis=1, eps=1e-8):
        super(CosineSimilarity, self).__init__()
        self._axis = axis
        self._eps = eps

    def forward(self, x1, x2):
        return F.cosine_similarity(x1, x2, axis=self._axis, eps=self._eps)

    def extra_repr(self):
        return 'axis={_axis}, eps={_eps}'.format(**self.__dict__)


class Embedding(layers.Layer):
    r"""
    **Embedding Layer**

    This interface is used to construct a callable object of the ``Embedding`` class.
    For specific usage, refer to code examples. It implements the function of the Embedding Layer.
    This layer is used to look up the embedding vector corresponding to each id provided by :attr:`x` .
    It automatically constructs a 2D embedding matrix based on the
    input :attr:`num_embeddings` and :attr:`embedding_dim`.

    The shape of the output Tensor is generated by appending a dimension of size
    :attr:`embedding_dim` to the last dimension of the input Tensor's shape.

    **Note:** The ids in :attr:`x` must satisfy :math:`0 <= id < num_embeddings` ,
    otherwise the program will throw an exception and exit.

    .. code-block:: text

        Case 1:

        x is a Tensor. padding_idx = -1
            x.data = [[1, 3], [2, 4], [4, 127]]
            x.shape = [3, 2]
        Given num_embeddings = 128 and embedding_dim = 16,
        output is a Tensor:
            out.shape = [3, 2, 16]
            out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
                        [0.345421456, 0.524563927, ..., 0.144534654]],

                        [[0.345249859, 0.124939536, ..., 0.194353745],
                        [0.945345345, 0.435394634, ..., 0.435345365]],

                        [[0.945345345, 0.435394634, ..., 0.435345365],
                        [0.0,         0.0,         ..., 0.0        ]]]  # padding data
        Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
        All-zero data is padded whenever an id equals 127.

    Parameters:
        num_embeddings (int): The size of the dictionary of embeddings.
        embedding_dim (int): The size of each embedding vector.
        padding_idx(int|long|None): padding_idx needs to be in the interval [-num_embeddings, num_embeddings).
            If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
            to :math:`num\_embeddings + padding\_idx` . The lookup outputs all-zero padding data whenever
            it encounters :math:`padding\_idx` in an id, and the padding data will not be updated during training.
            If set to None, it has no effect on the output. Default: None.
        sparse(bool): The flag indicating whether to use sparse update. This parameter only
            affects the performance of the backward gradient update. It is recommended to set
            it to True because sparse update is faster. However, some optimizers do not support sparse update,
            such as :ref:`api_paddle_optimizer_adadelta_Adadelta` , :ref:`api_paddle_optimizer_adamax_Adamax` , :ref:`api_paddle_optimizer_lamb_Lamb`.
            In these cases, sparse must be False. Default: False.
        weight_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_ParamAttr` . In addition,
            user-defined or pre-trained word vectors can be loaded with the :attr:`weight_attr` parameter.
            The local word vectors need to be converted to numpy format, and their shape should be
            consistent with :attr:`num_embeddings` and :attr:`embedding_dim` . Then :ref:`api_initializer_NumpyArrayInitializer`
            is used to load custom or pre-trained word vectors. See the code examples below for details.
        name(str|None): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name does not need to be set. None by default.

    Attribute:
        **weight** (Parameter): the learnable weights of this layer.

    Returns:
        None

    Examples:

        .. code-block:: python

            import paddle
            import numpy as np

            x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
            y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32)

            x = paddle.to_tensor(x_data, stop_gradient=False)
            y = paddle.to_tensor(y_data, stop_gradient=False)

            embedding = paddle.nn.Embedding(10, 3, sparse=True)

            w0=np.full(shape=(10, 3), fill_value=2).astype(np.float32)
            embedding.weight.set_value(w0)

            adam = paddle.optimizer.Adam(parameters=[embedding.weight], learning_rate=0.01)
            adam.clear_grad()

            # weight.shape = [10, 3]

            # x.data = [[3],[4],[5]]
            # x.shape = [3, 1]

            # out.data = [[2,2,2], [2,2,2], [2,2,2]]
            # out.shape = [3, 1, 3]
            out=embedding(x)
            out.backward()
            adam.step()
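
            # an extra illustrative sketch: with padding_idx set, lookups of that id
            # return all-zero vectors, and that row receives no gradient updates
            emb_pad = paddle.nn.Embedding(10, 3, padding_idx=-1)  # -1 is converted to 9
            pad_ids = paddle.to_tensor(np.array([[1], [9]]).astype(np.int64))
            print(emb_pad(pad_ids))  # the row for id 9 is all zeros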

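        As a second, illustrative sketch (assuming ``paddle.nn.initializer.Assign`` is available),
        pre-trained word vectors can be loaded through :attr:`weight_attr`:

        .. code-block:: python

            import numpy as np
            import paddle

            # hypothetical pre-trained vectors of shape [num_embeddings, embedding_dim]
            pretrained = np.random.rand(10, 3).astype(np.float32)
            w_attr = paddle.ParamAttr(
                initializer=paddle.nn.initializer.Assign(pretrained))
            embedding = paddle.nn.Embedding(10, 3, weight_attr=w_attr)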
    """

    def __init__(self,
                 num_embeddings,
                 embedding_dim,
                 padding_idx=None,
                 sparse=False,
                 weight_attr=None,
                 name=None):
        super(Embedding, self).__init__()
        self._num_embeddings = num_embeddings
        self._embedding_dim = embedding_dim
        self._sparse = sparse
        self._is_distributed = False
        self._padding_idx = padding_idx

        if self._num_embeddings <= 0:
            raise ValueError("num_embeddings must be gather than 0")

        if self._embedding_dim <= 0:
            raise ValueError("embedding_dim must be gather than 0")

        # validate the user-provided padding_idx before converting negative values
        if padding_idx is not None and (padding_idx >= num_embeddings or
                                        padding_idx < -num_embeddings):
            raise ValueError("padding_idx must be within [-{}, {})".format(
                num_embeddings, num_embeddings))

        padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
            num_embeddings + padding_idx)

        self._dtype = self._helper.get_default_dtype()
        self._size = [self._num_embeddings, self._embedding_dim]

        self._weight_attr = weight_attr
        self._remote_prefetch = False
        self._name = name
        self.weight = self.create_parameter(
            attr=self._weight_attr,
            shape=self._size,
            dtype=self._dtype,
            is_bias=False)

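        # zero the padding row at initialization so the padding id maps to an all-zero vector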
        if in_dygraph_mode() and padding_idx != -1:
            self.weight[padding_idx] = 0.0

    def forward(self, x):
        return F.embedding(
            x,
            weight=self.weight,
            padding_idx=self._padding_idx,
            sparse=self._sparse,
            name=self._name)

    def extra_repr(self):
        main_str = '{_num_embeddings}, {_embedding_dim}'
        if self._padding_idx is not None:
            main_str += ', padding_idx={_padding_idx}'
        main_str += ', sparse={_sparse}'
        if self._name is not None:
            main_str += ', name={_name}'
        return main_str.format(**self.__dict__)


class Unfold(layers.Layer):
    """
    This op returns a col buffer of sliding local blocks of the input x, also known
    as im2col for batched 2D image tensors. For each block under the convolution filter,
    all elements will be rearranged as a column. While the convolution filter slides over
    the input feature map, a series of such columns will be formed.

    For each input :math:`x` with shape [N, C, H, W], the output shape [N, Cout, Lout]
    can be calculated as follows.

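    A sketch of the standard im2col shape computation (consistent with ``paddle.nn.functional.unfold``):

    .. code-block:: text

        Cout = C * kernel_sizes[0] * kernel_sizes[1]
        hout = (H + paddings[0] + paddings[2] - dilations[0] * (kernel_sizes[0] - 1) - 1) // strides[0] + 1
        wout = (W + paddings[1] + paddings[3] - dilations[1] * (kernel_sizes[1] - 1) - 1) // strides[1] + 1
        Lout = hout * wout
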
    See ``paddle.nn.functional.unfold`` for more details.

    
    Parameters:
        kernel_sizes(int|list):   The size of convolution kernel, should be [k_h, k_w]
                                  or an integer k treated as [k, k].
        strides(int|list):        The strides, should be [stride_h, stride_w]
                                  or an integer stride treated as [stride, stride].
                                  By default, strides will be [1, 1].
        paddings(int|list):       The paddings of each dimension, should be
                                  [padding_top, padding_left, padding_bottom, padding_right]
                                  or [padding_h, padding_w] or an integer padding.
                                  If [padding_h, padding_w] was given, it will be expanded to
                                  [padding_h, padding_w, padding_h, padding_w]. If an integer
                                  padding was given, [padding, padding, padding, padding] will
                                  be used. By default, paddings will be [0, 0, 0, 0].
        dilations(int|list):      The dilations of the convolution kernel, should be
                                  [dilation_h, dilation_w], or an integer dilation treated as
                                  [dilation, dilation]. By default, it will be [1, 1].
        name(str, optional): The default value is None.
                             Normally there is no need for the user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`


    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            x = paddle.randn((100,3,224,224))
            unfold = nn.Unfold(kernel_sizes=[3, 3])
            result = unfold(x)
            print(result)
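            # for this input, result.shape is [100, 27, 49284]:
            # Cout = 3 * 3 * 3 = 27 and Lout = 222 * 222 = 49284 (default strides/paddings/dilations)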
   """

    def __init__(self,
                 kernel_sizes,
                 dilations=1,
                 paddings=0,
                 strides=1,
                 name=None):
        super(Unfold, self).__init__()

        self.kernel_sizes = kernel_sizes
        self.dilations = dilations
        self.paddings = paddings
        self.strides = strides
        self.name = name

    def forward(self, input):
        return F.unfold(input, self.kernel_sizes, self.dilations, self.paddings,
                        self.strides, self.name)

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'kernel_size={}, dilation={}, padding={}, stride={}{}'.\
                format(self.kernel_sizes, self.dilations, self.paddings, self.strides, name_str)