#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define the common classes to build a neural network
import paddle
from ...fluid.dygraph import Flatten  # noqa: F401
from ...fluid.dygraph import layers
from ...fluid.framework import in_dygraph_mode
from .. import functional as F
from ...fluid.framework import _dygraph_tracer


def _npairs(x, n):
    if isinstance(x, (paddle.Tensor, list)):
        return x
    x = [x] * (n * 2)
    return x
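# Illustrative note (not from the original source): _npairs broadcasts a scalar
# padding value to the 2*n entries expected by F.pad, e.g. _npairs(1, 2) returns
# [1, 1, 1, 1], while a list or Tensor is passed through unchanged.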


class Linear(layers.Layer):
    r"""

    Fully-connected linear transformation layer. For each input :math:`X` ,
    the equation is:

    .. math::

        Out = XW + b

    where :math:`W` is the weight and :math:`b` is the bias.

    Linear layer takes only one multi-dimensional tensor as input with the
    shape :math:`[batch\_size, *, in\_features]` , where :math:`*` means any
    number of additional dimensions. It multiplies input tensor with the weight
    (a 2-D tensor of shape :math:`[in\_features, out\_features]` ) and produces
    an output tensor of shape :math:`[batch\_size, *, out\_features]` .
    If :math:`bias\_attr` is not False, the bias (a 1-D tensor of
    shape :math:`[out\_features]` ) will be created and added to the output.

    Parameters:
        in_features (int): The number of input units.
        out_features (int): The number of output units.
        weight_attr (ParamAttr, optional): The attribute for the learnable
            weight of this layer. The default value is None and the weight will be
            initialized to zero. For detailed information, please refer to
            paddle.ParamAttr.
        bias_attr (ParamAttr|bool, optional): The attribute for the learnable bias
            of this layer. If it is set to False, no bias will be added to the output.
            If it is set to None or one kind of ParamAttr, a bias parameter will
            be created according to ParamAttr. For detailed information, please refer
            to paddle.ParamAttr. The default value is None and the bias will be
            initialized to zero.
        name (str, optional): Normally there is no need for user to set this parameter.
            For detailed information, please refer to :ref:`api_guide_Name` .

    Attribute:
        **weight** (Parameter): the learnable weight of this layer.

        **bias** (Parameter): the learnable bias of this layer.

    Shape:
        - input: Multi-dimensional tensor with shape :math:`[batch\_size, *, in\_features]` .
        - output: Multi-dimensional tensor with shape :math:`[batch\_size, *, out\_features]` .

    Examples:
        .. code-block:: python

          import paddle

          # Define the linear layer.
          weight_attr = paddle.ParamAttr(
              name="weight",
              initializer=paddle.nn.initializer.Constant(value=0.5))
          bias_attr = paddle.ParamAttr(
              name="bias",
              initializer=paddle.nn.initializer.Constant(value=1.0))
          linear = paddle.nn.Linear(2, 4, weight_attr=weight_attr, bias_attr=bias_attr)
          # linear.weight: [[0.5 0.5 0.5 0.5]
          #                 [0.5 0.5 0.5 0.5]]
          # linear.bias: [1. 1. 1. 1.]

          x = paddle.randn((3, 2), dtype="float32")
          # x: [[-0.32342386 -1.200079  ]
          #     [ 0.7979031  -0.90978354]
          #     [ 0.40597573  1.8095392 ]]
          y = linear(x)
          # y: [[0.23824859 0.23824859 0.23824859 0.23824859]
          #     [0.9440598  0.9440598  0.9440598  0.9440598 ]
          #     [2.1077576  2.1077576  2.1077576  2.1077576 ]]
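
          # A hedged note (not part of the original example): per the Shape section
          # above, Linear also accepts extra leading dimensions, e.g. an input of
          # shape [3, 5, 2] would yield an output of shape [3, 5, 4] here.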
    """

    def __init__(self,
                 in_features,
                 out_features,
                 weight_attr=None,
                 bias_attr=None,
                 name=None):
        super(Linear, self).__init__()
        self._dtype = self._helper.get_default_dtype()
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self.weight = self.create_parameter(
            shape=[in_features, out_features],
            attr=self._weight_attr,
            dtype=self._dtype,
            is_bias=False)
        self.bias = self.create_parameter(
            shape=[out_features],
            attr=self._bias_attr,
            dtype=self._dtype,
            is_bias=True)
        self.name = name

    def forward(self, input):
        out = F.linear(
            x=input, weight=self.weight, bias=self.bias, name=self.name)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'in_features={}, out_features={}, dtype={}{}'.format(
            self.weight.shape[0], self.weight.shape[1], self._dtype, name_str)


class Upsample(layers.Layer):
    """
    This op resizes a batch of images.

    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
    or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
    (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    where in_w is the width, in_h is the height and in_d is the depth of the input tensor,
    and the resizing only applies to the three dimensions (depth, height and width).

    Supporting resample methods:
        'linear' : Linear interpolation
        'bilinear' : Bilinear interpolation
        'trilinear' : Trilinear interpolation
        'nearest' : Nearest neighbor interpolation
        'bicubic' : Bicubic interpolation

    Linear interpolation is the method of using a line connecting two known quantities
    to determine the value of an unknown quantity between the two known quantities.

    Nearest neighbor interpolation performs nearest neighbor interpolation
    in both the 3rd dimension (in height direction) and the 4th dimension (in width
    direction) on the input tensor.

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
    The linear interpolation is performed on three directions.
    align_corners and align_mode are optional parameters; the calculation method
    of interpolation can be selected by them.

    Area interpolation performs area-based resampling along the 3rd dimension (in height
    direction), the 4th dimension (in width direction) and the 5th dimension (in depth
    direction) of the input tensor. Setting the mode to 'area' directly calls
    `paddle.nn.functional.adaptive_avg_pool1d`, `paddle.nn.functional.adaptive_avg_pool2d`
    or `paddle.nn.functional.adaptive_avg_pool3d`.

    Example:

    .. code-block:: text

        For scale_factor:
            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
              scale_factor = float(in_size/out_size)

        Linear interpolation:
            if:
                align_corners = False , align_mode = 0
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = W_{in} * scale_{factor}

        Nearest neighbor interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})
          else:
              align_corners = True
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = round(H_{in} * scale_{factor})
              W_out = round(W_{in} * scale_{factor})

        Bilinear interpolation:
          if:
              align_corners = False , align_mode = 0

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Bicubic interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5

          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}
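
    As a hedged illustration (not part of the original documentation), the nearest
    neighbor output-size rule above can be transcribed directly into plain Python:

    .. code-block:: python

        import math

        def nearest_out_size(in_size, scale_factor, align_corners=False):
            # round when align_corners is True, floor otherwise (see formulas above)
            if align_corners:
                return round(in_size * scale_factor)
            return math.floor(in_size * scale_factor)

        print(nearest_out_size(6, 1.8))        # 10
        print(nearest_out_size(6, 1.8, True))  # 11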

    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.

    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.

    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation

    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation.

    Parameters:
        x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w)
             when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
             Default: None. If a list/tuple, each element can be an integer or a Tensor of shape: [1].
             If a Tensor, it should be a 1-D Tensor.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`. Has to match input size if it is either a list or a tuple or a Tensor.
             Default: None.
        mode (str): The resample method. It supports 'linear', 'nearest', 'bilinear',
                       'bicubic' and 'trilinear' currently. Default: 'nearest'
        align_corners(bool) :  An optional bool. If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
                               corner pixels.
                               Default: False
        align_mode(int)  :  An optional int for linear/bilinear/trilinear interpolation. Refer to the formula in the example above,
                            it can be \'0\' for src_idx = scale_factor*(dst_index+0.5)-0.5 , can be \'1\' for
                            src_idx = scale_factor*dst_index.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
                    'trilinear', 'bicubic', or 'nearest' currently.
        ValueError: 'linear' only supports 3-D tensors.
        ValueError: 'bilinear', 'bicubic' and 'nearest' only support 4-D tensors.
        ValueError: 'trilinear' only supports 5-D tensors.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 1 for input 3-D tensor.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: size length should be 3 for input 5-D tensor.
        ValueError: scale_factor should be greater than zero.
        TypeError: align_corners should be a bool value
        ValueError: align_mode can only be '0' or '1'
        ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.

    Examples:
        .. code-block:: python
            
            import paddle
            import paddle.nn as nn
            import numpy as np

            input_data = np.random.rand(2,3,6,10).astype("float32")
            upsample_out  = paddle.nn.Upsample(size=[12,12])

            input = paddle.to_tensor(input_data)
            output = upsample_out(x=input)
            print(output.shape)
            # [2L, 3L, 12L, 12L]
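
            # A hedged extra example (not from the original docs): the same layer class
            # driven by scale_factor and bilinear interpolation instead of a fixed size.
            upsample_by_scale = paddle.nn.Upsample(scale_factor=2, mode='bilinear')
            output2 = upsample_by_scale(x=input)
            print(output2.shape)
            # [2, 3, 12, 20]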

    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 mode='nearest',
                 align_corners=False,
                 align_mode=0,
                 data_format='NCHW',
                 name=None):
        super(Upsample, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode.lower()
        self.align_corners = align_corners
        self.align_mode = align_mode
        self.data_format = data_format
        self.name = name

    def forward(self, x):
        out = F.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners,
            align_mode=self.align_mode,
            data_format=self.data_format,
            name=self.name)

        return out

    def extra_repr(self):
        if self.scale_factor is not None:
            main_str = 'scale_factor={}'.format(self.scale_factor)
        else:
            main_str = 'size={}'.format(self.size)
        name_str = ', name={}'.format(self.name) if self.name else ''
        return '{}, mode={}, align_corners={}, align_mode={}, data_format={}{}'.format(
            main_str, self.mode, self.align_corners, self.align_mode,
            self.data_format, name_str)


class UpsamplingNearest2D(layers.Layer):
    """
    This op upsamples a batch of images, using nearest neighbours' pixel values.
    The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w),
    where in_w is width of the input tensor, in_h is the height of the input tensor.
    And the upsampling only applies on the two dimensions(height and width).
    Nearest neighbor interpolation performs nearest neighbor interpolation
    in both the 3rd dimension (in height direction) and the 4th dimension (in width
    direction) on the input tensor.

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.

    Parameters:
        x (Tensor): 4-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_h, out_w) when input is a 4-D Tensor.
             Default: None. If a list/tuple, each element can be an integer or a Tensor of shape: [1].
             If a Tensor, it should be a 1-D Tensor.
        scale_factor (float|int|list|tuple|Tensor|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`.
             Has to match input size if it is either a list or a tuple or a Tensor.
             Default: None.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),


    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            input_data = paddle.rand(shape=(2,3,6,10)).astype("float32")
            upsample_out  = paddle.nn.UpsamplingNearest2D(size=[12,12])
            input = paddle.to_tensor(input_data)
            output = upsample_out(x=input)
            print(output.shape)
            # [2L, 3L, 12L, 12L]
    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 data_format='NCHW',
                 name=None):
        super(UpsamplingNearest2D, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.data_format = data_format
        self.name = name

    def forward(self, x):
        out = F.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode='nearest',
            align_corners=False,
            align_mode=0,
            data_format=self.data_format,
            name=self.name)

        return out

    def extra_repr(self):
        if self.scale_factor is not None:
            main_str = 'scale_factor={}'.format(self.scale_factor)
        else:
            main_str = 'size={}'.format(self.size)
        name_str = ', name={}'.format(self.name) if self.name else ''
        return '{}, data_format={}{}'.format(main_str, self.data_format,
                                             name_str)


class UpsamplingBilinear2D(layers.Layer):
    """
    This op upsamples a batch of images, using bilinear interpolation.
    The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w),
    where in_w is width of the input tensor, in_h is the height of the input tensor.
    And the upsampling only applies on the two dimensions(height and width).
    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.

    Parameters:
        x (Tensor): 4-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_h, out_w) when input is a 4-D Tensor.
             Default: None. If a list/tuple, each element can be an integer or a Tensor of shape: [1].
             If a Tensor, it should be a 1-D Tensor.
        scale_factor (float|int|list|tuple|Tensor|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`.
             Has to match input size if it is either a list or a tuple or a Tensor.
             Default: None.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            input_data = paddle.rand(shape=(2,3,6,10)).astype("float32")
            upsample_out  = paddle.nn.UpsamplingBilinear2D(size=[12,12])
            input = paddle.to_tensor(input_data)
            output = upsample_out(x=input)
            print(output.shape)
            # [2L, 3L, 12L, 12L]
    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 data_format='NCHW',
                 name=None):
        super(UpsamplingBilinear2D, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.data_format = data_format
        self.name = name

    def forward(self, x):
        out = F.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode='bilinear',
            align_corners=True,
            align_mode=0,
            data_format=self.data_format,
            name=self.name)

        return out

    def extra_repr(self):
        if self.scale_factor is not None:
            main_str = 'scale_factor={}'.format(self.scale_factor)
        else:
            main_str = 'size={}'.format(self.size)
        name_str = ', name={}'.format(self.name) if self.name else ''
        return '{}, data_format={}{}'.format(main_str, self.data_format,
                                             name_str)


class Bilinear(layers.Layer):
    r"""

    This layer performs a bilinear transformation on two inputs.

    .. math::

      out_{i} = x1 * W_{i} * {x2^\mathrm{T}}, i=0,1,...,out\_features-1

      out = out + b

    In this formula:
     - :math:`x1`: the first input contains in1_features elements, shape is [batch_size, in1_features].
     - :math:`x2`: the second input contains in2_features elements, shape is [batch_size, in2_features].
     - :math:`W_{i}`: the i-th learned weight, shape is [in1_features, in2_features], and learned weight's shape is [out_features, in1_features, in2_features].
     - :math:`out_{i}`: the i-th element of out, shape is [batch_size], and out's shape is [batch_size, out_features].
     - :math:`b`: the learned bias, shape is [1, out_features].
     - :math:`x2^\mathrm{T}`: the transpose of :math:`x2`.

    Parameters:
       in1_features (int): The dimension of each first input(`x1`).
       in2_features (int): The dimension of each second input(`x2`).
       out_features (int): The dimension of output of this layer.
       weight_attr (ParamAttr, optional): The parameter attribute for the learnable weight of
       this layer. The default value is None.
       bias_attr (ParamAttr, optional): The parameter attribute for the bias
           of this layer. If it is set to False, no bias will be added to the output units.
           If it is set to None, the bias is initialized to zero. The default value is None.
       name (str, optional): The default value is None. Normally there is no need for user
           to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Attribute:
        **weight** (Parameter): the learnable weights of this layer.

        **bias** (Parameter): the learnable bias of this layer.

    Returns:
       Tensor: A 2-D Tensor of shape [batch_size, out_features].

    Examples:
       .. code-block:: python

        import paddle
        import numpy

        layer1 = numpy.random.random((5, 5)).astype('float32')
        layer2 = numpy.random.random((5, 4)).astype('float32')
        bilinear = paddle.nn.Bilinear(
            in1_features=5, in2_features=4, out_features=1000)
        result = bilinear(paddle.to_tensor(layer1),
                        paddle.to_tensor(layer2))     # result shape [5, 1000]
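
        # Hedged note (not part of the original example): per the docstring above,
        # bilinear.weight has shape [1000, 5, 4] and bilinear.bias has shape [1, 1000].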

    """

    def __init__(self,
                 in1_features,
                 in2_features,
                 out_features,
                 weight_attr=None,
                 bias_attr=None,
                 name=None):
        super(Bilinear, self).__init__()
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._name = name
        self._in1_features = in1_features
        self._in2_features = in2_features
        self._out_features = out_features
        self._dtype = self._helper.get_default_dtype()

        weight_shape = [
            self._out_features, self._in1_features, self._in2_features
        ]
        self.weight = self.create_parameter(
            attr=self._weight_attr,
            shape=weight_shape,
            dtype=self._dtype,
            is_bias=False)
        bias_shape = [1, self._out_features]
        self.bias = self.create_parameter(
            attr=self._bias_attr,
            shape=bias_shape,
            dtype=self._dtype,
            is_bias=True)

    def forward(self, x1, x2):
        return F.bilinear(x1, x2, self.weight, self.bias, self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'in1_features={}, in2_features={}, out_features={}, dtype={}{}'.format(
            self._in1_features, self._in2_features, self._out_features,
            self._dtype, name_str)


class Dropout(layers.Layer):
    """
    Dropout is a regularization technique for reducing overfitting by preventing
    neuron co-adaptation during training as described in the paper:
    `Improving neural networks by preventing co-adaptation of feature detectors <https://arxiv.org/abs/1207.0580>`_
    The dropout operator randomly sets the outputs of some units to zero, while upscaling the others
    according to the given dropout probability.

    See ``paddle.nn.functional.dropout`` for more details.

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float|int): Probability of setting units to zero. Default: 0.5
        axis (int|list|tuple): The axis along which the dropout is performed. Default None.
        mode(str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: N-D tensor.
        - output: N-D tensor, the same shape as input.


    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.array([[1,2,3], [4,5,6]]).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Dropout(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x)
            print(y_train)
            print(y_test)
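
            # Hedged note (not part of the original example): with the default
            # mode='upscale_in_train' and p=0.5, surviving entries of y_train are
            # scaled by 1/(1-p) (doubled here), while y_test is identical to x.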
   """

    def __init__(self, p=0.5, axis=None, mode="upscale_in_train", name=None):
        super(Dropout, self).__init__()

        self.p = p
        self.axis = axis
        self.mode = mode
        self.name = name

    def forward(self, input):
        out = F.dropout(
            input,
            p=self.p,
            axis=self.axis,
            training=self.training,
            mode=self.mode,
            name=self.name)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'p={}, axis={}, mode={}{}'.format(self.p, self.axis, self.mode,
                                                 name_str)


class Dropout2D(layers.Layer):
    """
    Randomly zero out entire channels (in the batched input 4d tensor with the shape `NCHW` ,
    a channel is a 2D feature map with the shape `HW`). Each channel will be zeroed out independently
    on every forward call with probability `p` using samples from a Bernoulli distribution.
    Dropout2D will help promote independence between feature maps as described in the paper:
    `Efficient Object Localization Using Convolutional Networks <https://arxiv.org/abs/1411.4280>`_

    See ``paddle.nn.functional.dropout2d`` for more details.

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float, optional): Probability of setting units to zero. Default: 0.5
        data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from `NCHW` or `NHWC`. The default is `NCHW`. When it is `NCHW`, the data is stored in the order of: [batch_size, input_channels, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: 4-D tensor.
        - output: 4-D tensor, the same shape as input.


    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Dropout2D(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x)
            print(y_train)
            print(y_test)
   """

    def __init__(self, p=0.5, data_format='NCHW', name=None):
        super(Dropout2D, self).__init__()

        self.p = p
        self.data_format = data_format
        self.name = name

    def forward(self, input):
        out = F.dropout2d(
            input,
            p=self.p,
            training=self.training,
            data_format=self.data_format,
            name=self.name)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'p={}, data_format={}{}'.format(self.p, self.data_format,
                                               name_str)


class Dropout3D(layers.Layer):
    """
    Randomly zero out entire channels (in the batched input 5d tensor with the shape `NCDHW` ,
    a channel is a 3D feature map with the shape `DHW` ). Each channel will be zeroed out independently
    on every forward call with probability `p` using samples from a Bernoulli distribution.
    Dropout3D will help promote independence between feature maps as described in the paper:
    `Efficient Object Localization Using Convolutional Networks <https://arxiv.org/abs/1411.4280>`_

    See ``paddle.nn.functional.dropout3d`` for more details.

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float | int): Probability of setting units to zero. Default: 0.5
        data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from `NCDHW` or `NDHWC`. The default is `NCDHW`. When it is `NCDHW`, the data is stored in the order of: [batch_size, input_channels, input_depth, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: 5-D tensor.
        - output: 5-D tensor, the same shape as input.


    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Dropout3D(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x)
            print(y_train)
            print(y_test)
   """

    def __init__(self, p=0.5, data_format='NCDHW', name=None):
        super(Dropout3D, self).__init__()

        self.p = p
        self.data_format = data_format
        self.name = name

    def forward(self, input):
        out = F.dropout3d(
            input,
            p=self.p,
            training=self.training,
            data_format=self.data_format,
            name=self.name)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'p={}, data_format={}{}'.format(self.p, self.data_format,
                                               name_str)


class AlphaDropout(layers.Layer):
    """
    Alpha Dropout is a type of Dropout that maintains the self-normalizing property. For an input with
    zero mean and unit standard deviation, the output of Alpha Dropout maintains the original mean and
    standard deviation of the input. Alpha Dropout fits well with the SELU activation function by randomly setting
    activations to the negative saturation value.

    For more information, please refer to:
    `Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float | int): Probability of setting units to zero. Default: 0.5
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: N-D tensor.
        - output: N-D tensor, the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = np.array([[-1, 1], [-1, 1]]).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.AlphaDropout(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x)
            print(y_train)
            # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly)
            print(y_test)
   """

    def __init__(self, p=0.5, name=None):
        super(AlphaDropout, self).__init__()
        self.p = p
        self.name = name

    def forward(self, input):
        out = F.alpha_dropout(
            input, p=self.p, training=self.training, name=self.name)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'p={}{}'.format(self.p, name_str)


class Pad1D(layers.Layer):
    """
    This interface is used to construct a callable object of the ``Pad1D`` class.
    Pad tensor according to 'padding', 'mode' and 'value'.
    If mode is 'reflect', pad[0] and pad[1] must be no greater than width-1.

    Parameters:
        padding (Tensor | List[int] | int): The padding size with data type int. If it is an int, the
            same padding is used in both dimensions. Otherwise, [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right).
        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'replicate' mode, uses input boundaries to pad the input tensor.
            When in 'circular' mode, uses circular input to pad the input tensor.
            Default is 'constant'.
        value (float32): The value to fill the padded areas. Default is 0.0
        data_format (str): A string from: "NCL", "NLC". Specify the data format of the input data.
           Default is  "NCL"
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[1., 2., 3.],
                  [4., 5., 6.]]]
            padding = [1, 2],
            mode = "constant"
            value = 0.0
            Out = [[[0. 1. 2. 3. 0. 0.]
                    [0. 4. 5. 6. 0. 0.]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np

            input_shape = (1, 2, 3)
            pad = [1, 2]
            mode = "constant"
            data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1
            my_pad = nn.Pad1D(padding=pad, mode=mode)
            result = my_pad(data)
            print(result)
            # [[[0. 1. 2. 3. 0. 0.]
            #   [0. 4. 5. 6. 0. 0.]]]
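
            # Hedged variant (not from the original docs, assuming standard reflection
            # semantics): 'reflect' mirrors interior values across the border instead
            # of filling with a constant.
            reflect_pad = nn.Pad1D(padding=pad, mode="reflect")
            print(reflect_pad(data))
            # [[[2. 1. 2. 3. 2. 1.]
            #   [5. 4. 5. 6. 5. 4.]]]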
    """

    def __init__(self,
                 padding,
                 mode='constant',
                 value=0.0,
                 data_format="NCL",
                 name=None):
        super(Pad1D, self).__init__()
        self._pad = _npairs(padding, 1)
        self._mode = mode
        self._value = value
        self._data_format = data_format
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     value=self._value,
                     data_format=self._data_format,
                     name=self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'padding={}, mode={}, value={}, data_format={}{}'.format(
            self._pad, self._mode, self._value, self._data_format, name_str)


class Pad2D(layers.Layer):
    """
    This interface is used to construct a callable object of the ``Pad2D`` class.
    Pad tensor according to 'padding', 'mode' and 'value'.
    If mode is 'reflect', pad[0] and pad[1] must be no greater
    than width-1. The height dimension has the same condition.

    Parameters:
        padding (Tensor | List[int] | int): The padding size with data type int. If it is an int, the
            same padding is used in all dimensions. Otherwise, [len(padding)/2] dimensions of input will be padded.
            The pad has the form (pad_left, pad_right, pad_top, pad_bottom).
        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'replicate' mode, uses input boundaries to pad the input tensor.
            When in 'circular' mode, uses circular input to pad the input tensor.
            Default is 'constant'.
        value (float32): The value to fill the padded areas. Default is 0.0
        data_format (str): A string from: "NCHW", "NHWC". Specify the data format of the input data.
           Default is  "NCHW"
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[[1., 2., 3.],
                   [4., 5., 6.]]]]
            padding = [1, 1, 0, 0]
            mode = "constant"
            value = 0.0
            Out = [[[[0. 1. 2. 3. 0.]
                     [0. 4. 5. 6. 0.]]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            input_shape = (1, 1, 2, 3)
            pad = [1, 0, 1, 2]
            mode = "constant"
            data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1
            my_pad = nn.Pad2D(padding=pad, mode=mode)
            result = my_pad(data)
            print(result)
            # [[[[0. 0. 0. 0.]
            #    [0. 1. 2. 3.]
            #    [0. 4. 5. 6.]
            #    [0. 0. 0. 0.]
            #    [0. 0. 0. 0.]]]]
    """

    def __init__(self,
                 padding,
                 mode='constant',
                 value=0.0,
                 data_format="NCHW",
                 name=None):
        super(Pad2D, self).__init__()
        self._pad = _npairs(padding, 2)
        self._mode = mode
        self._value = value
        self._data_format = data_format
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     value=self._value,
                     data_format=self._data_format,
                     name=self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'padding={}, mode={}, value={}, data_format={}{}'.format(
            self._pad, self._mode, self._value, self._data_format, name_str)


class Pad3D(layers.Layer):
    """
    This interface is used to construct a callable object of the ``Pad3D`` class.
    Pad tensor according to 'padding', 'mode' and 'value'.
    If mode is 'reflect', pad[0] and pad[1] must be no greater
    than width-1. The height and depth dimensions have the same condition.

    Parameters:
        padding (Tensor | List[int] | int): The padding size with data type int. If it is an int, the
            same padding is used in all dimensions. Otherwise, [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
        mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'replicate' mode, uses input boundaries to pad the input tensor.
            When in 'circular' mode, uses circular input to pad the input tensor.
            Default is 'constant'.
        value (float32): The value to fill the padded areas. Default is 0.0
        data_format (str): A string from: "NCDHW", "NDHWC". Specify the data format of the input data.
           Default is  "NCDHW"
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[[[1., 2., 3.],
                    [4., 5., 6.]]]]]
            padding = [1, 2, 0, 0, 0, 0]
            mode = "constant"
            value = 0.0
            Out = [[[[[0. 1. 2. 3. 0. 0.]
                      [0. 4. 5. 6. 0. 0.]]]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            input_shape = (1, 1, 1, 2, 3)
            pad = [1, 0, 1, 2, 0, 0]
            mode = "constant"
            data = paddle.arange(np.prod(input_shape), dtype="float32").reshape(input_shape) + 1
            my_pad = nn.Pad3D(padding=pad, mode=mode)
            result = my_pad(data)
            print(result)
            # [[[[[0. 0. 0. 0.]
            #     [0. 1. 2. 3.]
            #     [0. 4. 5. 6.]
            #     [0. 0. 0. 0.]
            #     [0. 0. 0. 0.]]]]]
    """

    def __init__(self,
                 padding,
                 mode='constant',
                 value=0.0,
                 data_format="NCDHW",
                 name=None):
        super(Pad3D, self).__init__()
        self._pad = _npairs(padding, 3)
        self._mode = mode
        self._value = value
        self._data_format = data_format
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     value=self._value,
                     data_format=self._data_format,
                     name=self._name)

    def extra_repr(self):
        name_str = ', name={}'.format(self._name) if self._name else ''
        return 'padding={}, mode={}, value={}, data_format={}{}'.format(
            self._pad, self._mode, self._value, self._data_format, name_str)


class CosineSimilarity(layers.Layer):
    """
    This interface is used to compute cosine similarity between x1 and x2 along axis.

    Parameters:
        axis (int): Dimension of vectors to compute cosine similarity. Default is 1.
        eps(float): Small value to avoid division by zero. Default is 1e-8.
    Returns:
        None

    Examples:
        .. code-block:: text

            Case 0:
                x1 = [[0.8024077  0.9927354  0.27238318 0.8344984 ]
                     [0.48949873 0.5797396  0.65444374 0.66510963]
                     [0.1031398  0.9614342  0.08365563 0.6796464 ]
                     [0.10760343 0.7461209  0.7726148  0.5801006 ]]
                x2 = [[0.62913156 0.1536727  0.9847992  0.04591406]
                     [0.9098952  0.15715368 0.8671125  0.3156102 ]
                     [0.4427798  0.54136837 0.5276275  0.32394758]
                     [0.3769419  0.8535014  0.48041078 0.9256797 ]]
                axis = 1
                eps = 1e-8
                Out: [0.5275037  0.8368967  0.75037485 0.9245899]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np

            np.random.seed(0)
            x1 = np.random.rand(2,3)
            x2 = np.random.rand(2,3)
            x1 = paddle.to_tensor(x1)
            x2 = paddle.to_tensor(x2)

            cos_sim_func = nn.CosineSimilarity(axis=0)
            result = cos_sim_func(x1, x2)
            print(result)
            # [0.99806249 0.9817672  0.94987036]
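
            # Hedged note (not part of the original example): with axis=0 and 2x3
            # inputs, the similarity is computed independently for each of the three
            # columns, which is why three values are returned.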
    """

    def __init__(self, axis=1, eps=1e-8):
        super(CosineSimilarity, self).__init__()
        self._axis = axis
        self._eps = eps

    def forward(self, x1, x2):
        return F.cosine_similarity(x1, x2, axis=self._axis, eps=self._eps)

    def extra_repr(self):
        return 'axis={_axis}, eps={_eps}'.format(**self.__dict__)


class Embedding(layers.Layer):
    r"""
    **Embedding Layer**

    This interface is used to construct a callable object of the ``Embedding`` class.
    For specific usage, refer to code examples. It implements the function of the Embedding Layer.
    This layer is used to look up the embedding vectors of the ids provided by :attr:`x` .
    It automatically constructs a 2D embedding matrix based on the
    input :attr:`num_embeddings` and :attr:`embedding_dim`.

    The shape of the output Tensor is generated by appending a dimension of size
    :attr:`embedding_dim` to the end of the input Tensor's shape.

    **Note:** The id in :attr:`x` must satisfy :math:`0 <= id < num_embeddings` ,
    otherwise the program will throw an exception and exit.

    .. code-block:: text

        Case 1:

        x is a Tensor. padding_idx = -1
            x.data = [[1, 3], [2, 4], [4, 127]]
            x.shape = [3, 2]
        Given size = [128, 16]
        output is a Tensor:
            out.shape = [3, 2, 16]
            out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
                        [0.345421456, 0.524563927, ..., 0.144534654]],

                        [[0.345249859, 0.124939536, ..., 0.194353745],
                        [0.945345345, 0.435394634, ..., 0.435345365]],

                        [[0.945345345, 0.435394634, ..., 0.435345365],
                        [0.0,         0.0,         ..., 0.0        ]]]  # padding data
        Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
        All-zero data is output whenever an id is 127.

    Parameters:
        num_embeddings (int): Just one element which indicates the size
            of the dictionary of embeddings.
        embedding_dim (int): Just one element which indicates the size of each embedding vector.
        padding_idx(int|long|None): padding_idx needs to be in the interval [-num_embeddings, num_embeddings).
            If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
            to :math:`num\_embeddings + padding\_idx` . It will output all-zero padding data whenever lookup
            encounters :math:`padding\_idx` in an id, and the padding data will not be updated while training.
            If set to None, it has no effect on the output. Default: None.
        sparse(bool): The flag indicating whether to use sparse update. This parameter only
            affects the performance of the backward gradient update. It is recommended to set
            it to True because sparse update is faster. But some optimizers do not support sparse update,
            such as :ref:`api_paddle_optimizer_adadelta_Adadelta` , :ref:`api_paddle_optimizer_adamax_Adamax` , :ref:`api_paddle_optimizer_lamb_Lamb`.
            In these cases, sparse must be False. Default: False.
        weight_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_ParamAttr` . In addition,
            user-defined or pre-trained word vectors can be loaded with the :attr:`weight_attr` parameter.
            The local word vector needs to be transformed into numpy format, and the shape of local word
            vector should be consistent with :attr:`num_embeddings` . Then :ref:`api_initializer_NumpyArrayInitializer`
            is used to load custom or pre-trained word vectors. See code example for details.
        name(str|None): For detailed information, please refer
               to :ref:`api_guide_Name`. Usually there is no need to set this
               parameter; it is None by default.

    Attribute:
        **weight** (Parameter): the learnable weights of this layer.

    Returns:
        None

    Examples:

        .. code-block:: python

            import paddle
            import numpy as np

            x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
            y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32)

            x = paddle.to_tensor(x_data, stop_gradient=False)
            y = paddle.to_tensor(y_data, stop_gradient=False)

            embedding = paddle.nn.Embedding(10, 3, sparse=True)

            w0=np.full(shape=(10, 3), fill_value=2).astype(np.float32)
            embedding.weight.set_value(w0)

            adam = paddle.optimizer.Adam(parameters=[embedding.weight], learning_rate=0.01)
            adam.clear_grad()

            # weight.shape = [10, 3]

            # x.data = [[3],[4],[5]]
            # x.shape = [3, 1]

            # out.data = [[2,2,2], [2,2,2], [2,2,2]]
            # out.shape = [3, 1, 3]
            out=embedding(x)
            out.backward()
            adam.step()
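
            # A minimal additional sketch (not part of the original example)
            # showing padding_idx: ids equal to padding_idx are embedded as
            # all-zero vectors and the padding row is not updated while training.
            pad_embedding = paddle.nn.Embedding(10, 3, padding_idx=0)
            pad_ids = paddle.to_tensor(np.arange(3).reshape((3, 1)).astype(np.int64))
            pad_out = pad_embedding(pad_ids)  # pad_out[0] is all zeros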

    """

    def __init__(self,
                 num_embeddings,
                 embedding_dim,
                 padding_idx=None,
                 sparse=False,
                 weight_attr=None,
                 name=None):
        super(Embedding, self).__init__()
        self._num_embeddings = num_embeddings
        self._embedding_dim = embedding_dim
        self._sparse = sparse
        self._is_distributed = False
        self._padding_idx = padding_idx

        if self._num_embeddings <= 0:
            raise ValueError("num_embeddings must be greater than 0")

        if self._embedding_dim <= 0:
            raise ValueError("embedding_dim must be greater than 0")

        # Normalize padding_idx for the checks and weight setup below: None is
        # stored as -1 (disabled), and a negative index wraps around, e.g.
        # padding_idx = -1 becomes num_embeddings - 1.
        padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
            num_embeddings + padding_idx)

        if padding_idx >= num_embeddings or padding_idx < -num_embeddings:
            raise ValueError("padding_idx must be within [-{}, {})".format(
                num_embeddings, num_embeddings))

        self._dtype = self._helper.get_default_dtype()
        self._size = [self._num_embeddings, self._embedding_dim]

        self._weight_attr = weight_attr
        self._remote_prefetch = False
        self._name = name
        self.weight = self.create_parameter(
            attr=self._weight_attr,
            shape=self._size,
            dtype=self._dtype,
            is_bias=False)

        # In dygraph mode, zero out the weight row at padding_idx (when padding
        # is enabled) so the padding entry starts as all-zero data.
        if in_dygraph_mode() and padding_idx != -1:
            self.weight[padding_idx] = 0.0

    def forward(self, x):
        return F.embedding(
            x,
            weight=self.weight,
            padding_idx=self._padding_idx,
            sparse=self._sparse,
            name=self._name)

    def extra_repr(self):
        main_str = '{_num_embeddings}, {_embedding_dim}'
        if self._padding_idx is not None:
            main_str += ', padding_idx={_padding_idx}'
        main_str += ', sparse={_sparse}'
        if self._name is not None:
            main_str += ', name={_name}'
        return main_str.format(**self.__dict__)


class Unfold(layers.Layer):
    """
    This op returns a col buffer of sliding local blocks of input x, also known
    as im2col for batched 2D image tensors. For each block under the convolution filter,
    all elements will be rearranged as a column. While the convolution filter slides over
    the input feature map, a series of such columns will be formed.

    For each input :math:`x` with shape [N, C, H, W], the output is a tensor of shape
    [N, Cout, Lout], where Cout and Lout are determined by the kernel size, strides,
    paddings and dilations.

    See ``paddle.nn.functional.unfold`` for more details.

    
    Parameters:
        kernel_sizes(int|list):   The size of convolution kernel, should be [k_h, k_w]
                                  or an integer k treated as [k, k].
        strides(int|list):        The strides, should be [stride_h, stride_w]
                                  or an integer stride treated as [stride, stride].
                                  By default, strides will be [1, 1].
        paddings(int|list):       The paddings of each dimension, should be
                                  [padding_top, padding_left, padding_bottom, padding_right]
                                  or [padding_h, padding_w] or an integer padding.
                                  If [padding_h, padding_w] was given, it will be expanded to
                                  [padding_h, padding_w, padding_h, padding_w]. If an integer
                                  padding was given, [padding, padding, padding, padding] will
                                  be used. By default, paddings will be [0, 0, 0, 0].
        dilations(int|list):      The dilations of the convolution kernel, should be
                                  [dilation_h, dilation_w], or an integer dilation treated as
                                  [dilation, dilation]. By default, dilations will be [1, 1].
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`


    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            x = paddle.randn((100,3,224,224))
            unfold = nn.Unfold(kernel_sizes=[3, 3])
            result = unfold(x)
            print(result)
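
            # A rough shape check (a sketch assuming the standard im2col shape
            # formula with the default stride 1, padding 0 and dilation 1; see
            # paddle.nn.functional.unfold for the exact definition):
            #   Cout = C * k_h * k_w           = 3 * 3 * 3          = 27
            #   hout = (H - k_h) // stride + 1 = (224 - 3) // 1 + 1 = 222
            #   wout = (W - k_w) // stride + 1 = 222
            #   Lout = hout * wout             = 49284
            # so result.shape is expected to be [100, 27, 49284]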
   """

    def __init__(self,
                 kernel_sizes,
                 dilations=1,
                 paddings=0,
                 strides=1,
                 name=None):
        super(Unfold, self).__init__()

        self.kernel_sizes = kernel_sizes
        self.dilations = dilations
        self.paddings = paddings
        self.strides = strides
        self.name = name

    def forward(self, input):
        return F.unfold(input, self.kernel_sizes, self.dilations, self.paddings,
                        self.strides, self.name)

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'kernel_size={}, dilation={}, padding={}, stride={}{}'.\
                format(self.kernel_sizes, self.dilations, self.paddings, self.strides, name_str)