#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define the common classes to build a neural network
from ...fluid.dygraph import BilinearTensorProduct  #DEFINE_ALIAS
from ...fluid.dygraph import Pool2D  #DEFINE_ALIAS
from ...fluid.dygraph import Flatten  #DEFINE_ALIAS
from ...fluid.dygraph import layers
from .. import functional as F
from ...fluid.framework import _dygraph_tracer

__all__ = [
    'BilinearTensorProduct',
    'Pool2D',
    'Embedding',
    'Linear',
    'Upsample',
    'Pad2D',
    'UpsamplingNearest2d',
    'UpsamplingBilinear2d',
    'ReflectionPad1d',
    'ReplicationPad1d',
    'ConstantPad1d',
    'ReflectionPad2d',
    'ReplicationPad2d',
    'ConstantPad2d',
    'ZeroPad2d',
    'ConstantPad3d',
    'ReplicationPad3d',
    'CosineSimilarity',
    'Dropout',
    'Dropout2d',
    'Dropout3d',
    'Bilinear',
    'AlphaDropout',
]


class Linear(layers.Layer):
    """

    Fully-connected linear transformation layer. For each input :math:`X` ,
    the equation is:

    .. math::

        Out = XW + b

    where :math:`W` is the weight and :math:`b` is the bias.

    Linear layer takes only one multi-dimensional tensor as input with the
    shape :math:`[batch\_size, *, in\_features]` , where :math:`*` means any
    number of additional dimensions. It multiplies input tensor with the weight
    (a 2-D tensor of shape :math:`[in\_features, out\_features]` ) and produces
    an output tensor of shape :math:`[batch\_size, *, out\_features]` .
    If :math:`bias\_attr` is not False, the bias (a 1-D tensor of
    shape :math:`[out\_features]` ) will be created and added to the output.

    Parameters:
        in_features (int): The number of input units.
        out_features (int): The number of output units.
        weight_attr (ParamAttr, optional): The attribute for the learnable
            weight of this layer. The default value is None and the weight will be
            initialized to zero. For detailed information, please refer to
            paddle.ParamAttr.
        bias_attr (ParamAttr|bool, optional): The attribute for the learnable bias
            of this layer. If it is set to False, no bias will be added to the output.
            If it is set to None or one kind of ParamAttr, a bias parameter will
            be created according to ParamAttr. For detailed information, please refer
            to paddle.ParamAttr. The default value is None and the bias will be
            initialized to zero.
        name (str, optional): Normally there is no need for user to set this parameter.
            For detailed information, please refer to :ref:`api_guide_Name` .

    Attribute:
        **weight** (Parameter): the learnable weight of this layer.

        **bias** (Parameter): the learnable bias of this layer.

    Shape:
        - input: Multi-dimensional tensor with shape :math:`[batch\_size, *, in\_features]` .
        - output: Multi-dimensional tensor with shape :math:`[batch\_size, *, out\_features]` .

    Examples:
        .. code-block:: python

          import paddle

          # Define the linear layer.
          weight_attr = paddle.ParamAttr(
              name="weight",
              initializer=paddle.nn.initializer.Constant(value=0.5))
          bias_attr = paddle.ParamAttr(
              name="bias",
              initializer=paddle.nn.initializer.Constant(value=1.0))
          linear = paddle.nn.Linear(2, 4, weight_attr=weight_attr, bias_attr=bias_attr)
          # linear.weight: [[0.5 0.5 0.5 0.5]
          #                 [0.5 0.5 0.5 0.5]]
          # linear.bias: [1. 1. 1. 1.]

          x = paddle.randn((3, 2), dtype="float32")
          # x: [[-0.32342386 -1.200079  ]
          #     [ 0.7979031  -0.90978354]
          #     [ 0.40597573  1.8095392 ]]
          y = linear(x)
          # y: [[0.23824859 0.23824859 0.23824859 0.23824859]
          #     [0.9440598  0.9440598  0.9440598  0.9440598 ]
          #     [2.1077576  2.1077576  2.1077576  2.1077576 ]]
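
          # A hedged check (not part of the original example) that the result
          # matches the Out = XW + b formula above, using the layer's own parameters:
          y_manual = paddle.matmul(x, linear.weight) + linear.bias
          # y_manual should equal y up to floating-point tolerance.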
    """

    def __init__(self,
                 in_features,
                 out_features,
                 weight_attr=None,
                 bias_attr=None,
                 name=None):
        super(Linear, self).__init__()
        self._dtype = self._helper.get_default_dtype()
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self.name = name
        self.weight = self.create_parameter(
            shape=[in_features, out_features],
            attr=self._weight_attr,
            dtype=self._dtype,
            is_bias=False)
        self.bias = self.create_parameter(
            shape=[out_features],
            attr=self._bias_attr,
            dtype=self._dtype,
            is_bias=True)

    def forward(self, input):
        out = F.linear(
            x=input, weight=self.weight, bias=self.bias, name=self.name)
        return out


class Upsample(layers.Layer):
    """
    This op resizes a batch of images.

    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
    or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
    (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    where in_w is the width of the input tensor, in_h is the height of the input tensor,
    in_d is the depth of the input tensor,
    and the resizing only applies to the three dimensions (depth, height and width).

    Supported resample methods:
        'linear' : Linear interpolation
        'bilinear' : Bilinear interpolation
        'trilinear' : Trilinear interpolation
        'nearest' : Nearest neighbor interpolation
        'bicubic' : Bicubic interpolation

    Linear interpolation is the method of using a line connecting two known quantities
    to determine the value of an unknown quantity between the two known quantities.

    Nearest neighbor interpolation is to perform nearest neighbor interpolation
    in both the 3rd dimension(in height direction) and the 4th dimension(in width
    direction) on input tensor.

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
    The linear interpolation is performed on three directions.
    align_corners and align_mode are optional parameters; the calculation method
    of interpolation can be selected by them.

    Area interpolation performs area interpolation on the 3rd dimension (height direction),
    the 4th dimension (width direction) and the 5th dimension (depth direction) of the input
    tensor. Setting mode to 'area' directly calls `paddle.nn.functional.adaptive_avg_pool1d`,
    `paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`.

    Example:

    .. code-block:: text

        For scale_factor:
            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)
            else:
              scale_factor = float(in_size/out_size)

        Linear interpolation:
            if:
                align_corners = False , align_mode = 0
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = (W_{in}+0.5) * scale_{factor} - 0.5
            else:
                input : (N,C,W_in)
                output: (N,C,W_out) where:
                W_out = W_{in} * scale_{factor}

        Nearest neighbor interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})
          else:
              align_corners = True
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = round(H_{in} * scale_{factor})
              W_out = round(W_{in} * scale_{factor})

        Bilinear interpolation:
          if:
              align_corners = False , align_mode = 0

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Bicubic interpolation:
          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5

          else:
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.

    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.

    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation

    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation.

    Parameters:
        x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w) 
             when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor. 
             Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
             If a Tensor Variable, its dimension should be 1.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`. Has to match input size if it is either a list or a tuple or a Tensor.
             Default: None.
        mode (str): The resample method. It supports 'linear', 'nearest', 'bilinear',
                       'bicubic' and 'trilinear' currently. Default: 'nearest'.
        align_corners(bool): An optional bool. If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
                               corner pixels.
                               Default: False
        align_mode(int): An optional int for linear/bilinear/trilinear interpolation. Refer to the formula in the example above:
                            it can be '0' for src_idx = scale_factor*(dst_index+0.5)-0.5, or '1' for
                            src_idx = scale_factor*dst_index.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
                    'trilinear', 'bicubic', or 'nearest' currently.
        ValueError: 'linear' only support 3-D tensor.
        ValueError: 'bilinear', 'bicubic' and 'nearest' only support 4-D tensor.
        ValueError: 'trilinear' only support 5-D tensor.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 1 for input 3-D tensor.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: size length should be 3 for input 5-D tensor.
        ValueError: scale_factor should be greater than zero.
        TypeError: align_corners should be a bool value
        ValueError: align_mode can only be '0' or '1'
        ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_data = np.random.rand(2,3,6,10).astype("float32")
            upsample_out  = paddle.nn.Upsample(size=[12,12])

            input = paddle.to_tensor(input_data)
            output = upsample_out(x=input)
            print(output.shape)
            # [2L, 3L, 12L, 12L]
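
            # A hedged sketch (not part of the original example): the same layer can
            # be driven by scale_factor instead of size. With mode='nearest' and
            # align_corners=False, the output spatial sizes are floor(in_size * scale).
            upsample_by_scale = paddle.nn.Upsample(scale_factor=2.0, mode='nearest')
            output_scaled = upsample_by_scale(input)
            print(output_scaled.shape)
            # expected: [2, 3, 12, 20] for the (2, 3, 6, 10) input above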

    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 mode='nearest',
                 align_corners=False,
                 align_mode=0,
                 data_format='NCHW',
                 name=None):
        super(Upsample, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode.lower()
        self.align_corners = align_corners
        self.align_mode = align_mode
        self.data_format = data_format
        self.name = name

    def forward(self, x):
        out = F.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners,
            align_mode=self.align_mode,
            data_format=self.data_format,
            name=self.name)

        return out


class UpsamplingNearest2d(layers.Layer):
    """
    This op upsamples a batch of images, using nearest neighbours' pixel values.
    The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w), 
    where in_w is width of the input tensor, in_h is the height of the input tensor.
    And the upsampling only applies on the two dimensions(height and width).

    Nearest neighbor interpolation is to perform nearest neighbor interpolation
    in both the 3rd dimension(in height direction) and the 4th dimension(in width
    direction) on input tensor.
    
    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
    
    Parameters:
        x (Tensor): 4-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_h, out_w) when input is a 4-D Tensor.
             Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
             If a Tensor Variable, its dimension should be 1.
        scale_factor (float|int|list|tuple|Tensor|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`.
             Has to match input size if it is either a list or a tuple or a Tensor.
             Default: None.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: 'nearest' only support 4-D tensor.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: scale_factor should be greater than zero.
        ValueError: data_format can only be 'NCHW', 'NHWC'.
    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_data = np.random.rand(2,3,6,10).astype("float32")
            upsample_out  = paddle.nn.UpsamplingNearest2d(size=[12,12])

            input = paddle.to_tensor(input_data)
            output = upsample_out(x=input)
            print(output.shape)
            # [2L, 3L, 12L, 12L]

    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 data_format='NCHW',
                 name=None):
        super(UpsamplingNearest2d, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.data_format = data_format
        self.name = name

    def forward(self, x):
        out = F.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode='nearest',
            align_corners=False,
            align_mode=0,
            data_format=self.data_format,
            name=self.name)

        return out


class UpsamplingBilinear2d(layers.Layer):
    """
    This op upsamples a batch of images using bilinear interpolation.
    The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w), 
    where in_w is width of the input tensor, in_h is the height of the input tensor.
    And the upsampling only applies on the two dimensions(height and width).

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.
    
    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.
    
    Parameters:
        x (Tensor): 4-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        size (list|tuple|Tensor|None): Output shape of image resize
             layer, the shape is (out_h, out_w) when input is a 4-D Tensor.
             Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
             If a Tensor Variable, its dimension should be 1.
        scale_factor (float|int|list|tuple|Tensor|None): The multiplier for the input height or width. At
             least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`.
             Has to match input size if it is either a list or a tuple or a Tensor.
             Default: None.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
    Raises:
        TypeError: size should be a list or tuple or Tensor.
        ValueError: 'bilinear' only support 4-D tensor.
        ValueError: One of size and scale_factor must not be None.
        ValueError: size length should be 2 for input 4-D tensor.
        ValueError: scale_factor should be greater than zero.
        ValueError: data_format can only be 'NCHW', 'NHWC'.
    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_data = np.random.rand(2,3,6,10).astype("float32")
            upsample_out  = paddle.nn.UpsamplingBilinear2d(size=[12,12])

            input = paddle.to_tensor(input_data)
            output = upsample_out(x=input)
            print(output.shape)
            # [2L, 3L, 12L, 12L]
    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 data_format='NCHW',
                 name=None):
        super(UpsamplingBilinear2d, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.data_format = data_format
        self.name = name

    def forward(self, x):
        out = F.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode='bilinear',
            align_corners=True,
            align_mode=0,
            data_format=self.data_format,
            name=self.name)

        return out


class Pad2D(layers.Layer):
    """
        :alias_main: paddle.nn.Pad2D
        :alias: paddle.nn.Pad2D,paddle.nn.layer.Pad2D,paddle.nn.layer.common.Pad2D
    This interface is used to construct a callable object of the ``Pad2D``  class.
    The Pad2D layer pads the input tensor boundaries according to 'paddings' and 'mode'.
    If mode is 'reflect', paddings[0] and paddings[1] must be no greater
    than height-1. And the width dimension has the same condition.
    Parameters:
        paddings (int | List[int32]): The padding size. If padding is an int, the same
            padding is used on all boundaries; if padding is a List, it must contain four integers,
            (padding_top, padding_bottom, padding_left, padding_right).
            Default is [0, 0, 0, 0].
        mode (str): Three modes: 'constant' (default), 'reflect', 'edge'.
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'edge' mode, uses input boundaries to pad the input tensor.
            Default is 'constant'.
        pad_value (float32): The value to fill the padded areas in 'constant' mode. Default is 0.0.
        data_format (str): A string from: "NHWC", "NCHW". Specify the data format of
                           the input data.
                           Default is "NCHW".
    Returns:
        None
    Examples:
        .. code-block:: text

            Input = [[[[1., 2., 3.],
                       [4., 5., 6.]]]]
            Case 0:
                paddings = [0, 1, 2, 3],
                mode = 'constant'
                pad_value = 0
                Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.],
                         [0., 0., 4., 5., 6., 0., 0., 0.],
                         [0., 0., 0., 0., 0., 0., 0., 0.]]]]
            Case 1:
                paddings = [0, 1, 2, 1],
                mode = 'reflect'
                Out = [[[[3., 2., 1., 2., 3., 2.],
                         [6., 5., 4., 5., 6., 5.],
                         [3., 2., 1., 2., 3., 2.]]]]
            Case 2:
                paddings = [0, 1, 2, 1],
                mode = 'edge'
                Out = [[[[1., 1., 1., 2., 3., 3.],
                         [4., 4., 4., 5., 6., 6.],
                         [4., 4., 4., 5., 6., 6.]]]]
    Code Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.nn as nn
            import numpy as np
            data = np.ones((2, 2, 2, 2)).astype('float32')
            my_pad = nn.Pad2D(paddings=[1, 1, 1, 1])
            with fluid.dygraph.guard():
                data = fluid.dygraph.to_variable(data)
                result = my_pad(data)
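
                # A hedged variant (not from the original example), still inside the
                # same dygraph guard: 'reflect' mode reuses interior values instead of
                # a constant; paddings must stay below the padded dimension's size.
                reflect_pad = nn.Pad2D(paddings=[1, 1, 1, 1], mode='reflect')
                reflect_result = reflect_pad(data)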
    """

    def __init__(self,
                 paddings=0,
                 mode='constant',
                 pad_value=0.0,
                 data_format="NCHW"):
        super(Pad2D, self).__init__()
        self._mode = mode
        self._pad_value = pad_value
        self._data_format = data_format
        self._paddings = [paddings] * 4 if isinstance(paddings,
                                                      int) else paddings

    def forward(self, input):
        return F.pad2d(
            input,
            paddings=self._paddings,
            mode=self._mode,
            pad_value=self._pad_value,
            data_format=self._data_format)


class Bilinear(layers.Layer):
    """

    This layer performs a bilinear transformation on two inputs.

    .. math::

      out_{i} = x1 * W_{i} * {x2^\mathrm{T}}, i=0,1,...,out\_features-1

      out = out + b

    In this formula:
     - :math:`x1`: the first input contains in1_features elements, shape is [batch_size, in1_features].
     - :math:`x2`: the second input contains in2_features elements, shape is [batch_size, in2_features].
     - :math:`W_{i}`: the i-th learned weight, shape is [in1_features, in2_features], and learned weight's shape is [out_features, in1_features, in2_features].
     - :math:`out_{i}`: the i-th element of out, shape is [batch_size, out_features].
     - :math:`b`: the learned bias, shape is [1, out_features].
     - :math:`x2^\mathrm{T}`: the transpose of :math:`x2`.

    Parameters:
       in1_features (int): The dimension of each first input(`x1`).
       in2_features (int): The dimension of each second input(`x2`).
       out_features (int): The dimension of output of this layer.
       weight_attr (ParamAttr, optional): The parameter attribute for the learnable weight of
       this layer. The default value is None.
       bias_attr (ParamAttr, optional): The parameter attribute for the bias
           of this layer. If it is set to False, no bias will be added to the output units.
           If it is set to None, the bias is initialized to zero. The default value is None.
       name (str, optional): The default value is None. Normally there is no need for user
           to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.

    Attribute:
        **weight** (Parameter): the learnable weights of this layer.

        **bias** (Parameter): the learnable bias of this layer.

    Returns:
       Tensor: A 2-D Tensor of shape [batch_size, out_features].

    Examples:
       .. code-block:: python

        import paddle
        import numpy

        paddle.disable_static()
        layer1 = numpy.random.random((5, 5)).astype('float32')
        layer2 = numpy.random.random((5, 4)).astype('float32')
        bilinear = paddle.nn.Bilinear(
            in1_features=5, in2_features=4, out_features=1000)
        result = bilinear(paddle.to_tensor(layer1),
                        paddle.to_tensor(layer2))     # result shape [5, 1000]
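
        # A hedged numerical check (not part of the original example) of the formula
        # out_i = x1 * W_i * x2^T + b above, using the layer's own parameters:
        W = bilinear.weight.numpy()   # shape [1000, 5, 4]
        b = bilinear.bias.numpy()     # shape [1, 1000]
        manual_0 = (layer1 @ W[0] * layer2).sum(axis=1) + b[0, 0]
        # manual_0 should match the first output column up to floating-point tolerance.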

    """

    def __init__(self,
                 in1_features,
                 in2_features,
                 out_features,
                 weight_attr=None,
                 bias_attr=None,
                 name=None):
        super(Bilinear, self).__init__()
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._name = name
        self._in1_features = in1_features
        self._in2_features = in2_features
        self._out_features = out_features
        self._dtype = self._helper.get_default_dtype()

        weight_shape = [
            self._out_features, self._in1_features, self._in2_features
        ]
        self.weight = self.create_parameter(
            attr=self._weight_attr,
            shape=weight_shape,
            dtype=self._dtype,
            is_bias=False)
        bias_shape = [1, self._out_features]
        self.bias = self.create_parameter(
            attr=self._bias_attr,
            shape=bias_shape,
            dtype=self._dtype,
            is_bias=True)

    def forward(self, x1, x2):
        return F.bilinear(x1, x2, self.weight, self.bias, self._name)


class Dropout(layers.Layer):
    """
    Dropout is a regularization technique for reducing overfitting by preventing
    neuron co-adaptation during training, as described in the paper:
    `Improving neural networks by preventing co-adaptation of feature detectors <https://arxiv.org/abs/1207.0580>`_
    The dropout operator randomly sets the outputs of some units to zero, while upscaling the others
    according to the given dropout probability.

    See ``paddle.nn.functional.dropout`` for more details.

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float | int): Probability of setting units to zero. Default: 0.5
        axis (int | list): The axis along which the dropout is performed. Default None.
        mode(str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: N-D tensor.
        - output: N-D tensor, the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()
            x = np.array([[1,2,3], [4,5,6]]).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Dropout(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x.numpy())
            print(y_train.numpy())
            print(y_test.numpy())
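
            # A hedged sketch (not part of the original example): with the alternative
            # 'downscale_in_infer' mode documented above, kept units are not rescaled
            # at train time; instead the whole output is scaled by (1 - p) at inference.
            m2 = paddle.nn.Dropout(p=0.5, mode="downscale_in_infer")
            m2.eval()
            y_infer = m2(x)   # in eval mode this equals x * (1.0 - 0.5)
            print(y_infer.numpy())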
   """

    def __init__(self, p=0.5, axis=None, mode="upscale_in_train", name=None):
        super(Dropout, self).__init__()

        self.p = p
        self.axis = axis
        self.mode = mode
        self.name = name

    def forward(self, input):
        out = F.dropout(
            input,
            p=self.p,
            axis=self.axis,
            training=self.training,
            mode=self.mode,
            name=self.name)
        return out


class Dropout2d(layers.Layer):
    """
    Randomly zero out entire channels (in the batched input 4d tensor with the shape `NCHW` ,
    a channel is a 2D feature map with the shape `HW`). Each channel will be zeroed out independently
    on every forward call with probability `p` using samples from a Bernoulli distribution.
    Dropout2d will help promote independence between feature maps as described in the paper:
    `Efficient Object Localization Using Convolutional Networks <https://arxiv.org/abs/1411.4280>`_

    See ``paddle.nn.functional.dropout2d`` for more details.

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float, optional): Probability of setting units to zero. Default: 0.5
        data_format (str, optional): Specify the data format of the input, and the data format of the output
                                     will be consistent with that of the input. An optional string from:
                                    `NCHW`, `NHWC`. The default is `NCHW`. When it is `NCHW`, the data is
                                     stored in the order of: [batch_size, input_channels, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: 4-D tensor.
        - output: 4-D tensor, the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()
            x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Dropout2d(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x.numpy())
            print(y_train.numpy())
            print(y_test.numpy())
   """

    def __init__(self, p=0.5, data_format='NCHW', name=None):
        super(Dropout2d, self).__init__()

        self.p = p
        self.data_format = data_format
        self.name = name

    def forward(self, input):
        out = F.dropout2d(
            input,
            p=self.p,
            training=self.training,
            data_format=self.data_format,
            name=self.name)
        return out


class Dropout3d(layers.Layer):
    """
    Randomly zero out entire channels (in the batched input 5d tensor with the shape `NCDHW` ,
    a channel is a 3D feature map with the shape `DHW` ). Each channel will be zeroed out independently
    on every forward call with probability `p` using samples from a Bernoulli distribution.
    Dropout3d will help promote independence between feature maps as described in the paper:
    `Efficient Object Localization Using Convolutional Networks <https://arxiv.org/abs/1411.4280>`_

    See ``paddle.nn.functional.dropout3d`` for more details.

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float | int): Probability of setting units to zero. Default: 0.5
        data_format (str, optional): Specify the data format of the input, and the data format of the output
                                     will be consistent with that of the input. An optional string from:
                                    `NCDHW`, `NDHWC`. The default is `NCDHW`. When it is `NCDHW`, the data is
                                     stored in the order of: [batch_size, input_channels, input_depth, input_height, input_width].
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: 5-D tensor.
        - output: 5-D tensor, the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()
            x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.Dropout3d(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x.numpy())
            print(y_train.numpy())
            print(y_test.numpy())
   """

    def __init__(self, p=0.5, data_format='NCDHW', name=None):
        super(Dropout3d, self).__init__()

        self.p = p
        self.data_format = data_format
        self.name = name

    def forward(self, input):
        out = F.dropout3d(
            input,
            p=self.p,
            training=self.training,
            data_format=self.data_format,
            name=self.name)
        return out


class AlphaDropout(layers.Layer):
    """
    Alpha Dropout is a type of Dropout that maintains the self-normalizing property. For an input with
    zero mean and unit standard deviation, the output of Alpha Dropout maintains the original mean and
    standard deviation of the input. Alpha Dropout fits well with the SELU activation function by randomly setting
    activations to the negative saturation value.

    For more information, please refer to:
    `Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_

    In dygraph mode, please use ``eval()`` to switch to evaluation mode, where dropout is disabled.

    Parameters:
        p (float | int): Probability of setting units to zero. Default: 0.5
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: N-D tensor.
        - output: N-D tensor, the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()
            x = np.array([[-1, 1], [-1, 1]]).astype('float32')
            x = paddle.to_tensor(x)
            m = paddle.nn.AlphaDropout(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
            print(x.numpy())
            print(y_train.numpy())
            # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly)
            print(y_test.numpy())
   """

    def __init__(self, p=0.5, name=None):
        super(AlphaDropout, self).__init__()
        self.p = p
        self.name = name

    def forward(self, input):
        out = F.alpha_dropout(
            input, p=self.p, training=self.training, name=self.name)
        return out


class ReflectionPad1d(layers.Layer):
    """
    This interface is used to construct a callable object of the ``ReflectionPad1d`` class.
    Uses reflection of the input boundaries to pad the input tensor.

    Parameters:
        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right).
        data_format (str): A string from: "NCL", "NLC". Specify the data format of the input data.
           Default is  "NCL"
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[1., 2., 3.],
                  [4., 5., 6.]]]
            padding = [1, 2],
            Out = [[[2. 1. 2. 3. 2. 1.]
                    [5. 4. 5. 6. 5. 4.]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_shape = (1, 2, 3)
            pad = [1, 2]
            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
            my_pad = nn.ReflectionPad1d(padding=pad)
            data = paddle.to_tensor(data)
            result = my_pad(data)
            print(result.numpy())
            # [[[2. 1. 2. 3. 2. 1.]
            #   [5. 4. 5. 6. 5. 4.]]]
    """

    def __init__(self, padding, data_format="NCL", name=None):
        super(ReflectionPad1d, self).__init__()
        self._mode = "reflect"
        self._data_format = data_format
        self._pad = padding
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     data_format=self._data_format,
                     name=self._name)


class ReplicationPad1d(layers.Layer):
    """
    This interface is used to construct a callable object of the ``ReplicationPad1d`` class.
    Uses input boundaries to pad the input tensor.

    Parameters:
        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right).
        data_format (str): A string from: "NCL", "NLC". Specify the data format of the input data.
           Default is  "NCL"
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[1., 2., 3.],
                  [4., 5., 6.]]]
            padding = [1, 2],
            Out = [[[1. 1. 2. 3. 3. 3.]
                    [4. 4. 5. 6. 6. 6.]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_shape = (1, 2, 3)
            pad = [1, 2]
            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
            my_pad = nn.ReplicationPad1d(padding=pad)
            data = paddle.to_tensor(data)
            result = my_pad(data)
            print(result.numpy())
            # [[[1. 1. 2. 3. 3. 3.]
            #   [4. 4. 5. 6. 6. 6.]]]
    """

    def __init__(self, padding, data_format="NCL", name=None):
        super(ReplicationPad1d, self).__init__()
        self._mode = "replicate"
        self._data_format = data_format
        self._pad = padding
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     data_format=self._data_format,
                     name=self._name)


class ConstantPad1d(layers.Layer):
    """
    This interface is used to construct a callable object of the ``ConstantPad1d`` class.
    Uses a constant value to pad the input tensor.

    Parameters:
        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right).
        value (float32): The value to fill the padded areas. Default is 0.0
        data_format (str): A string from: "NCL", "NLC". Specify the data format of the input data.
           Default is  "NCL"
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[1., 2., 3.],
                  [4., 5., 6.]]]
            padding = [1, 2],
            value = 0.0
            Out = [[[0. 1. 2. 3. 0. 0.]
                    [0. 4. 5. 6. 0. 0.]]]

    Code Examples:
        .. code-block:: python

L
littletomatodonkey 已提交
1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157
            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_shape = (1, 2, 3)
            pad = [1, 2]
            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
            my_pad = nn.ConstantPad1d(padding=pad)
            data = paddle.to_tensor(data)
            result = my_pad(data)
            print(result.numpy())
            # [[[0. 1. 2. 3. 0. 0.]
            #   [0. 4. 5. 6. 0. 0.]]]
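
            # A hedged variant (not from the original example): the `value` parameter
            # documented above fills the padded positions with a non-zero constant.
            my_pad_9 = nn.ConstantPad1d(padding=pad, value=9.0)
            result_9 = my_pad_9(data)
            # expected: 9s in place of the 0s shown above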
    """

    def __init__(self, padding, value=0.0, data_format="NCL", name=None):
        super(ConstantPad1d, self).__init__()
        self._mode = "constant"
        self._data_format = data_format
        self._pad = padding
        self._value = value
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     value=self._value,
                     data_format=self._data_format,
                     name=self._name)


class ConstantPad2d(layers.Layer):
    """
    This interface is used to construct a callable object of the ``ConstantPad2d`` class.
    Uses a constant value to pad the input tensor.

    Parameters:
        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom).
        value (float32): The value to fill the padded areas. Default is 0.0
        data_format (str): A string from: "NCHW", "NHWC". Specify the data format of
           the input data.
           Default is  "NCHW"
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[[1., 2., 3.],
                   [4., 5., 6.]]]]
            padding = [1, 1, 0, 0]
            value = 0.0
            Out = [[[[0. 1. 2. 3. 0.]
                     [0. 4. 5. 6. 0.]]]]

    Code Examples:
        .. code-block:: python

L
littletomatodonkey 已提交
1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222
            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_shape = (1, 1, 2, 3)
            pad = [1, 0, 1, 2]
            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
            my_pad = nn.ConstantPad2d(padding=pad)
            data = paddle.to_tensor(data)
            result = my_pad(data)
            print(result.numpy())
            # [[[[0. 0. 0. 0.]
            #    [0. 1. 2. 3.]
            #    [0. 4. 5. 6.]
            #    [0. 0. 0. 0.]
            #    [0. 0. 0. 0.]]]]
    """

    def __init__(self, padding, value=0.0, data_format="NCHW", name=None):
        super(ConstantPad2d, self).__init__()
        self._mode = "constant"
        self._data_format = data_format
        self._pad = padding
        self._value = value
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     value=self._value,
                     data_format=self._data_format,
                     name=self._name)


class ZeroPad2d(layers.Layer):
    """
    This interface is used to construct a callable object of the ``ZeroPad2d`` class.
    Uses 0 to pad the input tensor.

    Parameters:
        padding (Variable | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom).
        data_format (str): A string from: "NCHW", "NHWC". Specify the data format of
           the input data.
           Default is  "NCHW"
        name (str, optional) : The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[[1., 2., 3.],
                   [4., 5., 6.]]]]
            padding = [1, 1, 0, 0]
            Out = [[[[0. 1. 2. 3. 0.]
                     [0. 4. 5. 6. 0.]]]]

    Code Examples:
        .. code-block:: python

L
littletomatodonkey 已提交
1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284
            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_shape = (1, 1, 2, 3)
            pad = [1, 0, 1, 2]
            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
            my_pad = nn.ZeroPad2d(padding=pad)
            data = paddle.to_tensor(data)
            result = my_pad(data)
            print(result.numpy())
            # [[[[0. 0. 0. 0.]
            #    [0. 1. 2. 3.]
            #    [0. 4. 5. 6.]
            #    [0. 0. 0. 0.]
            #    [0. 0. 0. 0.]]]]
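
        A second, illustrative sketch (assumed, not part of the original example) applying
        the same pad list to an ``NHWC`` input; only the output shape is checked:

        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            # same pad list as above, but the channel axis now comes last
            input_shape = (1, 2, 3, 1)  # N, H, W, C
            pad = [1, 0, 1, 2]          # pad_left, pad_right, pad_top, pad_bottom
            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
            my_pad = nn.ZeroPad2d(padding=pad, data_format="NHWC")
            result = my_pad(paddle.to_tensor(data))
            # W grows by pad_left + pad_right, H grows by pad_top + pad_bottom
            print(result.shape)  # expected [1, 5, 4, 1]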
    """

    def __init__(self, padding, data_format="NCHW", name=None):
        super(ZeroPad2d, self).__init__()
        self._mode = "constant"
        self._data_format = data_format
        self._pad = padding
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     data_format=self._data_format,
                     name=self._name)


class ReplicationPad2d(layers.Layer):
    """
    This interface is used to construct a callable object of the ``ReplicationPad2d`` class.
    Pads the input tensor by replicating its boundary values.

    Parameters:
        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom).
        data_format (str): A string from: "NCHW", "NHWC". Specify the data format of the input data.
           Default is "NCHW".
        name (str, optional): The default value is None. Normally there is no need for
            the user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[[1., 2., 3.],
                   [4., 5., 6.]]]]
            padding = [1, 1, 0, 0]
            Out = [[[[1. 1. 2. 3. 3.]
                     [4. 4. 5. 6. 6.]]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_shape = (1, 1, 2, 3)
            pad = [1, 0, 1, 2]
            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
            my_pad = nn.ReplicationPad2d(padding=pad)
            data = paddle.to_tensor(data)
            result = my_pad(data)
            print(result.numpy())
            # [[[[1. 1. 2. 3.]
            #    [1. 1. 2. 3.]
            #    [4. 4. 5. 6.]
            #    [4. 4. 5. 6.]
            #    [4. 4. 5. 6.]]]]
    """

    def __init__(self, padding, data_format="NCHW", name=None):
        super(ReplicationPad2d, self).__init__()
        self._mode = "replicate"
        self._data_format = data_format
        self._pad = padding
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     data_format=self._data_format,
                     name=self._name)


class ReflectionPad2d(layers.Layer):
    """
    This interface is used to construct a callable object of the ``ReflectionPad2d`` class.
    Uses reflection of the input boundaries to pad the input tensor.

    Parameters:
        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom).
        data_format (str): A string from: "NCHW", "NHWC". Specify the data format of the input data.
           Default is "NCHW".
        name (str, optional): The default value is None. Normally there is no need for
            the user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[[1., 2., 3.],
                   [4., 5., 6.]]]]
            padding = [1, 1, 0, 0]
            Out = [[[[2. 1. 2. 3. 2.]
                     [5. 4. 5. 6. 5.]]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_shape = (1, 1, 4, 3)
            pad = [1, 0, 1, 2]
            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
            my_pad = nn.ReflectionPad2d(padding=pad)
            data = paddle.to_tensor(data)
            result = my_pad(data)
            print(result.numpy())
            # [[[[ 5.  4.  5.  6.]
            #    [ 2.  1.  2.  3.]
            #    [ 5.  4.  5.  6.]
            #    [ 8.  7.  8.  9.]
            #    [11. 10. 11. 12.]
            #    [ 8.  7.  8.  9.]
            #    [ 5.  4.  5.  6.]]]]
    """

    def __init__(self, padding, data_format="NCHW", name=None):
        super(ReflectionPad2d, self).__init__()
        self._mode = "reflect"
        self._data_format = data_format
        self._pad = padding
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     data_format=self._data_format,
                     name=self._name)


class ConstantPad3d(layers.Layer):
    """
    This interface is used to construct a callable object of the ``ConstantPad3d`` class.
    Uses a constant value to pad the input tensor.

    Parameters:
        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
        value (float32): The value to fill the padded areas. Default is 0.0
        data_format (str): A string from: "NCDHW", "NDHWC". Specify the data format of the input data.
           Default is "NCDHW".
        name (str, optional): The default value is None. Normally there is no need for
            the user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[[[1., 2., 3.],
                    [4., 5., 6.]]]]]
            padding = [1, 2, 0, 0, 0, 0]
            value = 0.0
            Out = [[[[[0. 1. 2. 3. 0. 0.]
                      [0. 4. 5. 6. 0. 0.]]]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_shape = (1, 1, 1, 2, 3)
            pad = [1, 0, 1, 2, 0, 0]
            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
            my_pad = nn.ConstantPad3d(padding=pad)
            data = paddle.to_tensor(data)
            result = my_pad(data)
            print(result.numpy())
            # [[[[[0. 0. 0. 0.]
            #     [0. 1. 2. 3.]
            #     [0. 4. 5. 6.]
            #     [0. 0. 0. 0.]
            #     [0. 0. 0. 0.]]]]]
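
        The six pad values map pairwise onto the last three axes of an ``NCDHW`` input:
        (pad_left, pad_right) on W, (pad_top, pad_bottom) on H and (pad_front, pad_back)
        on D. A minimal shape check of the example above (plain Python, illustrative only):

        .. code-block:: python

            # shapes from the example above: input (1, 1, 1, 2, 3), pad [1, 0, 1, 2, 0, 0]
            N, C, D, H, W = 1, 1, 1, 2, 3
            left, right, top, bottom, front, back = 1, 0, 1, 2, 0, 0
            out_shape = (N, C, D + front + back, H + top + bottom, W + left + right)
            print(out_shape)  # (1, 1, 1, 5, 4), matching the printed result above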
    """

    def __init__(self, padding, value=0.0, data_format="NCDHW", name=None):
        super(ConstantPad3d, self).__init__()
        self._mode = "constant"
        self._data_format = data_format
        self._pad = padding
        self._value = value
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     value=self._value,
                     data_format=self._data_format,
                     name=self._name)


class ReplicationPad3d(layers.Layer):
    """
    This interface is used to construct a callable object of the ``ReplicationPad3d`` class.
    Pads the input tensor by replicating its boundary values.

    Parameters:
        padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions
            of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
        data_format (str): A string from: "NCDHW", "NDHWC". Specify the data format of the input data.
           Default is "NCDHW".
        name (str, optional): The default value is None. Normally there is no need for
            the user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        None

    Examples:
        .. code-block:: text

            x = [[[[[1., 2., 3.],
                    [4., 5., 6.]]]]]
            padding = [1, 2, 0, 0, 0, 0]
            Out = [[[[[1. 1. 2. 3. 3. 3.]
                      [4. 4. 5. 6. 6. 6.]]]]]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            input_shape = (1, 1, 1, 2, 3)
            pad = [1, 0, 1, 2, 0, 0]
            data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
            my_pad = nn.ReplicationPad3d(padding=pad)
            data = paddle.to_tensor(data)
            result = my_pad(data)
            print(result.numpy())
            # [[[[[1. 1. 2. 3.]
            #     [1. 1. 2. 3.]
            #     [4. 4. 5. 6.]
            #     [4. 4. 5. 6.]
            #     [4. 4. 5. 6.]]]]]
    """

    def __init__(self, padding, data_format="NCDHW", name=None):
        super(ReplicationPad3d, self).__init__()
        self._mode = "replicate"
        self._data_format = data_format
        self._pad = padding
        self._name = name

    def forward(self, x):
        return F.pad(x,
                     pad=self._pad,
                     mode=self._mode,
                     data_format=self._data_format,
                     name=self._name)


class CosineSimilarity(layers.Layer):
    """
    This interface is used to compute the cosine similarity between ``x1`` and ``x2`` along the given axis.

    Parameters:
        axis (int): Dimension of vectors to compute cosine similarity. Default is 1.
        eps (float): Small value to avoid division by zero. Default is 1e-8.
    Returns:
        None

    Examples:
        .. code-block:: text

            Case 0:
                x1 = [[0.8024077  0.9927354  0.27238318 0.8344984 ]
                     [0.48949873 0.5797396  0.65444374 0.66510963]
                     [0.1031398  0.9614342  0.08365563 0.6796464 ]
                     [0.10760343 0.7461209  0.7726148  0.5801006 ]]
                x2 = [[0.62913156 0.1536727  0.9847992  0.04591406]
                     [0.9098952  0.15715368 0.8671125  0.3156102 ]
                     [0.4427798  0.54136837 0.5276275  0.32394758]
                     [0.3769419  0.8535014  0.48041078 0.9256797 ]]
                axis = 1
                eps = 1e-8
                Out: [0.5275037  0.8368967  0.75037485 0.9245899]

    Code Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import numpy as np
            paddle.disable_static()

            np.random.seed(0)
            x1 = np.random.rand(2,3)
            x2 = np.random.rand(2,3)
            x1 = paddle.to_tensor(x1)
            x2 = paddle.to_tensor(x2)

            cos_sim_func = nn.CosineSimilarity(axis=0)
            result = cos_sim_func(x1, x2)
            print(result.numpy())
            # [0.99806249 0.9817672  0.94987036]
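
        For reference, roughly the same quantity can be computed directly with numpy
        (this sketch ignores the exact ``eps`` handling inside the operator and only
        illustrates what "along axis" means):

        .. code-block:: python

            import numpy as np

            np.random.seed(0)
            x1 = np.random.rand(2, 3)
            x2 = np.random.rand(2, 3)
            axis, eps = 0, 1e-8

            # dot product over `axis` divided by the product of the norms over `axis`
            ref = (x1 * x2).sum(axis) / (
                np.linalg.norm(x1, axis=axis) * np.linalg.norm(x2, axis=axis) + eps)
            print(ref)  # close to the result printed above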
    """

    def __init__(self, axis=1, eps=1e-8):
        super(CosineSimilarity, self).__init__()
        self._axis = axis
        self._eps = eps

    def forward(self, x1, x2):
        return F.cosine_similarity(x1, x2, axis=self._axis, eps=self._eps)


class Embedding(layers.Layer):
    """
    **Embedding Layer**

    This interface is used to construct a callable object of the ``Embedding`` class.
    For specific usage, refer to code examples. It implements the function of the Embedding Layer.
    This layer is used to look up the embedding vector for each id provided by :attr:`x` .
    It automatically constructs a 2D embedding matrix based on the
    input :attr:`num_embeddings` and :attr:`embedding_dim`.

    The shape of the output Tensor is generated by appending an :attr:`embedding_dim` dimension
    after the last dimension of the input Tensor's shape.

    **Note:** The ids in :attr:`x` must satisfy :math:`0 <= id < num_embeddings` ,
    otherwise the program will throw an exception and exit.

    .. code-block:: text

        Case 1:

        input is a Tensor. padding_idx = -1
            input.data = [[1, 3], [2, 4], [4, 127]]
            input.shape = [3, 2]
        Given size = [128, 16]
        output is a Tensor:
            out.shape = [3, 2, 16]
            out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
                        [0.345421456, 0.524563927, ..., 0.144534654]],

                        [[0.345249859, 0.124939536, ..., 0.194353745],
                        [0.945345345, 0.435394634, ..., 0.435345365]],

                        [[0.945345345, 0.435394634, ..., 0.435345365],
                        [0.0,         0.0,         ..., 0.0        ]]]  # padding data
        Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
        All-zero data is output whenever an id equals 127.

    Parameters:
        num_embeddings (int): The size of the dictionary of embeddings.
        embedding_dim (int): The size of each embedding vector.
        padding_idx(int|long|None): padding_idx needs to be in the interval [-num_embeddings, num_embeddings).
            If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
            to :math:`num\_embeddings + padding\_idx` . It will output all-zero padding data whenever lookup
            encounters :math:`padding\_idx` in id, and the padding data will not be updated while training.
            If set to None, it has no effect on the output. Default: None.
        sparse(bool): The flag indicating whether to use sparse update. This parameter only
            affects the performance of the backward gradient update. It is recommended to set it
            to True because sparse update is faster. However, some optimizers do not support sparse update,
            such as :ref:`api_optimizer_AdadeltaOptimizer` , :ref:`api_optimizer_AdamaxOptimizer` ,
            :ref:`api_optimizer_DecayedAdagradOptimizer` , :ref:`api_optimizer_FtrlOptimizer` ,
            :ref:`api_optimizer_LambOptimizer` and :ref:`api_optimizer_LarsMomentumOptimizer` .
            In these cases, sparse must be False. Default: False.
        weight_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_ParamAttr` . In addition,
            user-defined or pre-trained word vectors can be loaded with the :attr:`weight_attr` parameter.
            The local word vectors need to be transformed into numpy format, and the shape of the local word
            vectors should be consistent with :attr:`num_embeddings` . Then :ref:`api_initializer_NumpyArrayInitializer`
            is used to load custom or pre-trained word vectors. See the code example for details.
        name(str|None): For detailed information, please refer
               to :ref:`api_guide_Name`. Usually there is no need to set it, and it
               is None by default.

    Attribute:
        **weight** (Parameter): the learnable weights of this layer.

    Returns:
        None

    Examples:

        .. code-block:: python

            import paddle
            import numpy as np

            x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
            y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32)
            paddle.disable_static(paddle.CPUPlace())
            x = paddle.to_tensor(x_data, stop_gradient=False)
            y = paddle.to_tensor(y_data, stop_gradient=False)

            embedding = paddle.nn.Embedding(10, 3, sparse=True)

            w0=np.full(shape=(10, 3), fill_value=2).astype(np.float32)
            embedding.weight.set_value(w0)
T
tangwei12 已提交
1668

T
tangwei12 已提交
1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681
            adam = paddle.optimizer.Adam(parameters=[embedding.weight], learning_rate=0.01)
            adam.clear_grad()

            # weight.shape = [10, 3]

            # x.data = [[3],[4],[5]]
            # x.shape = [3, 1]

            # out.data = [[2,2,2], [2,2,2], [2,2,2]]
            # out.shape = [3, 1, 3]
            out=embedding(x)
            out.backward()
            adam.step()
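
        A second sketch (the shapes and values below are illustrative assumptions)
        showing ``padding_idx`` together with pre-trained weights loaded through
        ``weight.set_value``:

        .. code-block:: python

            import paddle
            import numpy as np
            paddle.disable_static()

            # a hypothetical pre-trained table of shape [num_embeddings, embedding_dim]
            pretrained = np.ones((10, 3)).astype(np.float32)

            embedding = paddle.nn.Embedding(10, 3, padding_idx=0)
            embedding.weight.set_value(pretrained)

            x = paddle.to_tensor(np.array([[0], [4]]).astype(np.int64))
            out = embedding(x)
            # lookups of id 0 (the padding_idx) output all-zero vectors and are
            # not updated during training; other ids return their weight rows
            print(out.numpy())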

    """

    def __init__(self,
                 num_embeddings,
                 embedding_dim,
                 padding_idx=None,
                 sparse=False,
                 weight_attr=None,
                 name=None):
        super(Embedding, self).__init__()
        self._num_embeddings = num_embeddings
        self._embedding_dim = embedding_dim
        self._sparse = sparse
        self._is_distributed = False
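        # Normalize padding_idx: None disables padding (stored as -1);
        # a negative index is wrapped to num_embeddings + padding_idx.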
        self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
            num_embeddings + padding_idx)

        if self._num_embeddings <= 0:
            raise ValueError("num_embeddings must be greater than 0")

        if self._embedding_dim <= 0:
            raise ValueError("embedding_dim must be greater than 0")

        if self._padding_idx >= num_embeddings or self._padding_idx < -num_embeddings:
            raise ValueError("padding_idx must be within [-{}, {})".format(
                num_embeddings, num_embeddings))

        self._dtype = self._helper.get_default_dtype()
        self._size = [self._num_embeddings, self._embedding_dim]

        self._weight_attr = weight_attr
        self._remote_prefetch = False
        self._name = name
        self.weight = self.create_parameter(
            attr=self._weight_attr,
            shape=self._size,
            dtype=self._dtype,
            is_bias=False)

    def forward(self, x):
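        # Look up rows of self.weight by the integer ids in x; ids equal to
        # padding_idx yield all-zero vectors.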
        return F.embedding(
            x,
            weight=self.weight,
            padding_idx=self._padding_idx,
            sparse=self._sparse,
            name=self._name)