#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import six
from . import layers

__all__ = [
    "simple_img_conv_pool",
    "sequence_conv_pool",
    "glu",
    "scaled_dot_product_attention",
    "img_conv_group",
]


def simple_img_conv_pool(input,
                         num_filters,
                         filter_size,
                         pool_size,
                         pool_stride,
                         pool_padding=0,
                         pool_type='max',
                         global_pooling=False,
                         conv_stride=1,
                         conv_padding=0,
                         conv_dilation=1,
                         conv_groups=1,
                         param_attr=None,
                         bias_attr=None,
                         act=None,
                         use_cudnn=True):
    """
    The simple_img_conv_pool is composed of one Convolution2d and one Pool2d.

    Args:
        input (Variable): The input image with [N, C, H, W] format.
        num_filters(int): The number of filters. It is the same as the number of
            output feature map channels.
        filter_size (int|list|tuple): The filter size. If filter_size is a list or
            tuple, it must contain two integers, (filter_size_H, filter_size_W). Otherwise,
            the filter_size_H = filter_size_W = filter_size.
        pool_size (int|list|tuple): The pooling size of Pool2d layer. If pool_size
            is a list or tuple, it must contain two integers, (pool_size_H, pool_size_W).
            Otherwise, the pool_size_H = pool_size_W = pool_size.
        pool_stride (int|list|tuple): The pooling stride of Pool2d layer. If pool_stride
            is a list or tuple, it must contain two integers, (pooling_stride_H, pooling_stride_W).
            Otherwise, the pooling_stride_H = pooling_stride_W = pool_stride.
        pool_padding (int|list|tuple): The padding of Pool2d layer. If pool_padding is a list or
            tuple, it must contain two integers, (pool_padding_H, pool_padding_W).
            Otherwise, the pool_padding_H = pool_padding_W = pool_padding. Default 0.
        pool_type (str): Pooling type can be :math:`max` for max-pooling and :math:`avg` for
            average-pooling. Default :math:`max`.
        global_pooling (bool): Whether to use global pooling. If global_pooling = True,
            pool_size and pool_padding will be ignored. Default: False.
        conv_stride (int|list|tuple): The stride size of the conv2d Layer. If stride is a
            list or tuple, it must contain two integers, (conv_stride_H, conv_stride_W). Otherwise,
            the conv_stride_H = conv_stride_W = conv_stride. Default: conv_stride = 1.
        conv_padding (int|list|tuple): The padding size of the conv2d Layer. If padding is
            a list or  tuple, it must contain two integers, (conv_padding_H, conv_padding_W).
            Otherwise, the conv_padding_H = conv_padding_W = conv_padding. Default: conv_padding = 0.
        conv_dilation (int|list|tuple): The dilation size of the conv2d Layer. If dilation is
            a list or tuple, it must contain two integers, (conv_dilation_H, conv_dilation_W).
            Otherwise, the conv_dilation_H = conv_dilation_W = conv_dilation. Default: conv_dilation = 1.
        conv_groups (int): The number of groups of the conv2d Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1.
        param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
            of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
            and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`.
            Default: None.
        bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        act (str): Activation type for conv2d, if it is set to None, activation is not
            appended. Default: None.
        use_cudnn (bool): Whether to use the cudnn kernel. It is valid only when
            the cudnn library is installed. Default: True.

    Returns:
        Variable: The result of input after Convolution2d and Pool2d.

    Examples:
        .. code-block:: python

            img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
            conv_pool = fluid.nets.simple_img_conv_pool(input=img,
                                                        filter_size=5,
                                                        num_filters=20,
                                                        pool_size=2,
                                                        pool_stride=2,
                                                        act="relu")
    """
    conv_out = layers.conv2d(
        input=input,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=conv_stride,
        padding=conv_padding,
        dilation=conv_dilation,
        groups=conv_groups,
        param_attr=param_attr,
        bias_attr=bias_attr,
        act=act,
        use_cudnn=use_cudnn)

    pool_out = layers.pool2d(
        input=conv_out,
        pool_size=pool_size,
        pool_type=pool_type,
        pool_stride=pool_stride,
        pool_padding=pool_padding,
        global_pooling=global_pooling,
        use_cudnn=use_cudnn)
    return pool_out


def img_conv_group(input,
                   conv_num_filter,
                   pool_size,
                   conv_padding=1,
                   conv_filter_size=3,
                   conv_act=None,
                   param_attr=None,
                   conv_with_batchnorm=False,
                   conv_batchnorm_drop_rate=0.0,
                   pool_stride=1,
                   pool_type="max",
                   use_cudnn=True):
    """
    The Image Convolution Group is composed of Convolution2d, BatchNorm, DropOut,
    and Pool2d. According to the input arguments, img_conv_group performs a series
    of computations on the input using Convolution2d, BatchNorm and DropOut, and
    passes the last result to Pool2d.

    Args:
        input (Variable): The input image with [N, C, H, W] format.
        conv_num_filter(list|tuple): The number of filters for each Conv2d Layer in this group.
        pool_size (int|list|tuple): The pooling size of Pool2d Layer. If pool_size
            is a list or tuple, it must contain two integers, (pool_size_H, pool_size_W).
            Otherwise, the pool_size_H = pool_size_W = pool_size.
        conv_padding (int|list|tuple): The padding size of the Conv2d Layer. If padding is
            a list or tuple, its length must be equal to the length of conv_num_filter.
            Otherwise, the conv_padding of all Conv2d Layers is the same. Default 1.
        conv_filter_size (int|list|tuple): The filter size. If filter_size is a list or
            tuple, its length must be equal to the length of conv_num_filter.
            Otherwise, the conv_filter_size of all Conv2d Layers is the same. Default 3.
        conv_act (str): Activation type for Conv2d Layer that is not followed by BatchNorm.
            Default: None.
        param_attr (ParamAttr): The parameters to the Conv2d Layer. Default: None
        conv_with_batchnorm (bool|list): Indicates whether to use BatchNorm after Conv2d Layer.
            If conv_with_batchnorm is a list, its length must be equal to the length of
            conv_num_filter. Otherwise, conv_with_batchnorm indicates whether every
            Conv2d Layer is followed by a BatchNorm. Default False.
        conv_batchnorm_drop_rate (float|list): Indicates the drop_rate of Dropout Layer
            after BatchNorm. If conv_batchnorm_drop_rate is a list, its length must be
            equal to the length of conv_num_filter. Otherwise, drop_rate of all Dropout
            Layers is conv_batchnorm_drop_rate. Default 0.0.
        pool_stride (int|list|tuple): The pooling stride of Pool2d layer. If pool_stride
            is a list or tuple, it must contain two integers, (pooling_stride_H,
            pooling_stride_W). Otherwise, the pooling_stride_H = pooling_stride_W = pool_stride.
            Default 1.
        pool_type (str): Pooling type can be :math:`max` for max-pooling and :math:`avg` for
            average-pooling. Default :math:`max`.
        use_cudnn (bool): Whether to use the cudnn kernel. It is valid only when
            the cudnn library is installed. Default: True.

    Returns:
        Variable: The final result after serial computation using Convolution2d,
            BatchNorm, DropOut, and Pool2d.

    Examples:
        .. code-block:: python

            img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
            conv_pool = fluid.nets.img_conv_group(input=img,
                                                  conv_padding=1,
                                                  conv_num_filter=[3, 3],
                                                  conv_filter_size=3,
                                                  conv_act="relu",
                                                  pool_size=2,
                                                  pool_stride=2)
    """
    tmp = input
    assert isinstance(conv_num_filter, list) or \
        isinstance(conv_num_filter, tuple)

    def __extend_list__(obj):
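        # Broadcast a scalar argument to a per-conv-layer list; a list/tuple
        # argument must already have one entry per conv layer.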
        if not hasattr(obj, '__len__'):
            return [obj] * len(conv_num_filter)
        else:
            assert len(obj) == len(conv_num_filter)
            return obj

    conv_padding = __extend_list__(conv_padding)
    conv_filter_size = __extend_list__(conv_filter_size)
    param_attr = __extend_list__(param_attr)
    conv_with_batchnorm = __extend_list__(conv_with_batchnorm)
    conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate)

    for i in six.moves.range(len(conv_num_filter)):
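        # When a BatchNorm follows this conv layer, defer the activation to
        # the batch_norm layer and run conv2d without one.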
        local_conv_act = conv_act
        if conv_with_batchnorm[i]:
            local_conv_act = None

        tmp = layers.conv2d(
            input=tmp,
            num_filters=conv_num_filter[i],
            filter_size=conv_filter_size[i],
            padding=conv_padding[i],
            param_attr=param_attr[i],
            act=local_conv_act,
            use_cudnn=use_cudnn)

        if conv_with_batchnorm[i]:
            tmp = layers.batch_norm(input=tmp, act=conv_act, in_place=True)
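            # Dropout after BatchNorm is applied only when the configured
            # rate is effectively non-zero.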
            drop_rate = conv_batchnorm_drop_rate[i]
            if abs(drop_rate) > 1e-5:
                tmp = layers.dropout(x=tmp, dropout_prob=drop_rate)

    pool_out = layers.pool2d(
        input=tmp,
        pool_size=pool_size,
        pool_type=pool_type,
        pool_stride=pool_stride,
        use_cudnn=use_cudnn)
    return pool_out


def sequence_conv_pool(input,
                       num_filters,
                       filter_size,
                       param_attr=None,
                       act="sigmoid",
                       pool_type="max",
                       bias_attr=None):
    """
    The sequence_conv_pool is composed with Sequence Convolution and Pooling.

    Args:
        input (Variable): The input of sequence_conv, which supports variable-length
            input sequences. The underlying data of input is a matrix with shape
            (T, N), where T is the total number of time steps in this mini-batch
            and N is the input_hidden_size.
        num_filters(int): The number of filters.
        filter_size (int): The filter size.
        param_attr (ParamAttr): The parameters to the Sequence_conv Layer. Default: None.
        act (str): Activation type for Sequence_conv Layer. Default: "sigmoid".
        pool_type (str): Pooling type can be :math:`max` for max-pooling, :math:`average` for
            average-pooling, :math:`sum` for sum-pooling, :math:`sqrt` for sqrt-pooling.
            Default :math:`max`.
        bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of sequence_conv.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, sequence_conv
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.

    Returns:
        Variable: The final result after Sequence Convolution and Pooling.

    Examples:
        .. code-block:: python

            input_dim = len(word_dict)
            emb_dim = 128
            hid_dim = 512
            data = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1)
            emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True)
            seq_conv = fluid.nets.sequence_conv_pool(input=emb,
                                                     num_filters=hid_dim,
                                                     filter_size=3,
                                                     act="tanh",
                                                     pool_type="sqrt")
    """
    conv_out = layers.sequence_conv(
        input=input,
        num_filters=num_filters,
        filter_size=filter_size,
        param_attr=param_attr,
        bias_attr=bias_attr,
        act=act)

    pool_out = layers.sequence_pool(input=conv_out, pool_type=pool_type)
    return pool_out


def glu(input, dim=-1):
    """
    The Gated Linear Units (GLU) is composed of split, sigmoid activation and
    element-wise multiplication. Specifically, GLU splits the input into two
    equal-sized parts, :math:`a` and :math:`b`, along the given dimension, and
    then computes the following:

        .. math::

            {GLU}(a, b)= a \otimes \sigma(b)

    Refer to `Language Modeling with Gated Convolutional Networks
    <https://arxiv.org/pdf/1612.08083.pdf>`_.

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
        dim (int): The dimension along which to split. If :math:`dim < 0`, the
            dimension to split along is :math:`rank(input) + dim`. Default -1.

    Returns:
        Variable: The output variable, with half the size of input along the split dimension.

    Examples:
        .. code-block:: python

            data = fluid.layers.data(
                name="words", shape=[-1, 6, 3, 9], dtype="float32")
            # shape of output: [-1, 3, 3, 9]
            output = fluid.nets.glu(input=data, dim=1)
    """

    a, b = layers.split(input, num_or_sections=2, dim=dim)
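    # Gate the first half with the sigmoid of the second half:
    # GLU(a, b) = a * sigmoid(b).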
    act_b = layers.sigmoid(x=b)
    out = layers.elementwise_mul(x=a, y=act_b)
    return out


def scaled_dot_product_attention(queries,
                                 keys,
                                 values,
                                 num_heads=1,
                                 dropout_rate=0.):
    """
    The dot-product attention.

    Attention mechanism can be seen as mapping a query and a set of key-value
    pairs to an output. The output is computed as a weighted sum of the values,
    where the weight assigned to each value is computed by a compatibility
    function (dot-product here) of the query with the corresponding key.

    The dot-product attention can be implemented through (batch) matrix
    multiplication as follows:

        .. math::

            Attention(Q, K, V)= softmax(\\frac{QK^\mathrm{T}}{\sqrt{d_k}})V

    Refer to `Attention Is All You Need
    <https://arxiv.org/pdf/1706.03762.pdf>`_.

    Args:
        queries (Variable): The input variable which should be a 3-D Tensor.
        keys (Variable): The input variable which should be a 3-D Tensor.
        values (Variable): The input variable which should be a 3-D Tensor.
        num_heads (int): Head number to compute the scaled dot product
            attention. Default: 1.
        dropout_rate (float): The dropout rate to drop the attention weight.
            Default: 0.0.

    Returns:
        Variable: A 3-D Tensor computed by multi-head scaled dot product\
            attention.

    Raises:
        ValueError: If input queries, keys, values are not 3-D Tensors.

    NOTES:
        1. When num_heads > 1, three linear projections are learned respectively
           to map input queries, keys and values into queries', keys' and values'.
           queries', keys' and values' have the same shapes with queries, keys
           and values.
        2. When num_heads == 1, scaled_dot_product_attention has no learnable
           parameters.

    Examples:
        .. code-block:: python

            queries = fluid.layers.data(name="queries",
                                        shape=[3, 5, 9],
                                        dtype="float32",
                                        append_batch_size=False)
            queries.stop_gradient = False
            keys = fluid.layers.data(name="keys",
                                     shape=[3, 6, 9],
                                     dtype="float32",
                                     append_batch_size=False)
            keys.stop_gradient = False
            values = fluid.layers.data(name="values",
                                       shape=[3, 6, 10],
                                       dtype="float32",
                                       append_batch_size=False)
            values.stop_gradient = False
            contexts = fluid.nets.scaled_dot_product_attention(queries, keys, values)
            contexts.shape  # [3, 5, 10]
    """
    if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs quries, keys and values should all be 3-D tensors.")

    if queries.shape[-1] != keys.shape[-1]:
        raise ValueError(
            "The hidden size of queries and keys should be the same.")
    if keys.shape[-2] != values.shape[-2]:
        raise ValueError(
            "The max sequence length in query batch and in key batch "
            "should be the same.")
    if keys.shape[-1] % num_heads != 0:
        raise ValueError("The hidden size of keys (%d) must be divisible "
                         "by the number of attention heads (%d)." %
                         (keys.shape[-1], num_heads))
    if values.shape[-1] % num_heads != 0:
        raise ValueError("The hidden size of values (%d) must be divisible "
                         "by the number of attention heads (%d)." %
                         (values.shape[-1], num_heads))

    def __compute_qkv(queries, keys, values, num_heads):
        """
        Add linear projection to queries, keys, and values.

        Args:
            queries(Tensor): a 3-D input Tensor.
            keys(Tensor): a 3-D input Tensor.
            values(Tensor): a 3-D input Tensor.
            num_heads(int): The number of heads. Linearly project the inputs
                            ONLY when num_heads > 1.

        Returns:
            Tensor: linearly projected output Tensors: queries', keys' and
                    values'. They have the same shapes with queries, keys and
                    values.
        """

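        # With a single head the inputs are used as-is; otherwise learn one
        # fc projection per input, preserving the original hidden sizes.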
        if num_heads == 1:
            return queries, keys, values

        q = layers.fc(input=queries, size=queries.shape[-1], num_flatten_dims=2)
        k = layers.fc(input=keys, size=keys.shape[-1], num_flatten_dims=2)
        v = layers.fc(input=values, size=values.shape[-1], num_flatten_dims=2)
        return q, k, v

    def __split_heads(x, num_heads):
        """
        Reshape the last dimension of input tensor x so that it becomes two
        dimensions.

        Args:
            x(Tensor): a 3-D input Tensor.
            num_heads(int): The number of heads.

        Returns:
            Tensor: a 4-D Tensor with shape [batch_size, num_heads,
                    max_sequence_length, hidden_size // num_heads], or the
                    unchanged 3-D input when num_heads == 1.
        """
        if num_heads == 1:
            return x

        hidden_size = x.shape[-1]
        # reshape the 3-D input: [batch_size, max_sequence_length, hidden_dim]
        # into a 4-D output:
        # [batch_size, max_sequence_length, num_heads, hidden_size_per_head].
        reshaped = layers.reshape(
            x=x,
            shape=list(x.shape[:-1]) + [num_heads, hidden_size // num_heads])

        # permute the dimensions into:
        # [batch_size, num_heads, max_sequence_len, hidden_size_per_head]
        return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])

    def __combine_heads(x):
        """
        Reshape the last two dimensions of input tensor x so that they become
        one dimension.

        Args:
            x(Tensor): a 4-D input Tensor with shape
                       [bs, num_heads, max_sequence_length, hidden_dim].

        Returns:
            Tensor: a Tensor with shape
                    [bs, max_sequence_length, num_heads * hidden_dim].
        """

        if len(x.shape) == 3: return x
        if len(x.shape) != 4:
            raise ValueError("Input(x) should be a 4-D Tensor.")

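        # Undo __split_heads: move the heads dimension back next to the
        # hidden dimension, then merge the two into one hidden size.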
        trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
        return layers.reshape(
            x=trans_x,
            shape=list(
                map(int, [
                    trans_x.shape[0], trans_x.shape[1], trans_x.shape[2] *
                    trans_x.shape[3]
                ])))

    q, k, v = __compute_qkv(queries, keys, values, num_heads)

    q = __split_heads(q, num_heads)
    k = __split_heads(k, num_heads)
    v = __split_heads(v, num_heads)

    key_dim_per_head = keys.shape[-1] // num_heads
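    # Scale the queries by 1/sqrt(key_dim_per_head) before the dot product,
    # then compute the attention scores Q * K^T.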
    scaled_q = layers.scale(x=q, scale=key_dim_per_head**-0.5)
    product = layers.matmul(x=scaled_q, y=k, transpose_y=True)

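    # Softmax over the last (key) dimension: flatten to 2-D, apply softmax
    # through reshape's fused act argument, then restore the original shape.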
    weights = layers.reshape(
        x=layers.reshape(
            x=product, shape=[-1, product.shape[-1]], act="softmax"),
        shape=product.shape)
    if dropout_rate:
        weights = layers.dropout(
            weights, dropout_prob=dropout_rate, is_test=False)
    ctx_multiheads = layers.matmul(weights, v)
    return __combine_heads(ctx_multiheads)