import re
import numpy as np
import paddle.fluid as fluid


def einsum4x4(equation, x, y):
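    """A restricted einsum for two 4-D tensors, implemented with transpose and
    matmul because Paddle Fluid has no native einsum. Supports equations such as
    'ibnd,jbnd->ijbn' in which exactly one index is contracted."""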
    idx_x, idx_y, idx_z = re.split(",|->", equation)
    repeated_idx = list(set(idx_x + idx_y) - set(idx_z))
    
    unique_idx_x = list(set(idx_x) - set(idx_y))
    unique_idx_y = list(set(idx_y) - set(idx_x))
    common_idx = list(set(idx_x) & set(idx_y) - set(repeated_idx))

    new_idx_x = common_idx + unique_idx_x + repeated_idx
    new_idx_y = common_idx + unique_idx_y + repeated_idx
    new_idx_z = common_idx + unique_idx_x + unique_idx_y

    perm_x = [ idx_x.index(i) for i in new_idx_x]
    perm_y = [ idx_y.index(i) for i in new_idx_y]
    perm_z = [ new_idx_z.index(i) for i in idx_z]
 
    x = fluid.layers.transpose(x, perm=perm_x)
    y = fluid.layers.transpose(y, perm=perm_y)
    z = fluid.layers.matmul(x=x, y=y, transpose_y=True)
    z = fluid.layers.transpose(z, perm=perm_z)
    return z


def positional_embedding(pos_seq, inv_freq, bsz=None):
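    """Build sinusoidal positional embeddings of shape [seq_len, 1, d_model],
    tiled to [seq_len, bsz, d_model] when `bsz` is given."""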
    pos_seq = fluid.layers.reshape(pos_seq, [-1, 1])
    inv_freq = fluid.layers.reshape(inv_freq, [1, -1])
    sinusoid_inp = fluid.layers.matmul(pos_seq, inv_freq)
    pos_emb = fluid.layers.concat(input=[fluid.layers.sin(sinusoid_inp), 
                                  fluid.layers.cos(sinusoid_inp)], axis=-1)
    pos_emb = fluid.layers.unsqueeze(pos_emb, [1])
    if bsz is not None:
        pos_emb = fluid.layers.expand(pos_emb, [1, bsz, 1])

    return pos_emb


def positionwise_ffn(inp, d_model, d_inner, dropout_prob, param_initializer=None,
                     act_type='relu', name='ff'):
    """Position-wise Feed-forward Network."""
    if act_type not in ['relu', 'gelu']:
        raise ValueError('Unsupported activation type {}'.format(act_type))

    output = fluid.layers.fc(input=inp, size=d_inner, act=act_type,
                           num_flatten_dims=2,
                           param_attr=fluid.ParamAttr(
                             name=name+'_layer_1_weight', initializer=param_initializer),
                           bias_attr=name+'_layer_1_bias')
    output = fluid.layers.dropout(output, dropout_prob=dropout_prob,
                                dropout_implementation="upscale_in_train", is_test=False)
    output = fluid.layers.fc(output, size=d_model,
                           num_flatten_dims=2,
                           param_attr=fluid.ParamAttr(
                               name=name+'_layer_2_weight', initializer=param_initializer),
                           bias_attr=name+'_layer_2_bias')
    output = fluid.layers.dropout(output, dropout_prob=dropout_prob,
                             dropout_implementation="upscale_in_train", is_test=False)
    output = fluid.layers.layer_norm(output + inp, begin_norm_axis=len(output.shape)-1,
                                   epsilon=1e-12,
                                   param_attr=fluid.ParamAttr(name=name+'_layer_norm_scale',
                                       initializer=fluid.initializer.Constant(1.)),
                                   bias_attr=fluid.ParamAttr(name+'_layer_norm_bias',
                                       initializer=fluid.initializer.Constant(0.)))
    return output


def head_projection(h, d_model, n_head, d_head, param_initializer, name=''):
    """Project hidden states to a specific head with a 4D-shape."""
    proj_weight=fluid.layers.create_parameter(
                shape=[d_model, n_head, d_head],
                dtype=h.dtype,
                attr=fluid.ParamAttr(name=name+'_weight', initializer=param_initializer),
                is_bias=False)
 
    # ibh,hnd->ibnd 
    head = fluid.layers.mul(x=h, y=proj_weight, x_num_col_dims=2, y_num_col_dims=1)
    return head 


def post_attention(h, attn_vec, d_model, n_head, d_head, dropout,
                   param_initializer, residual=True, name=''):
    """Post-attention processing."""
    # post-attention projection (back to `d_model`)
    proj_o=fluid.layers.create_parameter(
                shape=[d_model, n_head, d_head],
                dtype=h.dtype,
                attr=fluid.ParamAttr(name=name+'_o_weight', initializer=param_initializer),
                is_bias=False)
    # ibnd,hnd->ibh
    proj_o = fluid.layers.transpose(proj_o, perm=[1, 2, 0])
    attn_out = fluid.layers.mul(x=attn_vec, y=proj_o, x_num_col_dims=2, y_num_col_dims=2)

    attn_out = fluid.layers.dropout(attn_out, dropout_prob=dropout,
                             dropout_implementation="upscale_in_train", is_test=False)

    if residual:
        output = fluid.layers.layer_norm(attn_out + h, begin_norm_axis=len(attn_out.shape)-1,
                                   epsilon=1e-12,
                                   param_attr=fluid.ParamAttr(name=name+'_layer_norm_scale',
                                       initializer=fluid.initializer.Constant(1.)),
                                   bias_attr=fluid.ParamAttr(name+'_layer_norm_bias',
                                       initializer=fluid.initializer.Constant(0.)))
    else:
        output = fluid.layers.layer_norm(attn_out, begin_norm_axis=len(attn_out.shape)-1,
                                   epsilon=1e-12,
                                   param_attr=fluid.ParamAttr(name=name+'_layer_norm_scale',
                                       initializer=fluid.initializer.Constant(1.)),
                                   bias_attr=fluid.ParamAttr(name+'_layer_norm_bias',
                                       initializer=fluid.initializer.Constant(0.)))

    return output


def abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, scale):
    """Core absolute positional attention operations."""

    attn_score = einsum4x4('ibnd,jbnd->ijbn', q_head, k_head) 

    attn_score *= scale
    if attn_mask is not None:
        attn_score = attn_score - 1e30 * attn_mask

    # attention probability
    attn_prob = fluid.layers.softmax(attn_score, axis=1)
    attn_prob = fluid.layers.dropout(attn_prob, dropout_prob=dropatt, 
                  dropout_implementation="upscale_in_train", is_test=False)

    # attention output
    attn_vec = einsum4x4('ijbn,jbnd->ibnd', attn_prob, v_head)

    return attn_vec


def rel_attn_core(q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat,
                  r_w_bias, r_r_bias, r_s_bias, attn_mask, dropatt,
                  scale):
    """Core relative positional attention operations."""
    ## content based attention score
    ac = einsum4x4('ibnd,jbnd->ijbn', fluid.layers.elementwise_add(q_head, r_w_bias, 2), k_head_h) 

    # position based attention score
    bd = einsum4x4('ibnd,jbnd->ijbn', fluid.layers.elementwise_add(q_head, r_r_bias, 2), k_head_r)

    #klen = fluid.layers.slice(fluid.layers.shape(ac), axes=[0], starts=[1], ends=[2])
 
    bd = rel_shift(bd, klen=ac.shape[1])

    # segment based attention score
    if seg_mat is None:
        ef = 0
    else:
        ef = 0
        """
        bsz = fluid.layers.slice(fluid.layers.shape(q_head), axes=[0], starts=[1], ends=[2])
        bsz.stop_gradient = True
        """
        #seg_embed = fluid.layers.unsqueeze(input=seg_embed, axes=[0])
        seg_embed = fluid.layers.stack([seg_embed]*q_head.shape[0], axis=0)
    
        ef = einsum4x4('ibnd,isnd->ibns', fluid.layers.elementwise_add(q_head, r_s_bias, 2), seg_embed)
        ef = einsum4x4('ijbs,ibns->ijbn', seg_mat, ef)

    # merge attention scores and perform masking
    attn_score = (ac + bd + ef) * scale

    if attn_mask is not None:
        # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
        attn_score = attn_score - 1e30 * attn_mask

    # attention probability
    #attn_prob = fluid.layers.softmax(attn_score, axis=1)
    attn_score = fluid.layers.transpose(attn_score, [0, 2, 3, 1])
    attn_prob = fluid.layers.softmax(attn_score)
    attn_prob = fluid.layers.transpose(attn_prob, [0, 3, 1, 2])
    attn_prob = fluid.layers.dropout(attn_prob, dropatt, 
                                   dropout_implementation="upscale_in_train")

    # attention output
    attn_vec = einsum4x4('ijbn,jbnd->ibnd', attn_prob, v_head_h)
    return attn_vec


def rel_shift(x, klen=-1):
    """perform relative shift to form the relative attention score."""
    x_size = x.shape
    x = fluid.layers.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]])
    x = fluid.layers.slice(x,  axes=[0], starts=[1], ends=[x_size[1]])
    x = fluid.layers.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]])
    x = fluid.layers.slice(x, axes=[1], starts=[0], ends=[klen])

    return x


def _cache_mem(curr_out, prev_mem, mem_len, reuse_len=None):
    """cache hidden states into memory."""
    if mem_len is None or mem_len == 0:
        return None
    else:
        if reuse_len is not None and reuse_len > 0:
            curr_out = curr_out[:reuse_len]

        if prev_mem is None:
            new_mem = curr_out[-mem_len:]
        else:
            new_mem = fluid.layers.concat([prev_mem, curr_out], axis=0)[-mem_len:]

    new_mem.stop_gradient = True
    return new_mem


def relative_positional_encoding(qlen, klen, d_model, clamp_len, attn_type,
                                 bi_data, bsz=None, dtype=None):
    """create relative positional encoding."""
    freq_seq = fluid.layers.range(0, d_model, 2.0, 'float32')
    if dtype is not None and dtype != 'float32':
        freq_seq = fluid.layers.cast(freq_seq, dtype=dtype)
    inv_freq = 1 / (10000 ** (freq_seq / d_model))

    if attn_type == 'bi':
        beg, end = klen, -qlen
    elif attn_type == 'uni':
        beg, end = klen, -1
    else:
        raise ValueError('Unknown `attn_type` {}.'.format(attn_type))

    if bi_data:
        fwd_pos_seq = fluid.layers.range(beg, end, -1.0, 'float32')
        bwd_pos_seq = fluid.layers.range(-beg, -end, 1.0, 'float32')

        if dtype is not None and dtype != 'float32':
            fwd_pos_seq = fluid.layers.cast(fwd_pos_seq, dtype=dtype)
            bwd_pos_seq = fluid.layers.cast(bwd_pos_seq, dtype=dtype)

        if clamp_len > 0:
            fwd_pos_seq = fluid.layers.clip(fwd_pos_seq, -clamp_len, clamp_len)
            bwd_pos_seq = fluid.layers.clip(bwd_pos_seq, -clamp_len, clamp_len)

        if bsz is not None:
            # With bi_data, the batch size should be divisible by 2.
            assert bsz % 2 == 0
            fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz//2)
            bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq, bsz//2)
        else:
            fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq)
            bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq)

        pos_emb = fluid.layers.concat([fwd_pos_emb, bwd_pos_emb], axis=1)
    else:
        fwd_pos_seq = fluid.layers.range(beg, end, -1.0, 'float32')
        if dtype is not None and dtype != 'float32':
            fwd_pos_seq = fluid.layers.cast(fwd_pos_seq, dtype=dtype)
        if clamp_len > 0:
            fwd_pos_seq = fluid.layers.clip(fwd_pos_seq, -clamp_len, clamp_len)
        pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz)
        fluid.layers.reshape(pos_emb, [2*qlen, -1, d_model], inplace=True)
    return pos_emb


def rel_multihead_attn(h, r, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed,
                       attn_mask, mems, d_model, n_head, d_head, dropout,
                       dropatt, initializer, name=''):
    """Multi-head attention with relative positional encoding."""

    scale = 1 / (d_head ** 0.5)
    if mems is not None and len(mems.shape) > 1:
        cat = fluid.layers.concat([mems, h], 0)
    else:
        cat = h

    # content heads
    q_head_h = head_projection(
        h, d_model, n_head, d_head, initializer, name+'_rel_attn_q')
    k_head_h = head_projection(
        cat, d_model, n_head, d_head, initializer, name+'_rel_attn_k')
    v_head_h = head_projection(
        cat, d_model, n_head, d_head, initializer, name+'_rel_attn_v')

    # positional heads
    k_head_r = head_projection(
        r, d_model, n_head, d_head, initializer, name+'_rel_attn_r')
    
    # core attention ops
    attn_vec = rel_attn_core(
        q_head_h, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias,
        r_r_bias, r_s_bias, attn_mask, dropatt, scale)

    # post processing
    output = post_attention(h, attn_vec, d_model, n_head, d_head, dropout, initializer, name=name+'_rel_attn')

    return output


def transformer_xl(inp_k, n_token, n_layer, d_model, n_head,
                d_head, d_inner, dropout, dropatt, attn_type,
                bi_data, initializer, mem_len=None,
                inp_q=None, mems=None,
                same_length=False, clamp_len=-1, untie_r=False,
                input_mask=None,
                perm_mask=None, seg_id=None, reuse_len=None,
                ff_activation='relu', target_mapping=None,
                use_fp16=False, name='', **kwargs):
    """
    Defines a Transformer-XL computation graph with additional
    support for XLNet.
    Args:
    inp_k: int32 Tensor in shape [len, bsz], the input token IDs.
    seg_id: int32 Tensor in shape [len, bsz], the input segment IDs.
    input_mask: float32 Tensor in shape [len, bsz], the input mask.
        0 for real tokens and 1 for padding.
    mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
        from previous batches. The length of the list equals n_layer.
        If None, no memory is used.
    perm_mask: float32 Tensor in shape [len, len, bsz].
        If perm_mask[i, j, k] = 0, i attend to j in batch k;
        if perm_mask[i, j, k] = 1, i does not attend to j in batch k.
        If None, each position attends to all the others.
    target_mapping: float32 Tensor in shape [num_predict, len, bsz].
        If target_mapping[i, j, k] = 1, the i-th predict in batch k is
        on the j-th token.
        Only used during pretraining for partial prediction.
        Set to None during finetuning.
    inp_q: float32 Tensor in shape [len, bsz].
        1 for tokens with losses and 0 for tokens without losses.
        Only used during pretraining for two-stream attention.
        Set to None during finetuning.
    n_layer: int, the number of layers.
    d_model: int, the hidden size.
    n_head: int, the number of attention heads.
    d_head: int, the dimension size of each attention head.
    d_inner: int, the hidden size in feed-forward layers.
    ff_activation: str, "relu" or "gelu".
    untie_r: bool, whether to untie the biases in attention.
    n_token: int, the vocab size.
    is_training: bool, whether in training mode.
    use_tpu: bool, whether TPUs are used.
    use_fp16: bool, whether to use float16 instead of float32.
    dropout: float, dropout rate.
    dropatt: float, dropout rate on attention probabilities.
    init: str, the initialization scheme, either "normal" or "uniform".
    init_range: float, initialize the parameters with a uniform distribution
        in [-init_range, init_range]. Only effective when init="uniform".
    init_std: float, initialize the parameters with a normal distribution
        with mean 0 and stddev init_std. Only effective when init="normal".
    mem_len: int, the number of tokens to cache.
    reuse_len: int, the number of tokens in the current batch to be cached
        and reused in the future.
    bi_data: bool, whether to use bidirectional input pipeline.
        Usually set to True during pretraining and False during finetuning.
    clamp_len: int, clamp all relative distances larger than clamp_len.
        -1 means no clamping.
    same_length: bool, whether to use the same attention length for each token.
    summary_type: str, "last", "first", "mean", or "attn". The method
        to pool the input to get a vector representation.
    initializer: a Paddle Fluid initializer.
    name: name prefix for the parameters in the computation graph.
    """
    print('memory input {}'.format(mems))
    data_type = "float16" if use_fp16 else "float32"
    print('Use float type {}'.format(data_type))

    qlen = inp_k.shape[0]
    mlen = mems[0].shape[0] if mems is not None else 0
    klen = mlen + qlen
    bsz = fluid.layers.slice(fluid.layers.shape(inp_k), axes=[0], starts=[1], ends=[2])

    ##### Attention mask
    # causal attention mask
    if attn_type == 'uni':
        attn_mask = fluid.layers.create_global_var(
                    name='attn_mask', 
                        shape=[qlen, klen, 1, 1], 
                        value=0.0, 
                        dtype=data_type, persistable=True)
    elif attn_type == 'bi':
        attn_mask = None
    else:
        raise ValueError('Unsupported attention type: {}'.format(attn_type))

    # data mask: input mask & perm mask
    if input_mask is not None and perm_mask is not None:
        data_mask = fluid.layers.unsqueeze(input_mask, [0]) + perm_mask
    elif input_mask is not None and perm_mask is None:
        data_mask = fluid.layers.unsqueeze(input_mask, [0])
        print("input mask shape", input_mask.shape)
    elif input_mask is None and perm_mask is not None:
        data_mask = perm_mask
    else:
        data_mask = None

    if data_mask is not None:
        # all mems can be attended to
        mems_mask = fluid.layers.zeros(shape=[data_mask.shape[0], mlen, 1], dtype='float32')
        mems_mask = fluid.layers.expand(mems_mask, [1, 1, bsz])
        data_mask = fluid.layers.concat([mems_mask, data_mask], 1)
        if attn_mask is None:
            attn_mask = fluid.layers.unsqueeze(data_mask, [-1])
        else:
            attn_mask += fluid.layers.unsqueeze(data_mask, [-1])
        print("mems_mask, data_mask, attn mask shape", mems_mask.shape, data_mask.shape, attn_mask.shape)
    if attn_mask is not None:
        attn_mask = fluid.layers.cast(attn_mask > 0, dtype=data_type)

    if attn_mask is not None:
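        # On top of `attn_mask`, `non_tgt_mask` additionally allows each query
        # position to attend to itself (the -1 diagonal cancels the mask there).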
        non_tgt_mask = fluid.layers.diag(np.array([-1]*qlen).astype(data_type))
        non_tgt_mask = fluid.layers.concat([fluid.layers.zeros([qlen, mlen], dtype=data_type),
                                non_tgt_mask], axis=-1)
        print("attn_mask, non_tgt_mask shape", attn_mask.shape, non_tgt_mask.shape)
        attn_mask = fluid.layers.expand(attn_mask, [qlen, 1, 1, 1])
        non_tgt_mask = fluid.layers.unsqueeze(non_tgt_mask, axes=[2, 3])
        non_tgt_mask = fluid.layers.expand(non_tgt_mask, [1, 1, bsz, 1])
        non_tgt_mask = fluid.layers.cast((attn_mask + non_tgt_mask) > 0,
                             dtype=data_type)
        non_tgt_mask.stop_gradient = True
    else:
        non_tgt_mask = None

    if untie_r:
        r_w_bias = fluid.layers.create_parameter(shape=[n_layer, n_head, d_head], dtype=data_type, 
                                 attr=fluid.ParamAttr(name=name+'_r_w_bias', initializer=initializer), 
                                 is_bias=True)
        r_w_bias = [fluid.layers.slice(r_w_bias, axes=[0], starts=[i], ends=[i+1]) for i in range(n_layer)]
        r_w_bias = [fluid.layers.squeeze(r_w_bias[i], axes=[0]) for i in range(n_layer)]
        r_r_bias = fluid.layers.create_parameter(shape=[n_layer, n_head, d_head], dtype=data_type, 
                                 attr=fluid.ParamAttr(name=name+'_r_r_bias', initializer=initializer), 
                                 is_bias=True)
        r_r_bias = [fluid.layers.slice(r_r_bias, axes=[0], starts=[i], ends=[i+1]) for i in range(n_layer)]
        r_r_bias = [fluid.layers.squeeze(r_r_bias[i], axes=[0]) for i in range(n_layer)]
    else:
        r_w_bias = fluid.layers.create_parameter(shape=[n_head, d_head], dtype=data_type, 
                                 attr=fluid.ParamAttr(name=name+'_r_w_bias', initializer=initializer), 
                                 is_bias=True)
        r_r_bias = fluid.layers.create_parameter(shape=[n_head, d_head], dtype=data_type, 
                                 attr=fluid.ParamAttr(name=name+'_r_r_bias', initializer=initializer), 
                                 is_bias=True)

    lookup_table = fluid.layers.create_parameter(shape=[n_token, d_model], dtype=data_type, 
                                 attr=fluid.ParamAttr(name=name+'_word_embedding', 
                                         initializer=initializer), 
                                 is_bias=True)
    word_emb_k = fluid.layers.embedding(
        input=inp_k,
        size=[n_token, d_model],
        dtype=data_type,
        param_attr=fluid.ParamAttr(name=name+'_word_embedding', initializer=initializer))

    if inp_q is not None:
        # two-stream (query-stream) attention for pretraining is not implemented in this port
        pass

    output_h = fluid.layers.dropout(word_emb_k, dropout_prob=dropout,
                                   dropout_implementation="upscale_in_train") 
    
    if inp_q is not None:
        pass

    if seg_id is not None:
        if untie_r:
            r_s_bias = fluid.layers.create_parameter(shape=[n_layer, n_head, d_head], dtype=data_type, 
                        attr=fluid.ParamAttr(name=name+'_r_s_bias', initializer=initializer), 
                        is_bias=True)
            r_s_bias = [fluid.layers.slice(r_s_bias, axes=[0], starts=[i], ends=[i+1]) for i in range(n_layer)]
            r_s_bias = [fluid.layers.squeeze(r_s_bias[i], axes=[0]) for i in range(n_layer)]
        else:
            r_s_bias = fluid.layers.create_parameter(shape=[n_head, d_head], dtype=data_type, 
                        attr=fluid.ParamAttr(name=name+'_r_s_bias', initializer=initializer), 
                        is_bias=True)

        seg_embed = fluid.layers.create_parameter(shape=[n_layer, 2, n_head, d_head],
                              dtype=data_type, attr=fluid.ParamAttr(name=name+'_seg_embed', 
                              initializer=initializer))
        seg_embed = [fluid.layers.slice(seg_embed, axes=[0], starts=[i], ends=[i+1]) for i in range(n_layer)]
        seg_embed = [fluid.layers.squeeze(seg_embed[i], axes=[0]) for i in range(n_layer)]

        # Convert `seg_id` into the one-hot `seg_mat`
        # seg_id: [bsz, qlen, 1]
        mem_pad = fluid.layers.fill_constant_batch_size_like(input=seg_id, shape=[-1, mlen], value=0, dtype='int64')
        # cat_ids: [bsz, klen, 1]
        cat_ids = fluid.layers.concat(input=[mem_pad, seg_id], axis=1)
        seg_id = fluid.layers.stack([seg_id] * klen, axis=2)
        cat_ids = fluid.layers.stack([cat_ids] * qlen, axis=2)
        cat_ids = fluid.layers.transpose(cat_ids, perm=[0, 2, 1])

        # seg_mat: [bsz, qlen, klen]
        seg_mat = fluid.layers.cast(
          fluid.layers.logical_not(fluid.layers.equal(seg_id, cat_ids)),
          dtype='int64')
        
        seg_mat = fluid.layers.transpose(seg_mat, perm=[1, 2, 0])
        seg_mat = fluid.layers.unsqueeze(seg_mat, [-1])
        seg_mat = fluid.layers.one_hot(seg_mat, 2)
        seg_mat.stop_gradient = True
    else:
        seg_mat = None

    pos_emb =  relative_positional_encoding(
             qlen, klen, d_model, clamp_len, attn_type, bi_data,
             bsz=bsz, dtype=data_type) 
    pos_emb = fluid.layers.dropout(pos_emb, dropout,
                            dropout_implementation="upscale_in_train")
    pos_emb.stop_gradient = True
    ##### Attention layers
    if mems is None:
        mems = [None] * n_layer
    for i in range(n_layer):
        # cache new mems
        #new_mems.append(_cache_mem(output_h, mems[i], mem_len, reuse_len)) 

        # segment bias
        if seg_id is None:
            r_s_bias_i = None
            seg_embed_i = None
        else:
            r_s_bias_i = r_s_bias if not untie_r else r_s_bias[i]
            seg_embed_i = seg_embed[i]

        if inp_q is not None:
            pass
        else:
            output_h = rel_multihead_attn(
              h=output_h,
              r=pos_emb,
              r_w_bias=r_w_bias if not untie_r else r_w_bias[i],
              r_r_bias=r_r_bias if not untie_r else r_r_bias[i],
              seg_mat=seg_mat,
              r_s_bias=r_s_bias_i,
              seg_embed=seg_embed_i,
              attn_mask=non_tgt_mask,
              mems=mems[i],
              d_model=d_model,
              n_head=n_head,
              d_head=d_head,
              dropout=dropout,
              dropatt=dropatt,
              initializer=initializer,
              name=name+'_layer_{}'.format(i))

        if inp_q is not None:
            pass

        output_h = positionwise_ffn(inp=output_h, d_model=d_model, 
                         d_inner=d_inner, dropout_prob=dropout, 
                         param_initializer=initializer,
                         act_type=ff_activation, name=name+'_layer_{}_ff'.format(i))
        
    if inp_q is not None:
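        # `output_g` would be the query-stream output of two-stream attention,
        # which is not implemented in this port; call with `inp_q=None`.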
        output = fluid.layers.dropout(output_g, dropout, 
                                      dropout_implementation="upscale_in_train")
    else:
        output = fluid.layers.dropout(output_h, dropout,
                                      dropout_implementation="upscale_in_train")
    new_mems = None
    return output, new_mems, lookup_table


def lm_loss(hidden, target, n_token, d_model, initializer, lookup_table=None,
            tie_weight=False, bi_data=True):
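    """Compute the token-level softmax cross-entropy loss, optionally tying the
    softmax weight matrix to the embedding `lookup_table`."""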

    if tie_weight:
        assert lookup_table is not None, \
          'lookup_table cannot be None for tie_weight'
        softmax_w = lookup_table
    else:
        softmax_w = fluid.layers.create_parameter(
                shape=[n_token, d_model],
                dtype=hidden.dtype,
                attr=fluid.ParamAttr(name='model_loss_weight', initializer=initializer),
                is_bias=False)

    softmax_b = fluid.layers.create_parameter(
                shape=[n_token],
                dtype=hidden.dtype,
                attr=fluid.ParamAttr(name='model_lm_loss_bias', initializer=initializer),
                is_bias=False)
        
    logits = fluid.layers.matmul(x=hidden, y=softmax_w, transpose_y=True) + softmax_b

    loss = fluid.layers.softmax_with_cross_entropy(logits=logits, label=target)

    return loss 

def summarize_sequence(summary_type, hidden, d_model, n_head, d_head, dropout,
                       dropatt, input_mask, is_training, initializer,
                       scope=None, reuse=None, use_proj=True):

    """
    Different classification tasks may or may not share the same parameters
    to summarize the sequence features.
    If shared, one can keep the `scope` to the default value `None`.
    Otherwise, one should specify a different `scope` for each task.
    """

    with tf.variable_scope(scope, 'sequnece_summary', reuse=reuse):
        if summary_type == 'last':
            summary = hidden[-1]
        elif summary_type == 'first':
            summary = hidden[0]
        elif summary_type == 'mean':
            summary = tf.reduce_mean(hidden, axis=0)
        elif summary_type == 'attn':
            bsz = tf.shape(hidden)[1]

            summary_bias = tf.get_variable('summary_bias', [d_model],
                                     dtype=hidden.dtype,
                                     initializer=initializer)
            summary_bias = tf.tile(summary_bias[None, None], [1, bsz, 1])

            if input_mask is not None:
                input_mask = input_mask[None, :, :, None]

            summary = multihead_attn(summary_bias, hidden, hidden, input_mask,
                               d_model, n_head, d_head, dropout, dropatt,
                               is_training, initializer, residual=False)
            summary = summary[0]
        else:
            raise ValueError('Unsupported summary type {}'.format(summary_type))

        # use another projection as in BERT
        if use_proj:
            summary = tf.layers.dense(
                        summary,
                        d_model,
                        activation=tf.tanh,
                        kernel_initializer=initializer,
                        name='summary')

        # dropout
        summary = tf.layers.dropout(
                summary, dropout, training=is_training,
                name='dropout')

    return summary


def classification_loss(hidden, labels, n_class, initializer, name, reuse=None,
                        return_logits=False):
    """
      Different classification tasks should use different scope names to ensure
      different dense layers (parameters) are used to produce the logits.
      An exception will be in transfer learning, where one hopes to transfer
      the classification weights.
    """

    logits = fluid.layers.fc(
        input=hidden,
        size=n_class,
        param_attr=fluid.ParamAttr(name=name+'_logits', initializer=initializer))

    one_hot_target = fluid.layers.cast(
        fluid.layers.one_hot(labels, depth=n_class), dtype=hidden.dtype)
    loss = -fluid.layers.reduce_sum(fluid.layers.log_softmax(logits) * one_hot_target, -1)

    if return_logits:
        return loss, logits

    return loss


def regression_loss(hidden, labels, initializer, name='transformer',
                    return_logits=False):
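    """Compute a squared-error regression loss from a single-unit projection of `hidden`."""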
 
    logits = fluid.layers.fc(
        input=hidden,
        size=1,
        param_attr=fluid.ParamAttr(name=name+'_logits', initializer=initializer))

    logits = fluid.layers.squeeze(logits, axes=[len(logits.shape) - 1])
    loss = fluid.layers.square(logits - labels)

    if return_logits:
        return loss, logits

    return loss