#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import json
import logging
import six
if six.PY2:
    from pathlib2 import Path
else:
    from pathlib import Path

import paddle as P
from paddle import nn
from paddle.nn import functional as F
from ernie.file_utils import _fetch_from_remote, add_docstring

log = logging.getLogger(__name__)

ACT_DICT = {
    'relu': nn.ReLU,
    'gelu': nn.GELU,
}


def _build_linear(n_in, n_out, name, init):
    return nn.Linear(
        n_in,
        n_out,
        weight_attr=P.ParamAttr(
            name='%s.w_0' % name if name is not None else None,
            initializer=init),
        bias_attr='%s.b_0' % name if name is not None else None, )


def _build_ln(n_in, name):
    return nn.LayerNorm(
        normalized_shape=n_in,
        weight_attr=P.ParamAttr(
            name='%s_layer_norm_scale' % name if name is not None else None,
            initializer=nn.initializer.Constant(1.)),
        bias_attr=P.ParamAttr(
            name='%s_layer_norm_bias' % name if name is not None else None,
            initializer=nn.initializer.Constant(0.)), )


def append_name(name, postfix):
    if name is None:
        ret = None
    elif name == '':
        ret = postfix
    else:
        ret = '%s_%s' % (name, postfix)
    return ret


class AttentionLayer(nn.Layer):
    def __init__(self, cfg, name=None):
        super(AttentionLayer, self).__init__()
        initializer = nn.initializer.TruncatedNormal(
            std=cfg['initializer_range'])
        d_model = cfg['hidden_size']
        n_head = cfg['num_attention_heads']
        assert d_model % n_head == 0
        d_model_q = cfg.get('query_hidden_size_per_head',
                            d_model // n_head) * n_head
        d_model_v = cfg.get('value_hidden_size_per_head',
                            d_model // n_head) * n_head
        self.n_head = n_head
        self.d_key = d_model_q // n_head
        self.q = _build_linear(d_model, d_model_q,
                               append_name(name, 'query_fc'), initializer)
        self.k = _build_linear(d_model, d_model_q,
                               append_name(name, 'key_fc'), initializer)
        self.v = _build_linear(d_model, d_model_v,
                               append_name(name, 'value_fc'), initializer)
        self.o = _build_linear(d_model_v, d_model,
                               append_name(name, 'output_fc'), initializer)
        self.dropout = nn.Dropout(p=cfg['attention_probs_dropout_prob'])

    def forward(self, queries, keys, values, attn_bias, past_cache):
        assert len(queries.shape) == len(keys.shape) == len(values.shape) == 3
        #bsz, q_len, q_dim = queries.shape
        #bsz, k_len, k_dim = keys.shape
        #bsz, v_len, v_dim = values.shape
        #assert k_len == v_len

        q = self.q(queries)
        k = self.k(keys)
        v = self.v(values)

        cache = (k, v)
        if past_cache is not None:
            cached_k, cached_v = past_cache
            k = P.concat([cached_k, k], 1)
            v = P.concat([cached_v, v], 1)

        q = q.reshape(
            [0, 0, self.n_head, q.shape[-1] // self.n_head]).transpose(
                [0, 2, 1, 3])  #[batch, head, seq, dim]
        k = k.reshape(
            [0, 0, self.n_head, k.shape[-1] // self.n_head]).transpose(
                [0, 2, 1, 3])  #[batch, head, seq, dim]
        v = v.reshape(
            [0, 0, self.n_head, v.shape[-1] // self.n_head]).transpose(
                [0, 2, 1, 3])  #[batch, head, seq, dim]

        q = q.scale(self.d_key**-0.5)
        score = q.matmul(k, transpose_y=True)
        if attn_bias is not None:
            score += attn_bias
        score = F.softmax(score)
        score = self.dropout(score)

        out = score.matmul(v).transpose([0, 2, 1, 3])
        out = out.reshape([0, 0, out.shape[2] * out.shape[3]])
        out = self.o(out)
        return out, cache


class PositionwiseFeedForwardLayer(nn.Layer):
    def __init__(self, cfg, name=None):
        super(PositionwiseFeedForwardLayer, self).__init__()
        initializer = nn.initializer.TruncatedNormal(
            std=cfg['initializer_range'])
        d_model = cfg['hidden_size']
        d_ffn = cfg.get('intermediate_size', 4 * d_model)
        self.act = ACT_DICT[cfg['hidden_act']]()
        self.i = _build_linear(
            d_model,
            d_ffn,
            append_name(name, 'fc_0'),
            initializer, )
        self.o = _build_linear(d_ffn, d_model,
                               append_name(name, 'fc_1'), initializer)
        prob = cfg.get('intermediate_dropout_prob', 0.)
        self.dropout = nn.Dropout(p=prob)

    def forward(self, inputs):
        hidden = self.act(self.i(inputs))
        hidden = self.dropout(hidden)
        out = self.o(hidden)
        return out


class ErnieBlock(nn.Layer):
    def __init__(self, cfg, name=None):
        super(ErnieBlock, self).__init__()
        d_model = cfg['hidden_size']
        self.attn = AttentionLayer(
            cfg, name=append_name(name, 'multi_head_att'))
        self.ln1 = _build_ln(d_model, name=append_name(name, 'post_att'))
        self.ffn = PositionwiseFeedForwardLayer(
            cfg, name=append_name(name, 'ffn'))
        self.ln2 = _build_ln(d_model, name=append_name(name, 'post_ffn'))
        prob = cfg.get('intermediate_dropout_prob', cfg['hidden_dropout_prob'])
        self.dropout = nn.Dropout(p=prob)

    def forward(self, inputs, attn_bias=None, past_cache=None):
        attn_out, cache = self.attn(
            inputs, inputs, inputs, attn_bias,
            past_cache=past_cache)  #self attn
        attn_out = self.dropout(attn_out)
        hidden = attn_out + inputs
        hidden = self.ln1(hidden)  # dropout/ add/ norm

        ffn_out = self.ffn(hidden)
        ffn_out = self.dropout(ffn_out)
        hidden = ffn_out + hidden
        hidden = self.ln2(hidden)
        return hidden, cache


class ErnieEncoderStack(nn.Layer):
    def __init__(self, cfg, name=None):
        super(ErnieEncoderStack, self).__init__()
        n_layers = cfg['num_hidden_layers']
        self.block = nn.LayerList([
            ErnieBlock(cfg, append_name(name, 'layer_%d' % i))
            for i in range(n_layers)
        ])

    def forward(self, inputs, attn_bias=None, past_cache=None):
        if past_cache is not None:
            assert isinstance(
                past_cache, tuple
            ), 'unknown type of `past_cache`, expect tuple or list. got %s' % repr(
                type(past_cache))
            past_cache = list(zip(*past_cache))
        else:
            past_cache = [None] * len(self.block)
        cache_list_k, cache_list_v, hidden_list = [], [], [inputs]

        for b, p in zip(self.block, past_cache):
            inputs, cache = b(inputs, attn_bias=attn_bias, past_cache=p)
            cache_k, cache_v = cache
            cache_list_k.append(cache_k)
            cache_list_v.append(cache_v)
            hidden_list.append(inputs)

        return inputs, hidden_list, (cache_list_k, cache_list_v)


class PretrainedModel(object):
    bce = 'https://ernie-github.cdn.bcebos.com/'
    resource_map = {
        'ernie-1.0': bce + 'model-ernie1.0.1.tar.gz',
        'ernie-2.0-en': bce + 'model-ernie2.0-en.1.tar.gz',
        'ernie-2.0-large-en': bce + 'model-ernie2.0-large-en.1.tar.gz',
        'ernie-tiny': bce + 'model-ernie_tiny.1.tar.gz',
    }

    @classmethod
    def from_pretrained(cls,
                        pretrain_dir_or_url,
                        force_download=False,
                        **kwargs):
        if not Path(pretrain_dir_or_url).exists() and str(
                pretrain_dir_or_url) in cls.resource_map:
            url = cls.resource_map[str(pretrain_dir_or_url)]
            log.info('get pretrain dir from %s' % url)
            pretrain_dir = _fetch_from_remote(url, force_download)
        else:
            log.info('pretrain dir %s not in %s, read from local' %
                     (pretrain_dir_or_url, repr(cls.resource_map)))
            pretrain_dir = Path(pretrain_dir_or_url)

        if not pretrain_dir.exists():
            raise ValueError('pretrain dir not found: %s' % pretrain_dir)
        state_dict_path = pretrain_dir / 'saved_weights.pdparams'
        config_path = pretrain_dir / 'ernie_config.json'

        if not config_path.exists():
            raise ValueError('config path not found: %s' % config_path)
        name_prefix = kwargs.pop('name', None)
        cfg_dict = dict(json.loads(config_path.open().read()), **kwargs)
        model = cls(cfg_dict, name=name_prefix)

        log.info('loading pretrained model from %s' % pretrain_dir)

        #param_path = pretrain_dir / 'params'
        #if os.path.exists(param_path):
        #    raise NotImplementedError()
        #    log.debug('load pretrained weight from program state')
        #    F.io.load_program_state(param_path) #buggy in dygraph.gurad, push paddle to fix
        if state_dict_path.exists():
            m = P.load(str(state_dict_path))
            for k, v in model.state_dict().items():
                if k not in m:
                    log.warn('param:%s not set in pretrained model, skip' % k)
                    m[k] = v  # FIXME: no need to do this in the future
            model.set_state_dict(m)
        else:
            raise ValueError('weight file not found in pretrain dir: %s' %
                             pretrain_dir)
        return model
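
    # Usage sketch (not executed here): `from_pretrained` accepts either a key of
    # `resource_map` (weights are downloaded) or a local directory that contains
    # `saved_weights.pdparams` and `ernie_config.json`. The local path below is
    # hypothetical and only for illustration:
    #
    #   model = ErnieModel.from_pretrained('ernie-1.0')
    #   model = ErnieModel.from_pretrained('./my_local_pretrain_dir')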


class ErnieModel(nn.Layer, PretrainedModel):
    def __init__(self, cfg, name=None):
        """
        Fundamental pretrained Ernie model
        """
        log.debug('init ErnieModel with config: %s' % repr(cfg))
        nn.Layer.__init__(self)
        d_model = cfg['hidden_size']
        d_emb = cfg.get('emb_size', cfg['hidden_size'])
        d_vocab = cfg['vocab_size']
        d_pos = cfg['max_position_embeddings']
        d_sent = cfg.get("sent_type_vocab_size") or cfg['type_vocab_size']
        self.n_head = cfg['num_attention_heads']
        self.return_additional_info = cfg.get('return_additional_info', False)
        initializer = nn.initializer.TruncatedNormal(
            std=cfg['initializer_range'])

        self.ln = _build_ln(d_model, name=append_name(name, 'pre_encoder'))
        self.word_emb = nn.Embedding(
            d_vocab,
            d_emb,
            weight_attr=P.ParamAttr(
                name=append_name(name, 'word_embedding'),
                initializer=initializer))
        self.pos_emb = nn.Embedding(
            d_pos,
            d_emb,
            weight_attr=P.ParamAttr(
                name=append_name(name, 'pos_embedding'),
                initializer=initializer))
        self.sent_emb = nn.Embedding(
            d_sent,
            d_emb,
            weight_attr=P.ParamAttr(
                name=append_name(name, 'sent_embedding'),
                initializer=initializer))
        prob = cfg['hidden_dropout_prob']
        self.dropout = nn.Dropout(p=prob)

        self.encoder_stack = ErnieEncoderStack(cfg,
                                               append_name(name, 'encoder'))
        if cfg.get('has_pooler', True):
            self.pooler = _build_linear(
                cfg['hidden_size'],
                cfg['hidden_size'],
                append_name(name, 'pooled_fc'),
                initializer, )
        else:
            self.pooler = None
        self.train()

    #FIXME:remove this
    def eval(self):
        if P.in_dynamic_mode():
            super(ErnieModel, self).eval()
        self.training = False
        for l in self.sublayers():
            l.training = False
        return self

    def train(self):
        if P.in_dynamic_mode():
            super(ErnieModel, self).train()
        self.training = True
        for l in self.sublayers():
            l.training = True
        return self

    def forward(self,
                src_ids,
                sent_ids=None,
                pos_ids=None,
                input_mask=None,
                attn_bias=None,
                past_cache=None,
                use_causal_mask=False):
        """
        Args:
            src_ids (`Variable` of shape `[batch_size, seq_len]`):
                Indices of input sequence tokens in the vocabulary.
            sent_ids (optional, `Variable` of shape `[batch_size, seq_len]`):
                aka token_type_ids, Segment token indices to indicate first and second portions of the inputs.
                if None, assume all tokens come from `segment_a`
            pos_ids(optional, `Variable` of shape `[batch_size, seq_len]`):
                Indices of positions of each input sequence tokens in the position embeddings.
            input_mask(optional `Variable` of shape `[batch_size, seq_len]`):
                Mask to avoid performing attention on the padding token indices of the encoder input.
            attn_bias(optional, `Variable` of shape `[batch_size, seq_len, seq_len] or False`):
                3D version of `input_mask`; if set, overrides `input_mask`. if set to `False`, no attention mask will be applied
            past_cache(optional, tuple of two lists: cached key and cached value,
                each is a list of `Variable`s of shape `[batch_size, seq_len, hidden_size]`):
                cached key/value tensors that will be concatenated to the newly generated key/value when performing self attention.
                if set, `attn_bias` should not be None.

        Returns:
            pooled (`Variable` of shape `[batch_size, hidden_size]`):
                pooled output of the first (`[CLS]`) token (tanh of a linear projection); `None` if the model has no pooler
            encoded(`Variable` of shape `[batch_size, seq_len, hidden_size]`):
                hidden states of the last transformer layer
            info (Dictionary):
                additional middle-level info, includes: all hidden states, k/v caches.
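        Example:
            a minimal sketch (the token ids below are placeholders, not entries of a real vocab):

            >>> model = ErnieModel.from_pretrained('ernie-1.0')
            >>> src_ids = P.to_tensor([[1, 1000, 1001, 2]])  # int64, [batch_size=1, seq_len=4]
            >>> pooled, encoded = model(src_ids)
            >>> # pooled: [1, hidden_size]; encoded: [1, 4, hidden_size]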
        """
        assert len(src_ids.shape) == 2, \
            'expect src_ids.shape = [batch, sequence], got %s' % repr(src_ids.shape)
        assert attn_bias is not None if past_cache else True, 'if `past_cache` is specified, attn_bias should not be None'
        d_seqlen = P.shape(src_ids)[1]
        if pos_ids is None:
            pos_ids = P.arange(
                0, d_seqlen, 1, dtype='int32').reshape([1, -1]).cast('int64')
        if attn_bias is None:
            if input_mask is None:
                input_mask = P.cast(src_ids != 0, 'float32')
            assert len(input_mask.shape) == 2
            input_mask = input_mask.unsqueeze(-1)
            attn_bias = input_mask.matmul(input_mask, transpose_y=True)
            if use_causal_mask:
                sequence = P.reshape(
                    P.arange(
                        0, d_seqlen, 1, dtype='float32') + 1., [1, 1, -1, 1])
                causal_mask = (sequence.matmul(
                    1. / sequence, transpose_y=True) >= 1.).cast('float32')
                attn_bias *= causal_mask
        else:
            assert len(attn_bias.shape) == 3, \
                'expect attn_bias to be rank 3, got %r' % attn_bias.shape
        attn_bias = (1. - attn_bias) * -10000.0
        attn_bias = attn_bias.unsqueeze(1).tile(
            [1, self.n_head, 1, 1])  # avoid broadcast =_=

        if sent_ids is None:
            sent_ids = P.zeros_like(src_ids)

        src_embedded = self.word_emb(src_ids)
        pos_embedded = self.pos_emb(pos_ids)
        sent_embedded = self.sent_emb(sent_ids)
        embedded = src_embedded + pos_embedded + sent_embedded

        embedded = self.dropout(self.ln(embedded))

        encoded, hidden_list, cache_list = self.encoder_stack(
            embedded, attn_bias, past_cache=past_cache)
        if self.pooler is not None:
            pooled = F.tanh(self.pooler(encoded[:, 0, :]))
        else:
            pooled = None

        additional_info = {
            'hiddens': hidden_list,
            'caches': cache_list,
        }

        if self.return_additional_info:
            return pooled, encoded, additional_info
        return pooled, encoded


class ErnieModelForSequenceClassification(ErnieModel):
    """
    Ernie Model for text classification or pointwise ranking tasks
    """

    def __init__(self, cfg, name=None):
        super(ErnieModelForSequenceClassification, self).__init__(
            cfg, name=name)

        initializer = nn.initializer.TruncatedNormal(
            std=cfg['initializer_range'])
        self.classifier = _build_linear(cfg['hidden_size'], cfg['num_labels'],
                                        append_name(name, 'cls'), initializer)

        prob = cfg.get('classifier_dropout_prob', cfg['hidden_dropout_prob'])
        self.dropout = nn.Dropout(p=prob)
        self.train()

    @add_docstring(ErnieModel.forward.__doc__)
    def forward(self, *args, **kwargs):
        """
        Args:
            labels (optional, `Variable` of shape [batch_size]):
                ground truth label id for each sentence
        Returns:
            loss (`Variable` of shape []):
                Cross entropy loss mean over batch
                if labels not set, returns None
            logits (`Variable` of shape [batch_size, num_labels]):
                output logits of classifier
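        Example:
            a minimal sketch (token ids are placeholders; `num_labels` is merged into the config by `from_pretrained`):

            >>> model = ErnieModelForSequenceClassification.from_pretrained(
            ...     'ernie-1.0', num_labels=2)
            >>> src_ids = P.to_tensor([[1, 1000, 1001, 2]])
            >>> loss, logits = model(src_ids, labels=P.to_tensor([0]))
            >>> # logits: [1, 2]; loss: scalar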
        """
        labels = kwargs.pop('labels', None)
        pooled, encoded = super(ErnieModelForSequenceClassification,
                                self).forward(*args, **kwargs)
        hidden = self.dropout(pooled)
        logits = self.classifier(hidden)

        if labels is not None:
            if len(labels.shape) != 1:
                labels = labels.squeeze()
            loss = F.cross_entropy(logits, labels)
        else:
            loss = None
        return loss, logits


class ErnieModelForTokenClassification(ErnieModel):
    """
    Ernie Model for Named Entity Recognition (NER) tasks
    """

    def __init__(self, cfg, name=None):
        super(ErnieModelForTokenClassification, self).__init__(cfg, name=name)

        initializer = nn.initializer.TruncatedNormal(
            std=cfg['initializer_range'])
        self.classifier = _build_linear(cfg['hidden_size'], cfg['num_labels'],
                                        append_name(name, 'cls'), initializer)

        prob = cfg.get('classifier_dropout_prob', cfg['hidden_dropout_prob'])
        self.dropout = nn.Dropout(p=prob)
        self.train()

    @add_docstring(ErnieModel.forward.__doc__)
    def forward(self, *args, **kwargs):
        """
        Args:
            labels (optional, `Variable` of shape [batch_size, seq_len]):
                ground truth label id for each token
            loss_weights (optional, `Variable` of shape [batch_size, seq_len]):
                weights of the loss for each token.
            ignore_index (optional, int, default -100):
                tokens whose label equals `ignore_index` do not contribute to the loss
        Returns:
            loss (`Variable` of shape []):
                Cross entropy loss mean over batch and time, ignore positions where label == `ignore_index`
                if labels not set, returns None
            logits (`Variable` of shape [batch_size, seq_len, num_labels]):
                output logits of classifier
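        Example:
            a minimal sketch (token ids and labels are placeholders; -100 marks ignored positions):

            >>> model = ErnieModelForTokenClassification.from_pretrained(
            ...     'ernie-1.0', num_labels=5)
            >>> src_ids = P.to_tensor([[1, 1000, 1001, 2]])
            >>> loss, logits = model(src_ids, labels=P.to_tensor([[-100, 3, 4, -100]]))
            >>> # logits: [1, 4, 5]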
        """
        ignore_index = kwargs.pop('ignore_index', -100)
        labels = kwargs.pop('labels', None)
        loss_weights = kwargs.pop('loss_weights', None)
        pooled, encoded = super(ErnieModelForTokenClassification,
                                self).forward(*args, **kwargs)
        hidden = self.dropout(encoded)  # maybe not?
        logits = self.classifier(hidden)

        if labels is not None:
            if len(labels.shape) != 2:
                labels = labels.squeeze()
            loss = F.cross_entropy(
                logits, labels, ignore_index=ignore_index, reduction='none')
            if loss_weights is not None:
                loss = loss * loss_weights
            loss = loss.mean()
        else:
            loss = None
        return loss, logits


class ErnieModelForQuestionAnswering(ErnieModel):
    """
    Ernie model for reading comprehension tasks (SQuAD)
    """

    def __init__(self, cfg, name=None):
        super(ErnieModelForQuestionAnswering, self).__init__(cfg, name=name)

        initializer = nn.initializer.TruncatedNormal(
            std=cfg['initializer_range'])
        self.classifier = _build_linear(cfg['hidden_size'], 2,
                                        append_name(name, 'cls_mrc'),
                                        initializer)

        prob = cfg.get('classifier_dropout_prob', cfg['hidden_dropout_prob'])
        self.dropout = nn.Dropout(p=prob)
        self.train()

    @add_docstring(ErnieModel.forward.__doc__)
    def forward(self, *args, **kwargs):
        """
        Args:
            start_pos (optional, `Variable` of shape [batch_size]):
                token index of start of answer span in `context`
            end_pos (optional, `Variable` of shape [batch_size]):
                token index of end of answer span in `context`
        Returns:
            loss (`Variable` of shape []):
                average of the start- and end-position cross entropy losses, mean over batch
                if `start_pos`/`end_pos` not set, returns None
            start_logits (`Variable` of shape [batch_size, seq_len]):
                output logits of start position, use argmax(start_logits) to get start index
            end_logits (`Variable` of shape [batch_size, seq_len]):
                output logits of end position, use argmax(end_logits) to get end index
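        Example:
            a minimal sketch (token ids and span positions are placeholders):

            >>> model = ErnieModelForQuestionAnswering.from_pretrained('ernie-1.0')
            >>> src_ids = P.to_tensor([[1, 1000, 1001, 1002, 2]])
            >>> loss, start_logits, end_logits = model(
            ...     src_ids, start_pos=P.to_tensor([2]), end_pos=P.to_tensor([3]))
            >>> # start_logits/end_logits: [1, 5]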
        """

        start_pos = kwargs.pop('start_pos', None)
        end_pos = kwargs.pop('end_pos', None)
        pooled, encoded = super(ErnieModelForQuestionAnswering, self).forward(
            *args, **kwargs)
        encoded = self.dropout(encoded)
        encoded = self.classifier(encoded)
        start_logit, end_logits = P.unstack(encoded, axis=-1)
        if start_pos is not None and end_pos is not None:
            if len(start_pos.shape) != 1:
                start_pos = start_pos.squeeze()
            if len(end_pos.shape) != 1:
                end_pos = end_pos.squeeze()
            start_loss = F.cross_entropy(start_logit, start_pos)
            end_loss = F.cross_entropy(end_logits, end_pos)
            loss = (start_loss.mean() + end_loss.mean()) / 2.
        else:
            loss = None
        return loss, start_logit, end_logits


class NSPHead(nn.Layer):
    def __init__(self, cfg, name=None):
        super(NSPHead, self).__init__()
        initializer = nn.initializer.TruncatedNormal(
            std=cfg['initializer_range'])
        self.nsp = _build_linear(cfg['hidden_size'], 2,
                                 append_name(name, 'nsp_fc'), initializer)

    def forward(self, inputs, labels):
        """
        Args:
            inputs (`Variable` of shape [batch_size, hidden_size]):
                pooled feature of the first (`[CLS]`) token, as produced by `ErnieModel`
            labels (`Variable` of shape [batch_size]):
                ground truth label id for the `next sentence prediction` task
        Returns:
            loss (`Variable` of shape []):
                Cross entropy loss mean over batch
        """

        logits = self.nsp(inputs)
        loss = F.cross_entropy(logits, labels)
        return loss


class ErnieModelForPretraining(ErnieModel):
    """
    Ernie Model for Masked Language Model pretraining
    """

    def __init__(self, cfg, name=None):
        super(ErnieModelForPretraining, self).__init__(cfg, name=name)
        initializer = nn.initializer.TruncatedNormal(
            std=cfg['initializer_range'])
        d_model = cfg['hidden_size']
        d_vocab = cfg['vocab_size']

        self.pooler_heads = nn.LayerList([NSPHead(cfg, name=name)])
        self.mlm = _build_linear(
            d_model,
            d_model,
            append_name(name, 'mask_lm_trans_fc'),
            initializer, )
        self.act = ACT_DICT[cfg['hidden_act']]()
        self.mlm_ln = _build_ln(
            d_model, name=append_name(name, 'mask_lm_trans'))
        self.mlm_bias = P.create_parameter(
            dtype='float32',
            shape=[d_vocab],
            attr=P.ParamAttr(
                name=append_name(name, 'mask_lm_out_fc.b_0'),
                initializer=nn.initializer.Constant(value=0.0)),
            is_bias=True, )
        self.train()

    @add_docstring(ErnieModel.forward.__doc__)
    def forward(self, *args, **kwargs):
        """
        Args:
            nsp_labels (optional, `Variable` of shape [batch_size]):
                labels for `next sentence prediction` tasks
            mlm_pos (optional, `Variable` of shape [n_mask, 2]):
                index of mask_id in `src_ids`, can be obtained from `paddle.nonzero(src_ids == mask_id)`
            labels (optional, `Variable` of shape [n_mask]):
                labels for the `masked language model` task: the original token ids at masked positions in `src_ids`
        Returns:
            loss (`Variable` of shape []):
                total_loss of `next sentence prediction` and `masked language model`
            mlm_loss (`Variable` of shape []):
                loss for `masked language model` task
            nsp_loss (`Variable` of shape []):
                loss for `next sentence prediction` task
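        Example:
            a minimal sketch (token ids are placeholders; the `[MASK]` id is assumed to be 3 purely for illustration):

            >>> model = ErnieModelForPretraining.from_pretrained('ernie-1.0')
            >>> src_ids = P.to_tensor([[1, 1000, 3, 2]])  # position 2 is masked
            >>> mlm_pos = P.nonzero(src_ids == 3)         # [n_mask, 2]
            >>> loss, mlm_loss, nsp_loss = model(
            ...     src_ids, labels=P.to_tensor([1001]),
            ...     mlm_pos=mlm_pos, nsp_labels=P.to_tensor([0]))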
        """

        mlm_labels = kwargs.pop('labels')
        mlm_pos = kwargs.pop('mlm_pos')
        nsp_labels = kwargs.pop('nsp_labels')
        pooled, encoded = super(ErnieModelForPretraining, self).forward(
            *args, **kwargs)
        if len(mlm_labels.shape) != 1:
            mlm_labels = mlm_labels.squeeze()
        if len(nsp_labels.shape) != 1:
            nsp_labels = nsp_labels.squeeze()

        nsp_loss = self.pooler_heads[0](pooled, nsp_labels)

        encoded_2d = encoded.gather_nd(mlm_pos)
        encoded_2d = self.act(self.mlm(encoded_2d))
        encoded_2d = self.mlm_ln(encoded_2d)
        logits_2d = encoded_2d.matmul(
            self.word_emb.weight, transpose_y=True) + self.mlm_bias
        mlm_loss = F.cross_entropy(logits_2d, mlm_labels)
        total_loss = mlm_loss + nsp_loss
        return total_loss, mlm_loss, nsp_loss


class ErnieModelForGeneration(ErnieModel):
    """
    Ernie Model for sequence to sequence generation.
    """
    resource_map = {
        'ernie-gen-base-en':
        ErnieModel.bce + 'model-ernie-gen-base-en.1.tar.gz',
        'ernie-gen-large-en':
        ErnieModel.bce + 'model-ernie-gen-large-en.1.tar.gz',
        'ernie-gen-large-430g-en':
        ErnieModel.bce + 'model-ernie-gen-large-430g-en.1.tar.gz',
        'ernie-1.0': ErnieModel.bce + 'model-ernie1.0.1.tar.gz',
    }

    def __init__(self, cfg, name=None):
        cfg['return_additional_info'] = True
        cfg['has_pooler'] = False
        super(ErnieModelForGeneration, self).__init__(cfg, name=name)
        initializer = nn.initializer.TruncatedNormal(
            std=cfg['initializer_range'])
        d_model = cfg['hidden_size']
        d_vocab = cfg['vocab_size']

        self.mlm = _build_linear(
            d_model,
            d_model,
            append_name(name, 'mask_lm_trans_fc'),
            initializer, )
        self.act = ACT_DICT[cfg['hidden_act']]()
        self.mlm_ln = _build_ln(
            d_model, name=append_name(name, 'mask_lm_trans'))
        self.mlm_bias = P.create_parameter(
            dtype='float32',
            shape=[d_vocab],
            attr=P.ParamAttr(
                name=append_name(name, 'mask_lm_out_fc.b_0'),
                initializer=nn.initializer.Constant(value=0.0)),
            is_bias=True, )
        self.train()

    @add_docstring(ErnieModel.forward.__doc__)
    def forward(self, *args, **kwargs):
        """
        Args:
            tgt_labels(`Variable` of shape [batch_size, seqlen] or [batch, seqlen, vocab_size]):
                ground truth target sequence id (hard label) or distribution (soft label)
            tgt_pos(`Variable` of shape [n_targets, 2]):
                index of tgt_labels in `src_ids`, can be obtained from `paddle.nonzero(src_ids == mask_id)`
            encode_only(Bool):
                if set, will not return loss or logits_2d
        Returns:
            loss(`Variable` of shape []):
                cross entropy loss mean over every target label. if `encode_only`, returns None.
            logits(`Variable` of shape [n_targets, vocab_size]):
                logits for every targets. if `encode_only`, returns None.
            info(Dictionary): see `ErnieModel`
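        Example:
            a minimal encode-only sketch (token ids are placeholders; the actual decoding loop is outside this module):

            >>> model = ErnieModelForGeneration.from_pretrained('ernie-gen-base-en')
            >>> src_ids = P.to_tensor([[1, 1000, 1001, 2]])
            >>> _, _, info = model(src_ids, encode_only=True)
            >>> # info['caches'] holds the per-layer k/v caches for incremental decoding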
        """
        tgt_labels = kwargs.pop('tgt_labels', None)
        tgt_pos = kwargs.pop('tgt_pos', None)
        encode_only = kwargs.pop('encode_only', False)
        _, encoded, info = ErnieModel.forward(self, *args, **kwargs)
        if encode_only:
            return None, None, info
        if tgt_labels is None or tgt_pos is None:
            encoded = self.act(self.mlm(encoded))
            encoded = self.mlm_ln(encoded)
            logits = encoded.matmul(
                self.word_emb.weight, transpose_y=True) + self.mlm_bias
            output_ids = logits.cast('float32').argmax(-1)
            return output_ids, logits, info
        else:
            encoded_2d = encoded.gather_nd(tgt_pos)
            encoded_2d = self.act(self.mlm(encoded_2d))
            encoded_2d = self.mlm_ln(encoded_2d)
            logits_2d = encoded_2d.matmul(
                self.word_emb.weight, transpose_y=True) + self.mlm_bias
            assert len(tgt_labels.shape) == 2, \
                'expect 2d label, got %r' % tgt_labels.shape

            loss = F.cross_entropy(logits_2d, tgt_labels, soft_label=True)
            return loss, logits_2d, info