# -*- coding: UTF-8 -*-
#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
from paddlepalm.head.base_head import Head
from paddle.fluid import layers
import numpy as np
import os
from paddlepalm.backbone.utils.transformer import pre_process_layer

class MaskLM(Head):
    '''
    Masked language model (MLM) head: gathers encoder features at the masked
    positions, transforms them, and projects them back onto the vocabulary,
    returning the MLM loss at train time or vocabulary logits at predict time.
    '''
    def __init__(self, input_dim, vocab_size, hidden_act, dropout_prob=0.0, \
                 param_initializer_range=0.02, phase='train'):
        self._is_training = phase == 'train'
        self._emb_size = input_dim
        self._hidden_size = input_dim
        self._dropout_prob = dropout_prob if phase == 'train' else 0.0
        self._preds = []

        self._vocab_size = vocab_size
        self._hidden_act = hidden_act
        self._initializer_range = param_initializer_range
    
    @property
    def inputs_attrs(self):
        reader = {
            "token_ids":[[-1, -1], 'int64'],
            "mask_label": [[-1], 'int64'],
            "mask_pos": [[-1], 'int64'],
            }
        if not self._is_training:
            del reader['mask_label']
        bb = {
            "encoder_outputs": [[-1, -1, self._hidden_size], 'float32'],
            "embedding_table": [[-1, self._vocab_size, self._emb_size], 'float32']}
        return {'reader': reader, 'backbone': bb}

    @property
    def outputs_attrs(self):
        if self._is_training:
            return {"loss": [[1], 'float32']}
        else:
            return {"logits": [[-1], 'float32']}

    def build(self, inputs, scope_name=""):
        mask_pos = inputs["reader"]["mask_pos"]
        if self._is_training:
            mask_label = inputs["reader"]["mask_label"] 
            # infer batch_size * seq_len dynamically from the token_ids tensor
            l1 = fluid.layers.shape(inputs["reader"]["token_ids"])[0]
            l2 = fluid.layers.shape(inputs["reader"]["token_ids"][0])[0]
            bxs = (l1 * l2).astype(np.int64)
            # largest valid index into the flattened [batch*seq, hidden] features
            max_position = bxs - 1

            # clamp out-of-range positions and stop gradients through the indices
            mask_pos = fluid.layers.elementwise_min(mask_pos, max_position)
            mask_pos.stop_gradient = True

        word_emb = inputs["backbone"]["embedding_table"]
        enc_out = inputs["backbone"]["encoder_outputs"]

        emb_size = word_emb.shape[-1]

        _param_initializer = fluid.initializer.TruncatedNormal(
            scale=self._initializer_range)

        reshaped_emb_out = fluid.layers.reshape(
            x=enc_out, shape=[-1, emb_size])
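        # after the reshape, token (b, s) sits at row b * seq_len + s of
        # reshaped_emb_out, matching the flat index convention the reader
        # uses for mask_pos (and why it was clamped to batch*seq - 1 above)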

        # extract masked tokens' feature
        mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)

        # transform: fc
        mask_trans_feat = fluid.layers.fc(
            input=mask_feat,
            size=emb_size,
            act=self._hidden_act,
            param_attr=fluid.ParamAttr(
                name=scope_name+'mask_lm_trans_fc.w_0',
                initializer=_param_initializer),
            bias_attr=fluid.ParamAttr(name=scope_name+'mask_lm_trans_fc.b_0'))
        # transform: layer norm
        mask_trans_feat = pre_process_layer(
            mask_trans_feat, 'n', name=scope_name+'mask_lm_trans')

        mask_lm_out_bias_attr = fluid.ParamAttr(
            name=scope_name+"mask_lm_out_fc.b_0",
            initializer=fluid.initializer.Constant(value=0.0))

        fc_out = fluid.layers.matmul(
            x=mask_trans_feat,
            y=word_emb,
            transpose_y=True)
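        # weight tying: the output projection reuses the word embedding table,
        # so fc_out holds [num_masked_tokens, vocab_size] vocabulary logits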
        fc_out += fluid.layers.create_parameter(
            shape=[self._vocab_size],
            dtype='float32',
            attr=mask_lm_out_bias_attr,
            is_bias=True)

        if self._is_training:
            # use a fresh name so the `inputs` argument is not shadowed
            probs = fluid.layers.softmax(fc_out)
            mask_lm_loss = fluid.layers.cross_entropy(
                input=probs, label=mask_label)
            loss = fluid.layers.mean(mask_lm_loss)
            return {'loss': loss}
        else:
            return {'logits': fc_out}

    def batch_postprocess(self, rt_outputs):
        if not self._is_training:
            logits = rt_outputs['logits']
            # greedy decoding: take the most likely vocabulary id per masked token
            preds = np.argmax(logits, -1)
            self._preds.extend(preds.tolist())
            return preds

    def epoch_postprocess(self, post_inputs, output_dir=None):
        # nothing is declared in epoch_inputs_attrs, so post_inputs is empty here
        if not self._is_training:
            if output_dir is None:
                for p in self._preds:
                    print(p)
            else:
                with open(os.path.join(output_dir, 'predictions.json'), 'w') as writer:
                    for p in self._preds:
                        writer.write(str(p)+'\n')
                print('Predictions saved at '+os.path.join(output_dir, 'predictions.json'))
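
# Minimal smoke test (illustrative only: the hyperparameter values below are
# hypothetical, and in practice this head is wired to a backbone by the
# paddlepalm trainer rather than used standalone):
if __name__ == '__main__':
    mlm = MaskLM(input_dim=768, vocab_size=30522, hidden_act='gelu',
                 dropout_prob=0.1, phase='train')
    print(mlm.inputs_attrs)   # shapes/dtypes expected from the reader and backbone
    print(mlm.outputs_attrs)  # {'loss': [[1], 'float32']} in the train phase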
