# -*- coding: UTF-8 -*-
#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""v1.1 
BERT model."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from paddle import fluid
from paddle.fluid import layers

from paddlepalm.backbone.utils.transformer import pre_process_layer, encoder
from paddlepalm.interface import backbone

    
class Model(backbone):
    
    def __init__(self, config, phase):

        # self._is_training = phase == 'train'  # the backbone generally does not need to care about the running phase, since its outputs are basically the same in every phase
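        # Encoder hyperparameters, read from the BERT config dict.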
        self._emb_size = config["hidden_size"]
        self._n_layer = config["num_hidden_layers"]
        self._n_head = config["num_attention_heads"]
        self._voc_size = config["vocab_size"]
        self._max_position_seq_len = config["max_position_embeddings"]
        self._sent_types = config["type_vocab_size"]
        self._hidden_act = config["hidden_act"]
        self._prepostprocess_dropout = config["hidden_dropout_prob"]
        self._attention_dropout = config["attention_probs_dropout_prob"]

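        # Parameter names of the three embedding tables; build() prefixes them
        # with scope_name when the parameters are created.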
        self._word_emb_name = "word_embedding"
        self._pos_emb_name = "pos_embedding"
        self._sent_emb_name = "sent_embedding"

        # Initialize all weights with a truncated normal initializer; all biases
        # are initialized to zero by default.
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config["initializer_range"])

    @property
    def inputs_attr(self):
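        # Each value is [shape, dtype]; -1 marks a dimension resolved at runtime
        # (batch size and sequence length).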
        return {"token_ids": [[-1, -1, 1], 'int64'],
                "position_ids": [[-1, -1, 1], 'int64'],
                "segment_ids": [[-1, -1, 1], 'int64'],
                "input_mask": [[-1, -1, 1], 'float32']}

    @property
    def outputs_attr(self):
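        # Tensors this backbone exposes to downstream task heads; shapes again
        # use -1 for runtime-determined dimensions.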
        return {"word_embedding": [[-1, -1, self._emb_size], 'float32'],
                "embedding_table": [[-1, self._voc_size, self._emb_size], 'float32'],
                "encoder_outputs": [[-1, -1, self._emb_size], 'float32'],
                "sentence_embedding": [[-1, self._emb_size], 'float32'],
                "sentence_pair_embedding": [[-1, self._emb_size], 'float32']}

    def build(self, inputs, scope_name=""):
        src_ids = inputs['token_ids']
        pos_ids = inputs['position_ids']
        sent_ids = inputs['segment_ids']
        input_mask = inputs['input_mask']
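        # All four inputs carry shape [batch_size, max_seq_len, 1], as declared
        # in inputs_attr.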

        self._emb_dtype = 'float32'
        # padding id in vocabulary must be set to 0
        emb_out = fluid.layers.embedding(
            input=src_ids,
            size=[self._voc_size, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=scope_name+self._word_emb_name, initializer=self._param_initializer),
            is_sparse=False)

        # fluid.global_scope().find_var('backbone-word_embedding').get_tensor()
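        # Grab the word embedding parameter itself so it can be returned as an
        # output (e.g. for a task head that ties its output weights to the table).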
        embedding_table = fluid.default_main_program().global_block().var(scope_name+self._word_emb_name)
        
        position_emb_out = fluid.layers.embedding(
            input=pos_ids,
            size=[self._max_position_seq_len, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=scope_name+self._pos_emb_name, initializer=self._param_initializer))

        sent_emb_out = fluid.layers.embedding(
            sent_ids,
            size=[self._sent_types, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=scope_name+self._sent_emb_name, initializer=self._param_initializer))

        emb_out = emb_out + position_emb_out
        emb_out = emb_out + sent_emb_out

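        # 'nd' applies layer norm followed by dropout to the summed embeddings
        # ('n' = layer norm, 'd' = dropout in the transformer utils).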
        emb_out = pre_process_layer(
            emb_out, 'nd', self._prepostprocess_dropout, name=scope_name+'pre_encoder')

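        # Build the additive attention bias: input_mask is [batch, seq_len, 1], so
        # multiplying it by its own transpose yields a [batch, seq_len, seq_len]
        # pairwise mask. The scale below maps valid pairs (1) to 0 and padded
        # pairs (0) to -10000, and the mask is then replicated once per head.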
        self_attn_mask = fluid.layers.matmul(
            x=input_mask, y=input_mask, transpose_y=True)

        self_attn_mask = fluid.layers.scale(
            x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
        n_head_self_attn_mask = fluid.layers.stack(
            x=[self_attn_mask] * self._n_head, axis=1)
        n_head_self_attn_mask.stop_gradient = True

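        # Transformer encoder stack. d_inner_hid follows BERT's 4x feed-forward
        # sizing; in the shared transformer utils 'd' = dropout, 'a' = residual
        # add, 'n' = layer norm, so postprocess_cmd="dan" gives the post-LN
        # ordering used by BERT.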
        enc_out = encoder(
            enc_input=emb_out,
            attn_bias=n_head_self_attn_mask,
            n_layer=self._n_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=0,
            hidden_act=self._hidden_act,
            preprocess_cmd="",
            postprocess_cmd="dan",
            param_initializer=self._param_initializer,
            name=scope_name+'encoder')

        
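        # Pooled output: take the hidden state of the first token ([CLS]) and
        # pass it through a tanh-activated FC layer (the standard BERT pooler).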
        next_sent_feat = fluid.layers.slice(
            input=enc_out, axes=[1], starts=[0], ends=[1])
        next_sent_feat = fluid.layers.reshape(next_sent_feat, [-1, next_sent_feat.shape[-1]])
        next_sent_feat = fluid.layers.fc(
            input=next_sent_feat,
            size=self._emb_size,
            act="tanh",
            param_attr=fluid.ParamAttr(
                name=scope_name+"pooled_fc.w_0", initializer=self._param_initializer),
            bias_attr=scope_name+"pooled_fc.b_0")

        return {'embedding_table': embedding_table,
                'word_embedding': emb_out,
                'encoder_outputs': enc_out,
                'sentence_embedding': next_sent_feat,
                'sentence_pair_embedding': next_sent_feat}

    def postprocess(self, rt_outputs):
        pass
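

# Minimal usage sketch (not part of the original module): builds the backbone
# graph under Paddle's static-graph (fluid) API. The config values below are
# illustrative BERT-base style assumptions, not values shipped with paddlepalm.
if __name__ == '__main__':
    bert_config = {
        "hidden_size": 768,
        "num_hidden_layers": 12,
        "num_attention_heads": 12,
        "vocab_size": 30522,
        "max_position_embeddings": 512,
        "type_vocab_size": 2,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "attention_probs_dropout_prob": 0.1,
        "initializer_range": 0.02,
    }
    model = Model(bert_config, phase='train')

    # Declare one data layer per entry in inputs_attr, reusing its shapes and
    # dtypes (-1 dimensions are resolved at feed time).
    feed_vars = {
        name: fluid.layers.data(
            name=name, shape=shape, dtype=dtype, append_batch_size=False)
        for name, (shape, dtype) in model.inputs_attr.items()
    }
    outputs = model.build(feed_vars)
    for key, var in outputs.items():
        print(key, var.shape)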