Commit e0df8292 authored by W wanghaoshuang

Add teacher model of bert.

Parent 84d76531
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from ..bert import cls
from .cls import *
__all__ = []
__all__ += cls.__all__
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask, padding and batching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def mask(batch_tokens, total_token_num, vocab_size, CLS=1, SEP=2, MASK=3):
"""
Add masks to batch_tokens; return out, mask_label and mask_pos.
Note: mask_pos corresponds to the positions in batch_tokens after padding.
"""
max_len = max([len(sent) for sent in batch_tokens])
mask_label = []
mask_pos = []
prob_mask = np.random.rand(total_token_num)
# Note: the first token is [CLS], so [low=1]
replace_ids = np.random.randint(1, high=vocab_size, size=total_token_num)
pre_sent_len = 0
prob_index = 0
for sent_index, sent in enumerate(batch_tokens):
mask_flag = False
prob_index += pre_sent_len
for token_index, token in enumerate(sent):
prob = prob_mask[prob_index + token_index]
if prob > 0.15:
continue
elif 0.03 < prob <= 0.15:
# mask
if token != SEP and token != CLS:
mask_label.append(sent[token_index])
sent[token_index] = MASK
mask_flag = True
mask_pos.append(sent_index * max_len + token_index)
elif 0.015 < prob <= 0.03:
# random replace
if token != SEP and token != CLS:
mask_label.append(sent[token_index])
sent[token_index] = replace_ids[prob_index + token_index]
mask_flag = True
mask_pos.append(sent_index * max_len + token_index)
else:
# keep the original token
if token != SEP and token != CLS:
mask_label.append(sent[token_index])
mask_pos.append(sent_index * max_len + token_index)
pre_sent_len = len(sent)
# ensure that at least one token in each sentence is masked
while not mask_flag:
token_index = int(np.random.randint(1, high=len(sent) - 1, size=1))
if sent[token_index] != SEP and sent[token_index] != CLS:
mask_label.append(sent[token_index])
sent[token_index] = MASK
mask_flag = True
mask_pos.append(sent_index * max_len + token_index)
mask_label = np.array(mask_label).astype("int64").reshape([-1, 1])
mask_pos = np.array(mask_pos).astype("int64").reshape([-1, 1])
return batch_tokens, mask_label, mask_pos
def prepare_batch_data(insts,
total_token_num,
voc_size=0,
pad_id=None,
cls_id=None,
sep_id=None,
mask_id=None,
return_input_mask=True,
return_max_len=True,
return_num_token=False):
"""
1. generate the Tensor of token data
2. generate the Tensor of position data
3. generate the self-attention mask, [shape: batch_size * max_len * max_len]
"""
batch_src_ids = [inst[0] for inst in insts]
batch_sent_ids = [inst[1] for inst in insts]
batch_pos_ids = [inst[2] for inst in insts]
labels_list = []
# compatible with squad, whose example includes start/end positions,
# or unique id
for i in range(3, len(insts[0]), 1):
labels = [inst[i] for inst in insts]
labels = np.array(labels).astype("int64").reshape([-1, 1])
labels_list.append(labels)
# First step: do mask without padding
if mask_id >= 0:
out, mask_label, mask_pos = mask(
batch_src_ids,
total_token_num,
vocab_size=voc_size,
CLS=cls_id,
SEP=sep_id,
MASK=mask_id)
else:
out = batch_src_ids
# Second step: padding
src_id, self_input_mask = pad_batch_data(
out, pad_idx=pad_id, return_input_mask=True)
pos_id = pad_batch_data(
batch_pos_ids,
pad_idx=pad_id,
return_pos=False,
return_input_mask=False)
sent_id = pad_batch_data(
batch_sent_ids,
pad_idx=pad_id,
return_pos=False,
return_input_mask=False)
if mask_id >= 0:
return_list = [
src_id, pos_id, sent_id, self_input_mask, mask_label, mask_pos
] + labels_list
else:
return_list = [src_id, pos_id, sent_id, self_input_mask] + labels_list
return return_list if len(return_list) > 1 else return_list[0]
def pad_batch_data(insts,
pad_idx=0,
return_pos=False,
return_input_mask=False,
return_max_len=False,
return_num_token=False):
"""
Pad the instances to the max sequence length in batch, and generate the
corresponding position data and input mask.
"""
return_list = []
max_len = max(len(inst) for inst in insts)
# Any token included in the dict can be used for padding, since the paddings' loss
# will be masked out by weights and has no effect on parameter gradients.
inst_data = np.array([
list(inst) + list([pad_idx] * (max_len - len(inst))) for inst in insts
])
return_list += [inst_data.astype("int64").reshape([-1, max_len])]
# position data
if return_pos:
inst_pos = np.array([
list(range(0, len(inst))) + [pad_idx] * (max_len - len(inst))
for inst in insts
])
return_list += [inst_pos.astype("int64").reshape([-1, max_len])]
if return_input_mask:
# This is used to avoid attention on paddings.
input_mask_data = np.array([[1] * len(inst) + [0] *
(max_len - len(inst)) for inst in insts])
input_mask_data = np.expand_dims(input_mask_data, axis=-1)
return_list += [input_mask_data.astype("float32")]
if return_max_len:
return_list += [max_len]
if return_num_token:
num_token = 0
for inst in insts:
num_token += len(inst)
return_list += [num_token]
return return_list if len(return_list) > 1 else return_list[0]
if __name__ == "__main__":
pass
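    # A minimal smoke test added as a sketch (not part of the original
    # code): the token ids below are made up, and pad_id=0, cls_id=1,
    # sep_id=2, mask_id=3 and voc_size=30000 are assumptions.
    demo_insts = [
        [[1, 11, 12, 13, 2], [0, 0, 0, 0, 0], [0, 1, 2, 3, 4], 1],
        [[1, 14, 15, 2], [0, 0, 0, 0], [0, 1, 2, 3], 0],
    ]
    demo_token_num = sum(len(inst[0]) for inst in demo_insts)
    demo_outputs = prepare_batch_data(
        demo_insts,
        demo_token_num,
        voc_size=30000,
        pad_id=0,
        cls_id=1,
        sep_id=2,
        mask_id=3)
    # Expect src_id/pos_id/sent_id of shape (2, 5), input_mask of shape
    # (2, 5, 1), plus mask_label, mask_pos and the label column.
    for item in demo_outputs:
        print(item.shape)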
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT fine-tuning in Paddle Dygraph Mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
import sys
if six.PY2:
reload(sys)
sys.setdefaultencoding('utf8')
import ast
import time
import argparse
import numpy as np
import multiprocessing
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable, Layer
import reader.cls as reader
from .model.bert import BertConfig
from .model.cls import ClsModelLayer
from .optimization import Optimizer
from .utils.args import ArgumentGroup, print_arguments, check_cuda
from .utils.init import init_from_static_model
__all__ = ["BERTClassifier"]
def create_data(batch):
"""
convert numpy batch data to dygraph variables
"""
src_ids = to_variable(batch[0], "src_ids")
position_ids = to_variable(batch[1], "position_ids")
sentence_ids = to_variable(batch[2], "sentence_ids")
input_mask = to_variable(batch[3], "input_mask")
labels = to_variable(batch[4], "labels")
labels.stop_gradient = True
return src_ids, position_ids, sentence_ids, input_mask, labels
class BERTClassifier(Layer):
def __init__(self,
num_labels,
task_name="mnli",
model_path=None,
use_cuda=True):
super(BERTClassifier, self).__init__()
self.task_name = task_name.lower()
BERT_BASE_PATH = "./data/pretrained_models/uncased_L-12_H-768_A-12/"
bert_config_path = BERT_BASE_PATH + "/bert_config.json"
self.vocab_path = BERT_BASE_PATH + "/vocab.txt"
self.init_pretraining_params = BERT_BASE_PATH + "/dygraph_params/"
self.do_lower_case = True
self.bert_config = BertConfig(bert_config_path)
if use_cuda:
self.dev_count = fluid.core.get_cuda_device_count()
else:
self.dev_count = int(
os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
self.trainer_count = fluid.dygraph.parallel.Env().nranks
self.processors = {
'xnli': reader.XnliProcessor,
'cola': reader.ColaProcessor,
'mrpc': reader.MrpcProcessor,
'mnli': reader.MnliProcessor,
}
self.cls_model = ClsModelLayer(
self.bert_config, num_labels, return_pooled_out=True)
if self.init_pretraining_params:
print("Load pre-trained model from %s" %
self.init_pretraining_params)
init_from_static_model(self.init_pretraining_params,
self.cls_model, self.bert_config)
if model_path is not None:
#restore the model
print("Load params from %s" % model_path)
model_dict, _ = fluid.load_dygraph(model_path)
self.cls_model.load_dict(model_dict)
def forward(self, input):
return self.cls_model(input)
def test(self, data_dir, batch_size=64, max_seq_len=512):
processor = self.processors[self.task_name](
data_dir=data_dir,
vocab_path=self.vocab_path,
max_seq_len=max_seq_len,
do_lower_case=self.do_lower_case,
in_tokens=False)
test_data_generator = processor.data_generator(
batch_size=batch_size, phase='dev', epoch=1, shuffle=False)
self.cls_model.eval()
total_cost, final_acc, avg_acc, total_num_seqs = [], [], [], []
for batch in test_data_generator():
data_ids = create_data(batch)
total_loss, _, _, np_acces, np_num_seqs = self.cls_model(data_ids)
np_loss = total_loss.numpy()
np_acc = np_acces[-1].numpy()
np_avg_acc = np.mean([acc.numpy() for acc in np_acces])
np_num_seqs = np_num_seqs.numpy()
total_cost.extend(np_loss * np_num_seqs)
final_acc.extend(np_acc * np_num_seqs)
avg_acc.extend(np_avg_acc * np_num_seqs)
total_num_seqs.extend(np_num_seqs)
print("[evaluation] classifier[-1] average acc: %f; average acc: %f" %
(np.sum(final_acc) / np.sum(total_num_seqs),
np.sum(avg_acc) / np.sum(total_num_seqs)))
self.cls_model.train()
def fit(self,
data_dir,
epoch,
batch_size=64,
use_cuda=True,
max_seq_len=512,
warmup_proportion=0.1,
use_data_parallel=False,
learning_rate=0.0001,
weight_decay=0.01,
lr_scheduler="linear_warmup_decay",
skip_steps=10,
save_steps=10000,
checkpoints="checkpoints"):
processor = self.processors[self.task_name](
data_dir=data_dir,
vocab_path=self.vocab_path,
max_seq_len=max_seq_len,
do_lower_case=self.do_lower_case,
in_tokens=False,
random_seed=5512)
shuffle_seed = 1 if self.trainer_count > 1 else None
train_data_generator = processor.data_generator(
batch_size=batch_size,
phase='train',
epoch=epoch,
dev_count=self.trainer_count,
shuffle=True,
shuffle_seed=shuffle_seed)
num_train_examples = processor.get_num_examples(phase='train')
max_train_steps = epoch * num_train_examples // batch_size // self.trainer_count
warmup_steps = int(max_train_steps * warmup_proportion)
print("Device count: %d" % self.dev_count)
print("Trainer count: %d" % self.trainer_count)
print("Num train examples: %d" % num_train_examples)
print("Max train steps: %d" % max_train_steps)
print("Num warmup steps: %d" % warmup_steps)
if use_data_parallel:
strategy = fluid.dygraph.parallel.prepare_context()
optimizer = Optimizer(
warmup_steps=warmup_steps,
num_train_steps=max_train_steps,
learning_rate=learning_rate,
model_cls=self.cls_model,
weight_decay=weight_decay,
scheduler=lr_scheduler,
loss_scaling=1.0,
parameter_list=self.cls_model.parameters())
if use_data_parallel:
self.cls_model = fluid.dygraph.parallel.DataParallel(
self.cls_model, strategy)
train_data_generator = fluid.contrib.reader.distributed_batch_reader(
train_data_generator)
steps = 0
time_begin = time.time()
for batch in train_data_generator():
data_ids = create_data(batch)
total_loss, logits, losses, accuracys, num_seqs = self.cls_model(
data_ids)
optimizer.optimization(
total_loss,
use_data_parallel=use_data_parallel,
model=self.cls_model)
self.cls_model.clear_gradients()
if steps != 0 and steps % skip_steps == 0:
time_end = time.time()
used_time = time_end - time_begin
current_example, current_epoch = processor.get_train_progress()
localtime = time.asctime(time.localtime(time.time()))
print(
"%s, epoch: %s, steps: %s, dy_graph loss: %f, acc: %f, speed: %f steps/s"
% (localtime, current_epoch, steps, total_loss.numpy(),
accuracys[-1].numpy(), skip_steps / used_time))
time_begin = time.time()
if steps != 0 and steps % save_steps == 0 and fluid.dygraph.parallel.Env(
).local_rank == 0:
self.test(data_dir, batch_size=8, max_seq_len=512)
save_path = os.path.join(checkpoints,
"steps" + "_" + str(steps))
fluid.save_dygraph(self.cls_model.state_dict(), save_path)
fluid.save_dygraph(optimizer.optimizer.state_dict(), save_path)
print("Save model parameters and optimizer status at %s" %
save_path)
steps += 1
if fluid.dygraph.parallel.Env().local_rank == 0:
save_path = os.path.join(checkpoints, "final")
fluid.save_dygraph(self.cls_model.state_dict(), save_path)
fluid.save_dygraph(optimizer.optimizer.state_dict(), save_path)
print("Save model parameters and optimizer status at %s" %
save_path)
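# Example usage added as a sketch (not part of the original code); the data
# directory and label count below are assumptions:
#
#   with fluid.dygraph.guard():
#       classifier = BERTClassifier(num_labels=3, task_name="mnli")
#       classifier.fit(data_dir="./data/glue_data/MNLI/", epoch=3)
#       classifier.test(data_dir="./data/glue_data/MNLI/", batch_size=8)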
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"dygraph transformer layers"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import json
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard
from .transformer_encoder import EncoderLayer, PrePostProcessLayer
class BertConfig(object):
def __init__(self, config_path):
self._config_dict = self._parse(config_path)
def _parse(self, config_path):
try:
with open(config_path) as json_file:
config_dict = json.load(json_file)
except Exception:
raise IOError("Error in parsing bert model config file '%s'" %
config_path)
else:
return config_dict
def __getitem__(self, key):
return self._config_dict[key]
def print_config(self):
for arg, value in sorted(six.iteritems(self._config_dict)):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
class BertModelLayer(Layer):
"""
BERT encoder model (dygraph implementation)
"""
def __init__(self, config, return_pooled_out=True, use_fp16=False):
super(BertModelLayer, self).__init__()
self._emb_size = config['hidden_size']
self._n_layer = config['num_hidden_layers']
self._n_head = config['num_attention_heads']
self._voc_size = config['vocab_size']
self._max_position_seq_len = config['max_position_embeddings']
self._sent_types = config['type_vocab_size']
self._hidden_act = config['hidden_act']
self._prepostprocess_dropout = config['hidden_dropout_prob']
self._attention_dropout = config['attention_probs_dropout_prob']
self.return_pooled_out = return_pooled_out
self._word_emb_name = "word_embedding"
self._pos_emb_name = "pos_embedding"
self._sent_emb_name = "sent_embedding"
self._dtype = "float16" if use_fp16 else "float32"
self._param_initializer = fluid.initializer.TruncatedNormal(
scale=config['initializer_range'])
self._src_emb = Embedding(
size=[self._voc_size, self._emb_size],
param_attr=fluid.ParamAttr(
name=self._word_emb_name, initializer=self._param_initializer),
dtype=self._dtype)
self._pos_emb = Embedding(
size=[self._max_position_seq_len, self._emb_size],
param_attr=fluid.ParamAttr(
name=self._pos_emb_name, initializer=self._param_initializer),
dtype=self._dtype)
self._sent_emb = Embedding(
size=[self._sent_types, self._emb_size],
param_attr=fluid.ParamAttr(
name=self._sent_emb_name, initializer=self._param_initializer),
dtype=self._dtype)
self.pooled_fc = Linear(
input_dim=self._emb_size,
output_dim=self._emb_size,
param_attr=fluid.ParamAttr(
name="pooled_fc.w_0", initializer=self._param_initializer),
bias_attr="pooled_fc.b_0",
act="tanh")
self.pre_process_layer = PrePostProcessLayer(
"nd", self._emb_size, self._prepostprocess_dropout, "")
self._encoder = EncoderLayer(
hidden_act=self._hidden_act,
n_layer=self._n_layer,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._emb_size * 4,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=0,
preprocess_cmd="",
postprocess_cmd="dan",
param_initializer=self._param_initializer)
def forward(self, src_ids, position_ids, sentence_ids, input_mask):
"""
forward
"""
src_emb = self._src_emb(src_ids)
pos_emb = self._pos_emb(position_ids)
sent_emb = self._sent_emb(sentence_ids)
emb_out = src_emb + pos_emb
emb_out = emb_out + sent_emb
emb_out = self.pre_process_layer(emb_out)
self_attn_mask = fluid.layers.matmul(
x=input_mask, y=input_mask, transpose_y=True)
self_attn_mask = fluid.layers.scale(
x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
n_head_self_attn_mask = fluid.layers.stack(
x=[self_attn_mask] * self._n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
enc_outputs = self._encoder(emb_out, n_head_self_attn_mask)
if not self.return_pooled_out:
return enc_outputs
next_sent_feats = []
for enc_output in enc_outputs:
next_sent_feat = fluid.layers.slice(
input=enc_output, axes=[1], starts=[0], ends=[1])
next_sent_feat = self.pooled_fc(next_sent_feat)
next_sent_feat = fluid.layers.reshape(
next_sent_feat, shape=[-1, self._emb_size])
next_sent_feats.append(next_sent_feat)
return enc_outputs, next_sent_feats
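        # Added note: with return_pooled_out=True the layer returns one
        # encoder output of shape [batch_size, seq_len, hidden_size] per
        # encoder sub-layer, plus one pooled [CLS] feature of shape
        # [batch_size, hidden_size] per sub-layer; ClsModelLayer attaches a
        # classifier head to each of these pooled features.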
class PretrainModelLayer(Layer):
"""
BERT pre-training model: masked LM and next-sentence prediction heads on top of BertModelLayer
"""
def __init__(self,
config,
return_pooled_out=True,
weight_sharing=True,
use_fp16=False):
super(PretrainModelLayer, self).__init__()
self.config = config
self._voc_size = config['vocab_size']
self._emb_size = config['hidden_size']
self._hidden_act = config['hidden_act']
self._prepostprocess_dropout = config['hidden_dropout_prob']
self._word_emb_name = "word_embedding"
self._param_initializer = fluid.initializer.TruncatedNormal(
scale=config['initializer_range'])
self._weight_sharing = weight_sharing
self.use_fp16 = use_fp16
self._dtype = "float16" if use_fp16 else "float32"
self.bert_layer = BertModelLayer(
config=self.config, return_pooled_out=True, use_fp16=self.use_fp16)
self.pre_process_layer = PrePostProcessLayer(
"n", self._emb_size, self._prepostprocess_dropout, "pre_encoder")
self.pooled_fc = Linear(
input_dim=self._emb_size,
output_dim=self._emb_size,
param_attr=fluid.ParamAttr(
name="mask_lm_trans_fc.w_0",
initializer=self._param_initializer),
bias_attr="mask_lm_trans_fc.b_0",
act="tanh")
self.mask_lm_out_bias_attr = fluid.ParamAttr(
name="mask_lm_out_fc.b_0",
initializer=fluid.initializer.Constant(value=0.0))
if not self._weight_sharing:
self.out_fc = Linear(
input_dim=self._emb_size,
output_dim=self._voc_size,
param_attr=fluid.ParamAttr(
name="mask_lm_out_fc.w_0",
initializer=self._param_initializer),
bias_attr=self.mask_lm_out_bias_attr)
else:
self.fc_create_params = self.create_parameter(
shape=[self._voc_size],
dtype=self._dtype,
attr=self.mask_lm_out_bias_attr,
is_bias=True)
self.next_sent_fc = Linear(
input_dim=self._emb_size,
output_dim=2,
param_attr=fluid.ParamAttr(
name="next_sent_fc.w_0", initializer=self._param_initializer),
bias_attr="next_sent_fc.b_0")
def forward(self, src_ids, position_ids, sentence_ids, input_mask,
mask_label, mask_pos, labels):
"""
forward
"""
mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')
enc_output, next_sent_feat = self.bert_layer(src_ids, position_ids,
sentence_ids, input_mask)
reshaped_emb_out = fluid.layers.reshape(
x=enc_output, shape=[-1, self._emb_size])
mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)
mask_trans_feat = self.pooled_fc(mask_feat)
mask_trans_feat = self.pre_process_layer(None, mask_trans_feat, "n",
self._prepostprocess_dropout)
if self._weight_sharing:
fc_out = fluid.layers.matmul(
x=mask_trans_feat,
y=self.bert_layer._src_emb._w,
transpose_y=True)
fc_out += self.fc_create_params
else:
fc_out = self.out_fc(mask_trans_feat)
mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
logits=fc_out, label=mask_label)
mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)
next_sent_fc_out = self.next_sent_fc(next_sent_feat)
next_sent_loss, next_sent_softmax = fluid.layers.softmax_with_cross_entropy(
logits=next_sent_fc_out, label=labels, return_softmax=True)
next_sent_acc = fluid.layers.accuracy(
input=next_sent_softmax, label=labels)
mean_next_sent_loss = fluid.layers.mean(next_sent_loss)
loss = mean_next_sent_loss + mean_mask_lm_loss
return next_sent_acc, mean_mask_lm_loss, loss
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"dygraph transformer layers"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import json
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear, Layer
from .bert import BertModelLayer
class ClsModelLayer(Layer):
"""
BERT classification model
"""
def __init__(self,
config,
num_labels,
is_training=True,
return_pooled_out=True,
loss_scaling=1.0,
use_fp16=False):
super(ClsModelLayer, self).__init__()
self.config = config
self.is_training = is_training
self.use_fp16 = use_fp16
self.loss_scaling = loss_scaling
self.bert_layer = BertModelLayer(
config=self.config, return_pooled_out=True, use_fp16=self.use_fp16)
self.cls_fc = Linear(
input_dim=self.config["hidden_size"],
output_dim=num_labels,
param_attr=fluid.ParamAttr(
name="cls_out_w",
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name="cls_out_b", initializer=fluid.initializer.Constant(0.)))
def forward(self, data_ids):
"""
forward
"""
src_ids = data_ids[0]
position_ids = data_ids[1]
sentence_ids = data_ids[2]
input_mask = data_ids[3]
labels = data_ids[4]
enc_outputs, next_sent_feats = self.bert_layer(
src_ids, position_ids, sentence_ids, input_mask)
logits = []
losses = []
accuracys = []
for next_sent_feat in next_sent_feats:
cls_feat = fluid.layers.dropout(
x=next_sent_feat,
dropout_prob=0.1,
dropout_implementation="upscale_in_train")
logit = self.cls_fc(cls_feat)
logits.append(logit)
ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
logits=logit, label=labels, return_softmax=True)
loss = fluid.layers.mean(x=ce_loss)
losses.append(loss)
if self.use_fp16 and self.loss_scaling > 1.0:
loss *= self.loss_scaling
num_seqs = fluid.layers.create_tensor(dtype='int64')
accuracy = fluid.layers.accuracy(
input=probs, label=labels, total=num_seqs)
accuracys.append(accuracy)
total_loss = fluid.layers.sum(losses)
return total_loss, logits, losses, accuracys, num_seqs
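# Example usage added as a sketch (not part of the original code): given a
# BertConfig `config` and a list of dygraph variables `data_ids` laid out as
# [src_ids, position_ids, sentence_ids, input_mask, labels],
#
#   model = ClsModelLayer(config, num_labels=3)
#   total_loss, logits, losses, accs, num_seqs = model(data_ids)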
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"dygraph transformer layers"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, Layer
class PrePostProcessLayer(Layer):
"""
PrePostProcessLayer
"""
def __init__(self, process_cmd, d_model, dropout_rate, name):
super(PrePostProcessLayer, self).__init__()
self.process_cmd = process_cmd
self.functors = []
self.exec_order = ""
for cmd in self.process_cmd:
if cmd == "a": # add residual connection
self.functors.append(
lambda x, y: x + y if y is not None else x)
self.exec_order += "a"
elif cmd == "n": # add layer normalization
self.functors.append(
self.add_sublayer(
"layer_norm_%d" % len(
self.sublayers(include_sublayers=False)),
LayerNorm(
normalized_shape=d_model,
param_attr=fluid.ParamAttr(
name=name + "_layer_norm_scale",
initializer=fluid.initializer.Constant(1.)),
bias_attr=fluid.ParamAttr(
name=name + "_layer_norm_bias",
initializer=fluid.initializer.Constant(0.)))))
self.exec_order += "n"
elif cmd == "d": # add dropout
if dropout_rate:
self.functors.append(lambda x: fluid.layers.dropout(
x, dropout_prob=dropout_rate, is_test=False))
self.exec_order += "d"
def forward(self, x, residual=None):
for i, cmd in enumerate(self.exec_order):
if cmd == "a":
x = self.functors[i](x, residual)
else:
x = self.functors[i](x)
return x
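# Added note: the process_cmd string is executed left to right, e.g.
# PrePostProcessLayer("dan", d_model, 0.1, name) applies dropout ("d"), adds
# the residual ("a") and then layer-normalizes ("n"), which matches the
# postprocess_cmd="dan" wired up in BertModelLayer.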
class PositionwiseFeedForwardLayer(Layer):
"""
PositionwiseFeedForwardLayer
"""
def __init__(self,
hidden_act,
d_inner_hid,
d_model,
dropout_rate,
param_initializer=None,
name=""):
super(PositionwiseFeedForwardLayer, self).__init__()
self._i2h = Linear(
input_dim=d_model,
output_dim=d_inner_hid,
param_attr=fluid.ParamAttr(
name=name + '_fc_0.w_0', initializer=param_initializer),
bias_attr=name + '_fc_0.b_0',
act=hidden_act)
self._h2o = Linear(
input_dim=d_inner_hid,
output_dim=d_model,
param_attr=fluid.ParamAttr(
name=name + '_fc_1.w_0', initializer=param_initializer),
bias_attr=name + '_fc_1.b_0')
self._dropout_rate = dropout_rate
def forward(self, x):
"""
forward
:param x:
:return:
"""
hidden = self._i2h(x)
if self._dropout_rate:
hidden = fluid.layers.dropout(
hidden,
dropout_prob=self._dropout_rate,
dropout_implementation="upscale_in_train",
is_test=False)
out = self._h2o(hidden)
return out
class MultiHeadAttentionLayer(Layer):
"""
MultiHeadAttentionLayer
"""
def __init__(self,
d_key,
d_value,
d_model,
n_head=1,
dropout_rate=0.,
cache=None,
gather_idx=None,
static_kv=False,
param_initializer=None,
name=""):
super(MultiHeadAttentionLayer, self).__init__()
self._n_head = n_head
self._d_key = d_key
self._d_value = d_value
self._d_model = d_model
self._dropout_rate = dropout_rate
self._q_fc = Linear(
input_dim=d_model,
output_dim=d_key * n_head,
param_attr=fluid.ParamAttr(
name=name + '_query_fc.w_0', initializer=param_initializer),
bias_attr=name + '_query_fc.b_0')
self._k_fc = Linear(
input_dim=d_model,
output_dim=d_key * n_head,
param_attr=fluid.ParamAttr(
name=name + '_key_fc.w_0', initializer=param_initializer),
bias_attr=name + '_key_fc.b_0')
self._v_fc = Linear(
input_dim=d_model,
output_dim=d_value * n_head,
param_attr=fluid.ParamAttr(
name=name + '_value_fc.w_0', initializer=param_initializer),
bias_attr=name + '_value_fc.b_0')
self._proj_fc = Linear(
input_dim=d_value * n_head,
output_dim=d_model,
param_attr=fluid.ParamAttr(
name=name + '_output_fc.w_0', initializer=param_initializer),
bias_attr=name + '_output_fc.b_0')
def forward(self, queries, keys, values, attn_bias):
"""
forward
:param queries:
:param keys:
:param values:
:param attn_bias:
:return:
"""
# compute q, k, v
keys = queries if keys is None else keys
values = keys if values is None else values
q = self._q_fc(queries)
k = self._k_fc(keys)
v = self._v_fc(values)
# split head
q_hidden_size = q.shape[-1]
reshaped_q = fluid.layers.reshape(
x=q,
shape=[0, 0, self._n_head, q_hidden_size // self._n_head],
inplace=False)
transpose_q = fluid.layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
k_hidden_size = k.shape[-1]
reshaped_k = fluid.layers.reshape(
x=k,
shape=[0, 0, self._n_head, k_hidden_size // self._n_head],
inplace=False)
transpose_k = fluid.layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3])
v_hidden_size = v.shape[-1]
reshaped_v = fluid.layers.reshape(
x=v,
shape=[0, 0, self._n_head, v_hidden_size // self._n_head],
inplace=False)
transpose_v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3])
scaled_q = fluid.layers.scale(x=transpose_q, scale=self._d_key**-0.5)
# scaled dot-product attention
product = fluid.layers.matmul(
#x=transpose_q,
x=scaled_q,
y=transpose_k,
transpose_y=True)
#alpha=self._d_model**-0.5)
if attn_bias is not None:
product += attn_bias
weights = fluid.layers.softmax(product)
if self._dropout_rate:
weights_droped = fluid.layers.dropout(
weights,
dropout_prob=self._dropout_rate,
dropout_implementation="upscale_in_train",
is_test=False)
out = fluid.layers.matmul(weights_droped, transpose_v)
else:
out = fluid.layers.matmul(weights, transpose_v)
# combine heads
if len(out.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = fluid.layers.transpose(out, perm=[0, 2, 1, 3])
final_out = fluid.layers.reshape(
x=trans_x,
shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
inplace=False)
# fc to output
proj_out = self._proj_fc(final_out)
return proj_out
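# Added note on shapes, with inputs of shape [batch, seq_len, d_model] and
# n_head heads: q/k/v are reshaped to [batch, seq_len, n_head, d] and
# transposed to [batch, n_head, seq_len, d]; the attention weights are
# [batch, n_head, seq_len, seq_len]; heads are then recombined to
# [batch, seq_len, n_head * d] and projected back to d_model.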
class EncoderSubLayer(Layer):
"""
EncoderSubLayer
"""
def __init__(self,
hidden_act,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name=""):
super(EncoderSubLayer, self).__init__()
self.name = name
self._preprocess_cmd = preprocess_cmd
self._postprocess_cmd = postprocess_cmd
self._prepostprocess_dropout = prepostprocess_dropout
self._preprocess_layer = PrePostProcessLayer(
self._preprocess_cmd,
d_model,
prepostprocess_dropout,
name=name + "_pre_att")
self._multihead_attention_layer = MultiHeadAttentionLayer(
d_key,
d_value,
d_model,
n_head,
attention_dropout,
None,
None,
False,
param_initializer,
name=name + "_multi_head_att")
self._postprocess_layer = PrePostProcessLayer(
self._postprocess_cmd,
d_model,
self._prepostprocess_dropout,
name=name + "_post_att")
self._preprocess_layer2 = PrePostProcessLayer(
self._preprocess_cmd,
d_model,
self._prepostprocess_dropout,
name=name + "_pre_ffn")
self._positionwise_feed_forward = PositionwiseFeedForwardLayer(
hidden_act,
d_inner_hid,
d_model,
relu_dropout,
param_initializer,
name=name + "_ffn")
self._postprocess_layer2 = PrePostProcessLayer(
self._postprocess_cmd,
d_model,
self._prepostprocess_dropout,
name=name + "_post_ffn")
def forward(self, enc_input, attn_bias):
"""
forward
:param enc_input:
:param attn_bias:
:return:
"""
pre_process_multihead = self._preprocess_layer(enc_input)
attn_output = self._multihead_attention_layer(pre_process_multihead,
None, None, attn_bias)
attn_output = self._postprocess_layer(attn_output, enc_input)
pre_process2_output = self._preprocess_layer2(attn_output)
ffd_output = self._positionwise_feed_forward(pre_process2_output)
return self._postprocess_layer2(ffd_output, attn_output)
class EncoderLayer(Layer):
"""
encoder
"""
def __init__(self,
hidden_act,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name=""):
super(EncoderLayer, self).__init__()
self._preprocess_cmd = preprocess_cmd
self._encoder_sublayers = list()
self._prepostprocess_dropout = prepostprocess_dropout
self._n_layer = n_layer
self._hidden_act = hidden_act
self._preprocess_layer = PrePostProcessLayer(
self._preprocess_cmd, 3, self._prepostprocess_dropout,
"post_encoder")
for i in range(n_layer):
self._encoder_sublayers.append(
self.add_sublayer(
'esl_%d' % i,
EncoderSubLayer(
hidden_act,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd,
postprocess_cmd,
param_initializer,
name=name + '_layer_' + str(i))))
def forward(self, enc_input, attn_bias):
"""
forward
:param enc_input:
:param attn_bias:
:return:
"""
outputs = []
for i in range(self._n_layer):
enc_output = self._encoder_sublayers[i](enc_input, attn_bias)
outputs.append(enc_output)
enc_input = enc_output
return outputs
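# Example usage added as a sketch (not part of the original code); the
# hyper-parameters follow the BERT-base values that BertModelLayer derives
# from its config (hidden_act is assumed to be "gelu"):
#
#   encoder = EncoderLayer(
#       hidden_act="gelu", n_layer=12, n_head=12, d_key=64, d_value=64,
#       d_model=768, d_inner_hid=3072, prepostprocess_dropout=0.1,
#       attention_dropout=0.1, relu_dropout=0, preprocess_cmd="",
#       postprocess_cmd="dan")
#   enc_outputs = encoder(emb_out, n_head_self_attn_mask)  # list of 12 tensors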
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization and learning rate scheduling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
class ConstantLR(LearningRateDecay):
def __init__(self, learning_rate, begin=0, step=1, dtype='float32'):
super(ConstantLR, self).__init__(begin, step, dtype)
self.learning_rate = learning_rate
def step(self):
return self.learning_rate
class LinearDecay(LearningRateDecay):
def __init__(self,
learning_rate,
warmup_steps,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
begin=0,
step=1,
dtype='float32'):
super(LinearDecay, self).__init__(begin, step, dtype)
self.learning_rate = learning_rate
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
self.end_learning_rate = end_learning_rate
self.power = power
self.cycle = cycle
def step(self):
if self.step_num < self.warmup_steps:
decayed_lr = self.learning_rate * (self.step_num /
self.warmup_steps)
decayed_lr = self.create_lr_var(decayed_lr)
else:
tmp_step_num = self.step_num
tmp_decay_steps = self.decay_steps
if self.cycle:
div_res = fluid.layers.ceil(
self.create_lr_var(tmp_step_num / float(self.decay_steps)))
if tmp_step_num == 0:
div_res = self.create_lr_var(1.0)
tmp_decay_steps = self.decay_steps * div_res
else:
tmp_step_num = self.create_lr_var(
tmp_step_num
if tmp_step_num < self.decay_steps else self.decay_steps)
decayed_lr = (self.learning_rate - self.end_learning_rate) * \
((1 - tmp_step_num / tmp_decay_steps) ** self.power) + self.end_learning_rate
return decayed_lr
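        # Added worked example: with learning_rate=1e-4, warmup_steps=1000,
        # decay_steps=10000, end_learning_rate=0.0 and power=1.0, the lr
        # rises linearly from 0 to 1e-4 over the first 1000 steps (5e-5 at
        # step 500) and then decays linearly back to 0 at step 10000.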
class Optimizer(object):
def __init__(self,
warmup_steps,
num_train_steps,
learning_rate,
model_cls,
weight_decay,
scheduler='linear_warmup_decay',
loss_scaling=1.0,
parameter_list=None):
self.warmup_steps = warmup_steps
self.num_train_steps = num_train_steps
self.learning_rate = learning_rate
self.model_cls = model_cls
self.weight_decay = weight_decay
self.scheduler = scheduler
self.loss_scaling = loss_scaling
self.parameter_list = parameter_list
self.scheduled_lr = 0.0
self.optimizer = self.lr_schedule()
def lr_schedule(self):
if self.warmup_steps > 0:
if self.scheduler == 'noam_decay':
self.scheduled_lr = fluid.dygraph.NoamDecay(1 / (
self.warmup_steps * (self.learning_rate**2)),
self.warmup_steps)
elif self.scheduler == 'linear_warmup_decay':
self.scheduled_lr = LinearDecay(self.learning_rate,
self.warmup_steps,
self.num_train_steps, 0.0)
else:
raise ValueError("Unkown learning rate scheduler, should be "
"'noam_decay' or 'linear_warmup_decay'")
optimizer = fluid.optimizer.Adam(
learning_rate=self.scheduled_lr,
parameter_list=self.parameter_list)
else:
self.scheduled_lr = ConstantLR(self.learning_rate)
optimizer = fluid.optimizer.Adam(
learning_rate=self.scheduled_lr,
parameter_list=self.parameter_list)
return optimizer
def exclude_from_weight_decay(self, name):
if name.find("layer_norm") > -1:
return True
bias_suffix = ["_bias", "_b", ".b_0"]
for suffix in bias_suffix:
if name.endswith(suffix):
return True
return False
def optimization(self, loss, use_data_parallel=False, model=None):
param_list = dict()
clip_norm_thres = 1.0
#grad_clip = fluid.clip.GradientClipByGlobalNorm(clip_norm_thres)
if use_data_parallel:
loss = model.scale_loss(loss)
loss.backward()
if self.weight_decay > 0:
for param in self.model_cls.parameters():
param_list[param.name] = param * 1.0
param_list[param.name].stop_gradient = True
if use_data_parallel:
assert model is not None
model.apply_collective_grads()
#_, param_grads = self.optimizer.minimize(loss, grad_clip=grad_clip)
_, param_grads = self.optimizer.minimize(loss)
if self.weight_decay > 0:
for param, grad in param_grads:
if self.exclude_from_weight_decay(param.name):
continue
if isinstance(self.scheduled_lr.step(), float):
updated_param = param.numpy() - param_list[
param.name].numpy(
) * self.weight_decay * self.scheduled_lr.step()
else:
updated_param = param.numpy(
) - param_list[param.name].numpy(
) * self.weight_decay * self.scheduled_lr.step().numpy()
updated_param_var = fluid.dygraph.to_variable(updated_param)
param = updated_param_var
#param = fluid.layers.reshape(x=updated_param_var, shape=list(updated_param_var.shape))
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import division
import os
import numpy as np
import types
import gzip
import logging
import re
import six
import collections
import tokenization
import paddle
import paddle.fluid as fluid
from batching import prepare_batch_data
class DataReader(object):
def __init__(self,
data_dir,
vocab_path,
batch_size=4096,
in_tokens=True,
max_seq_len=512,
shuffle_files=True,
epoch=100,
voc_size=0,
is_test=False,
generate_neg_sample=False):
self.vocab = self.load_vocab(vocab_path)
self.data_dir = data_dir
self.batch_size = batch_size
self.in_tokens = in_tokens
self.shuffle_files = shuffle_files
self.epoch = epoch
self.current_epoch = 0
self.current_file_index = 0
self.total_file = 0
self.current_file = None
self.voc_size = voc_size
self.max_seq_len = max_seq_len
self.pad_id = self.vocab["[PAD]"]
self.cls_id = self.vocab["[CLS]"]
self.sep_id = self.vocab["[SEP]"]
self.mask_id = self.vocab["[MASK]"]
self.is_test = is_test
self.generate_neg_sample = generate_neg_sample
if self.in_tokens:
assert self.batch_size >= self.max_seq_len, "The number of " \
"tokens in batch should not be smaller than max seq length."
if self.is_test:
self.epoch = 1
self.shuffle_files = False
def get_progress(self):
"""return current progress of traning data
"""
return self.current_epoch, self.current_file_index, self.total_file, self.current_file
def parse_line(self, line, max_seq_len=512):
""" parse one line to token_ids, sentence_ids, pos_ids, label
"""
line = line.strip().decode().split(";")
assert len(line) == 4, "One sample must have 4 fields!"
(token_ids, sent_ids, pos_ids, label) = line
token_ids = [int(token) for token in token_ids.split(" ")]
sent_ids = [int(token) for token in sent_ids.split(" ")]
pos_ids = [int(token) for token in pos_ids.split(" ")]
assert len(token_ids) == len(sent_ids) == len(
pos_ids
), "[Must be true]len(token_ids) == len(sent_ids) == len(pos_ids)"
label = int(label)
if len(token_ids) > max_seq_len:
return None
return [token_ids, sent_ids, pos_ids, label]
def read_file(self, file):
assert file.endswith('.gz'), "[ERROR] %s is not a gzip file" % file
file_path = self.data_dir + "/" + file
with gzip.open(file_path, "rb") as f:
for line in f:
parsed_line = self.parse_line(
line, max_seq_len=self.max_seq_len)
if parsed_line is None:
continue
yield parsed_line
def convert_to_unicode(self, text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(self, vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
fin = open(vocab_file)
for num, line in enumerate(fin):
items = self.convert_to_unicode(line.strip()).split("\t")
if len(items) > 2:
break
token = items[0]
index = items[1] if len(items) == 2 else num
token = token.strip()
vocab[token] = int(index)
return vocab
def random_pair_neg_samples(self, pos_samples):
""" randomly generate negtive samples using pos_samples
Args:
pos_samples: list of positive samples
Returns:
neg_samples: list of negtive samples
"""
np.random.shuffle(pos_samples)
num_sample = len(pos_samples)
neg_samples = []
miss_num = 0
for i in range(num_sample):
pair_index = (i + 1) % num_sample
origin_src_ids = pos_samples[i][0]
origin_sep_index = origin_src_ids.index(2)
pair_src_ids = pos_samples[pair_index][0]
pair_sep_index = pair_src_ids.index(2)
src_ids = origin_src_ids[:origin_sep_index + 1] + pair_src_ids[
pair_sep_index + 1:]
if len(src_ids) >= self.max_seq_len:
miss_num += 1
continue
sent_ids = [0] * len(origin_src_ids[:origin_sep_index + 1]) + [
1
] * len(pair_src_ids[pair_sep_index + 1:])
pos_ids = list(range(len(src_ids)))
neg_sample = [src_ids, sent_ids, pos_ids, 0]
assert len(src_ids) == len(sent_ids) == len(
pos_ids
), "[ERROR]len(src_id) == lne(sent_id) == len(pos_id) must be True"
neg_samples.append(neg_sample)
return neg_samples, miss_num
def mixin_negtive_samples(self, pos_sample_generator, buffer=1000):
""" 1. generate negtive samples by randomly group sentence_1 and sentence_2 of positive samples
2. combine negtive samples and positive samples
Args:
pos_sample_generator: a generator producing a parsed positive sample, which is a list: [token_ids, sent_ids, pos_ids, 1]
Returns:
sample: one sample from shuffled positive samples and negtive samples
"""
pos_samples = []
num_total_miss = 0
pos_sample_num = 0
try:
while True:
while len(pos_samples) < buffer:
pos_sample = next(pos_sample_generator)
label = pos_sample[3]
assert label == 1, "positive sample's label must be 1"
pos_samples.append(pos_sample)
pos_sample_num += 1
neg_samples, miss_num = self.random_pair_neg_samples(
pos_samples)
num_total_miss += miss_num
samples = pos_samples + neg_samples
pos_samples = []
np.random.shuffle(samples)
for sample in samples:
yield sample
except StopIteration:
print("stopiteration: reach end of file")
if len(pos_samples) == 1:
yield pos_samples[0]
elif len(pos_samples) == 0:
yield None
else:
neg_samples, miss_num = self.random_pair_neg_samples(
pos_samples)
num_total_miss += miss_num
samples = pos_samples + neg_samples
pos_samples = []
np.random.shuffle(samples)
for sample in samples:
yield sample
print("miss_num:%d\tideal_total_sample_num:%d\tmiss_rate:%f" %
(num_total_miss, pos_sample_num * 2,
num_total_miss / (pos_sample_num * 2)))
def data_generator(self):
"""
data_generator
"""
files = os.listdir(self.data_dir)
self.total_file = len(files)
assert self.total_file > 0, "[Error] data_dir is empty"
def wrapper():
def reader():
for epoch in range(self.epoch):
self.current_epoch = epoch + 1
if self.shuffle_files:
np.random.shuffle(files)
for index, file in enumerate(files):
self.current_file_index = index + 1
self.current_file = file
sample_generator = self.read_file(file)
if not self.is_test and self.generate_neg_sample:
sample_generator = self.mixin_negtive_samples(
sample_generator)
for sample in sample_generator:
if sample is None:
continue
yield sample
def batch_reader(reader, batch_size, in_tokens):
batch, total_token_num, max_len = [], 0, 0
for parsed_line in reader():
token_ids, sent_ids, pos_ids, label = parsed_line
max_len = max(max_len, len(token_ids))
if in_tokens:
to_append = (len(batch) + 1) * max_len <= batch_size
else:
to_append = len(batch) < batch_size
if to_append:
batch.append(parsed_line)
total_token_num += len(token_ids)
else:
yield batch, total_token_num
batch, total_token_num, max_len = [parsed_line], len(
token_ids), len(token_ids)
if len(batch) > 0:
yield batch, total_token_num
for batch_data, total_token_num in batch_reader(
reader, self.batch_size, self.in_tokens):
yield prepare_batch_data(
batch_data,
total_token_num,
voc_size=self.voc_size,
pad_id=self.pad_id,
cls_id=self.cls_id,
sep_id=self.sep_id,
mask_id=self.mask_id,
return_input_mask=True,
return_max_len=False,
return_num_token=False)
return wrapper
if __name__ == "__main__":
pass
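    # Example usage added as a sketch (not part of the original code); the
    # paths and voc_size are assumptions, and data_dir must contain gzipped
    # files in the "token_ids;sent_ids;pos_ids;label" format parsed above:
    #
    #   reader = DataReader(
    #       data_dir="./data/train/",
    #       vocab_path="./config/vocab.txt",
    #       batch_size=4096,
    #       in_tokens=True,
    #       voc_size=30000,
    #       generate_neg_sample=True)
    #   for batch in reader.data_generator()():
    #       src_id, pos_id, sent_id, input_mask, mask_label, mask_pos, label = batch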
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import io
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
fin = io.open(vocab_file, encoding="utf8")
for num, line in enumerate(fin):
items = convert_to_unicode(line.strip()).split("\t")
if len(items) > 2:
break
token = items[0]
index = items[1] if len(items) == 2 else num
token = token.strip()
vocab[token] = int(index)
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class CharTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in text.lower().split(" "):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
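# Example usage added as a sketch (not part of the original code); the vocab
# path is an assumption:
#
#   tokenizer = FullTokenizer(vocab_file="./config/vocab.txt",
#                             do_lower_case=True)
#   tokens = tokenizer.tokenize("unaffable weather")
#   # e.g. ["un", "##aff", "##able", "weather"], depending on the vocab
#   ids = tokenizer.convert_tokens_to_ids(tokens)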
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import sys
import os
def usage():
"""
usage information
"""
print("")
print("please use command: ")
print(
"python convert_static_to_dygraph.py input_params_dir output_params_dir"
)
print("")
def convert_static_to_dygraph(static_model_path, dygraph_model_path):
"""
convert paddle static bert model to dygraph model
"""
def mkdir(path):
if not os.path.isdir(path):
if os.path.split(path)[0]:
mkdir(os.path.split(path)[0])
else:
return
os.mkdir(path)
if os.path.exists(dygraph_model_path):
shutil.rmtree(dygraph_model_path)
mkdir(dygraph_model_path)
if not os.path.exists(static_model_path):
print("paddle static model path doesn't exist.....")
return -1
file_list = []
for root, dirs, files in os.walk(static_model_path):
file_list.extend(files)
os.makedirs(os.path.join(dygraph_model_path, "PretrainModelLayer_0"))
os.makedirs(
os.path.join(dygraph_model_path,
"PretrainModelLayer_0/BertModelLayer_0"))
os.makedirs(
os.path.join(dygraph_model_path,
"PretrainModelLayer_0/PrePostProcessLayer_0"))
os.makedirs(
os.path.join(
dygraph_model_path,
"PretrainModelLayer_0/BertModelLayer_0/PrePostProcessLayer_0"))
#os.chdir(static_model_path)
#convert embedding file
embedding_type = ["word", "pos", "sent"]
for i in range(3):
src_name = embedding_type[i] + "_embedding"
trg_name = "Embedding_" + str(i) + "." + src_name
shutil.copyfile(
os.path.join(static_model_path, src_name),
os.path.join(dygraph_model_path,
"PretrainModelLayer_0/BertModelLayer_0/" + trg_name))
#convert pre_encoder file
shutil.copyfile(
os.path.join(static_model_path, "pre_encoder_layer_norm_scale"),
os.path.join(
dygraph_model_path,
"PretrainModelLayer_0/BertModelLayer_0/PrePostProcessLayer_0/LayerNorm_0._layer_norm_scale"
))
shutil.copyfile(
os.path.join(static_model_path, "pre_encoder_layer_norm_bias"),
os.path.join(
dygraph_model_path,
"PretrainModelLayer_0/BertModelLayer_0/PrePostProcessLayer_0/LayerNorm_0._layer_norm_bias"
))
#convert mask lm params file
shutil.copyfile(
os.path.join(static_model_path, "mask_lm_out_fc.b_0"),
os.path.join(dygraph_model_path,
"PretrainModelLayer_0/Layer_0.mask_lm_out_fc.b_0"))
shutil.copyfile(
os.path.join(static_model_path, "mask_lm_trans_fc.b_0"),
os.path.join(dygraph_model_path,
"PretrainModelLayer_0/FC_0.mask_lm_trans_fc.b_0"))
shutil.copyfile(
os.path.join(static_model_path, "mask_lm_trans_fc.w_0"),
os.path.join(dygraph_model_path,
"PretrainModelLayer_0/FC_0.mask_lm_trans_fc.w_0"))
shutil.copyfile(
os.path.join(static_model_path, "mask_lm_trans_layer_norm_bias"),
os.path.join(
dygraph_model_path,
"PretrainModelLayer_0/PrePostProcessLayer_0/LayerNorm_0._layer_norm_bias"
))
shutil.copyfile(
os.path.join(static_model_path, "mask_lm_trans_layer_norm_scale"),
os.path.join(
dygraph_model_path,
"PretrainModelLayer_0/PrePostProcessLayer_0/LayerNorm_0._layer_norm_scale"
))
shutil.copyfile(
os.path.join(static_model_path, "next_sent_fc.b_0"),
os.path.join(dygraph_model_path,
"PretrainModelLayer_0/FC_1.next_sent_fc.b_0"))
shutil.copyfile(
os.path.join(static_model_path, "next_sent_fc.w_0"),
os.path.join(dygraph_model_path,
"PretrainModelLayer_0/FC_1.next_sent_fc.w_0"))
shutil.copyfile(
os.path.join(static_model_path, "pooled_fc.b_0"),
os.path.join(
dygraph_model_path,
"PretrainModelLayer_0/BertModelLayer_0/FC_0.pooled_fc.b_0"))
shutil.copyfile(
os.path.join(static_model_path, "pooled_fc.w_0"),
os.path.join(
dygraph_model_path,
"PretrainModelLayer_0/BertModelLayer_0/FC_0.pooled_fc.w_0"))
encoder_num = 0
for f in file_list:
if not f.startswith("encoder_layer"):
continue
layer_num = f.split('_')[2]
if int(layer_num) > encoder_num:
encoder_num = int(layer_num)
encoder_num += 1
for i in range(encoder_num):
encoder_dir = "EncoderSubLayer_" + str(i)
os.makedirs(
os.path.join(dygraph_model_path,
"PretrainModelLayer_0/BertModelLayer_0/" +
"EncoderLayer_0/", encoder_dir))
os.makedirs(
os.path.join(dygraph_model_path,
"PretrainModelLayer_0/BertModelLayer_0/" +
"EncoderLayer_0/", encoder_dir +
"/PositionwiseFeedForwardLayer_0"))
os.makedirs(
os.path.join(
dygraph_model_path, "PretrainModelLayer_0/BertModelLayer_0/" +
"EncoderLayer_0/", encoder_dir + "/MultiHeadAttentionLayer_0"))
os.makedirs(
os.path.join(
dygraph_model_path, "PretrainModelLayer_0/BertModelLayer_0/" +
"EncoderLayer_0/", encoder_dir + "/PrePostProcessLayer_1"))
os.makedirs(
os.path.join(
dygraph_model_path, "PretrainModelLayer_0/BertModelLayer_0/" +
"EncoderLayer_0/", encoder_dir + "/PrePostProcessLayer_3"))
encoder_map_dict = {
"ffn_fc_0.b_0":
("PositionwiseFeedForwardLayer_0", "FC_0.ffn_fc_0.b_0"),
"ffn_fc_0.w_0":
("PositionwiseFeedForwardLayer_0", "FC_0.ffn_fc_0.w_0"),
"ffn_fc_1.b_0":
("PositionwiseFeedForwardLayer_0", "FC_1.ffn_fc_1.b_0"),
"ffn_fc_1.w_0":
("PositionwiseFeedForwardLayer_0", "FC_1.ffn_fc_1.w_0"),
"multi_head_att_key_fc.b_0":
("MultiHeadAttentionLayer_0", "FC_1.key_fc.b_0"),
"multi_head_att_key_fc.w_0":
("MultiHeadAttentionLayer_0", "FC_1.key_fc.w_0"),
"multi_head_att_output_fc.b_0":
("MultiHeadAttentionLayer_0", "FC_3.output_fc.b_0"),
"multi_head_att_output_fc.w_0":
("MultiHeadAttentionLayer_0", "FC_3.output_fc.w_0"),
"multi_head_att_query_fc.b_0":
("MultiHeadAttentionLayer_0", "FC_0.query_fc.b_0"),
"multi_head_att_query_fc.w_0":
("MultiHeadAttentionLayer_0", "FC_0.query_fc.w_0"),
"multi_head_att_value_fc.b_0":
("MultiHeadAttentionLayer_0", "FC_2.value_fc.b_0"),
"multi_head_att_value_fc.w_0":
("MultiHeadAttentionLayer_0", "FC_2.value_fc.w_0"),
"post_att_layer_norm_bias":
("PrePostProcessLayer_1", "LayerNorm_0.post_att_layer_norm_bias"),
"post_att_layer_norm_scale":
("PrePostProcessLayer_1", "LayerNorm_0.post_att_layer_norm_scale"),
"post_ffn_layer_norm_bias":
("PrePostProcessLayer_3", "LayerNorm_0.post_ffn_layer_norm_bias"),
"post_ffn_layer_norm_scale":
("PrePostProcessLayer_3", "LayerNorm_0.post_ffn_layer_norm_scale")
}
for f in file_list:
if not f.startswith("encoder_layer"):
continue
layer_num = f.split('_')[2]
suffix_name = "_".join(f.split('_')[3:])
in_dir = encoder_map_dict[suffix_name][0]
rename = encoder_map_dict[suffix_name][1]
encoder_layer = "EncoderSubLayer_" + layer_num
shutil.copyfile(
os.path.join(static_model_path, f),
os.path.join(
dygraph_model_path,
"PretrainModelLayer_0/BertModelLayer_0/EncoderLayer_0/" +
encoder_layer + "/" + in_dir + "/" + rename))
if __name__ == "__main__":
if len(sys.argv) < 3:
usage()
sys.exit(1)
static_model_path = sys.argv[1]
dygraph_model_path = sys.argv[2]
convert_static_to_dygraph(static_model_path, dygraph_model_path)
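# Example invocation (the directory names below are placeholders, not paths
# shipped with this repository):
#
#   python convert_static_to_dygraph.py ./static_bert_params ./dygraph_bert_params
#
# Each static parameter file is copied into the dygraph layer hierarchy, e.g.
# encoder_layer_0_ffn_fc_0.w_0 is written to
#   PretrainModelLayer_0/BertModelLayer_0/EncoderLayer_0/EncoderSubLayer_0/PositionwiseFeedForwardLayer_0/FC_0.ffn_fc_0.w_0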
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid as fluid
def cast_fp16_to_fp32(i, o, prog):
prog.global_block().append_op(
type="cast",
inputs={"X": i},
outputs={"Out": o},
attrs={
"in_dtype": fluid.core.VarDesc.VarType.FP16,
"out_dtype": fluid.core.VarDesc.VarType.FP32
})
def cast_fp32_to_fp16(i, o, prog):
prog.global_block().append_op(
type="cast",
inputs={"X": i},
outputs={"Out": o},
attrs={
"in_dtype": fluid.core.VarDesc.VarType.FP32,
"out_dtype": fluid.core.VarDesc.VarType.FP16
})
def copy_to_master_param(p, block):
v = block.vars.get(p.name, None)
if v is None:
raise ValueError("no param name %s found!" % p.name)
new_p = fluid.framework.Parameter(
block=block,
shape=v.shape,
dtype=fluid.core.VarDesc.VarType.FP32,
type=v.type,
lod_level=v.lod_level,
stop_gradient=p.stop_gradient,
trainable=p.trainable,
optimize_attr=p.optimize_attr,
regularizer=p.regularizer,
gradient_clip_attr=p.gradient_clip_attr,
error_clip=p.error_clip,
name=v.name + ".master")
return new_p
def create_master_params_grads(params_grads, main_prog, startup_prog,
loss_scaling):
master_params_grads = []
tmp_role = main_prog._current_role
OpRole = fluid.core.op_proto_and_checker_maker.OpRole
main_prog._current_role = OpRole.Backward
for p, g in params_grads:
# create master parameters
master_param = copy_to_master_param(p, main_prog.global_block())
startup_master_param = startup_prog.global_block()._clone_variable(
master_param)
startup_p = startup_prog.global_block().var(p.name)
cast_fp16_to_fp32(startup_p, startup_master_param, startup_prog)
# cast fp16 gradients to fp32 before apply gradients
if g.name.find("layer_norm") > -1:
if loss_scaling > 1:
scaled_g = g / float(loss_scaling)
else:
scaled_g = g
master_params_grads.append([p, scaled_g])
continue
master_grad = fluid.layers.cast(g, "float32")
if loss_scaling > 1:
master_grad = master_grad / float(loss_scaling)
master_params_grads.append([master_param, master_grad])
main_prog._current_role = tmp_role
return master_params_grads
def master_param_to_train_param(master_params_grads, params_grads, main_prog):
for idx, m_p_g in enumerate(master_params_grads):
train_p, _ = params_grads[idx]
if train_p.name.find("layer_norm") > -1:
continue
with main_prog._optimized_guard([m_p_g[0], m_p_g[1]]):
cast_fp32_to_fp16(m_p_g[0], train_p, main_prog)
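# Hedged usage sketch of the fp16 "master weights" helpers above. The toy
# network, names and shapes below are assumptions made only to show where each
# helper is called; in the real BERT pretraining program the forward pass is
# built in float16, which is what makes the fp16 -> fp32 casts meaningful.
def build_mixed_precision_programs(loss_scaling=8.0):
    train_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(train_prog, startup_prog):
        x = fluid.layers.data(name="x", shape=[16], dtype="float32")
        loss = fluid.layers.mean(fluid.layers.fc(input=x, size=1))
        if loss_scaling > 1:
            loss = loss * loss_scaling  # scale the loss so fp16 grads stay representable
        optimizer = fluid.optimizer.Adam(learning_rate=1e-4)
        params_grads = optimizer.backward(loss, startup_program=startup_prog)
        # keep fp32 master copies of the params, un-scale and cast the grads
        master_params_grads = create_master_params_grads(
            params_grads, train_prog, startup_prog, loss_scaling)
        optimizer.apply_gradients(master_params_grads)
        # copy the updated fp32 master params back into the fp16 train params
        master_param_to_train_param(master_params_grads, params_grads, train_prog)
    return train_prog, startup_prog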