Commit 5f1a2063 authored by: smallv0221

Merge branch 'develop' of https://github.com/PaddlePaddle/models into yxp1216

......@@ -15,6 +15,9 @@ import paddle
import paddle.fluid as fluid
import utils.utility as utility
AMP_MODEL_LIST = ["ResNet50", "SE_ResNet50_vd"]
def _calc_label_smoothing_loss(softmax_out, label, class_dim, epsilon):
"""Calculate label smoothing loss
......@@ -34,11 +37,12 @@ def _calc_label_smoothing_loss(softmax_out, label, class_dim, epsilon):
def _basic_model(data, model, args, is_train):
image = data[0]
label = data[1]
if args.model == "ResNet50":
if args.model in AMP_MODEL_LIST:
image_data = (fluid.layers.cast(image, 'float16')
if args.use_pure_fp16 and not args.use_dali else image)
image_transpose = fluid.layers.transpose(
image_data, [0, 2, 3, 1]) if args.data_format == 'NHWC' else image_data
image_data,
[0, 2, 3, 1]) if args.data_format == 'NHWC' else image_data
image_transpose.stop_gradient = image.stop_gradient
net_out = model.net(input=image_transpose,
class_dim=args.class_dim,
......@@ -55,8 +59,8 @@ def _basic_model(data, model, args, is_train):
else:
cost = fluid.layers.cross_entropy(input=softmax_out, label=label)
target_cost = (fluid.layers.reduce_sum(cost) if args.use_pure_fp16
else fluid.layers.mean(cost))
target_cost = (fluid.layers.reduce_sum(cost)
if args.use_pure_fp16 else fluid.layers.mean(cost))
acc_top1 = fluid.layers.accuracy(input=softmax_out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(
input=softmax_out, label=label, k=min(5, args.class_dim))
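# k is capped at class_dim so top-5 accuracy stays well-defined when there are fewer than 5 classes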
......@@ -98,11 +102,12 @@ def _mixup_model(data, model, args, is_train):
y_b = data[2]
lam = data[3]
if args.model == "ResNet50":
if args.model in AMP_MODEL_LIST:
image_data = (fluid.layers.cast(image, 'float16')
if args.use_pure_fp16 and not args.use_dali else image)
image_transpose = fluid.layers.transpose(
image_data, [0, 2, 3, 1]) if args.data_format == 'NHWC' else image_data
image_data,
[0, 2, 3, 1]) if args.data_format == 'NHWC' else image_data
image_transpose.stop_gradient = image.stop_gradient
net_out = model.net(input=image_transpose,
class_dim=args.class_dim,
......
......@@ -21,8 +21,10 @@ import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
import math
__all__ = ["SE_ResNet_vd", "SE_ResNet18_vd","SE_ResNet34_vd", "SE_ResNet50_vd", "SE_ResNet101_vd", "SE_ResNet152_vd",
"SE_ResNet200_vd"]
__all__ = [
"SE_ResNet_vd", "SE_ResNet18_vd", "SE_ResNet34_vd", "SE_ResNet50_vd",
"SE_ResNet101_vd", "SE_ResNet152_vd", "SE_ResNet200_vd"
]
class SE_ResNet_vd():
......@@ -30,7 +32,7 @@ class SE_ResNet_vd():
self.layers = layers
self.is_3x3 = is_3x3
def net(self, input, class_dim=1000):
def net(self, input, class_dim=1000, data_format="NCHW"):
is_3x3 = self.is_3x3
layers = self.layers
supported_layers = [18, 34, 50, 101, 152, 200]
......@@ -51,67 +53,95 @@ class SE_ResNet_vd():
reduction_ratio = 16
if is_3x3 == False:
conv = self.conv_bn_layer(
input=input, num_filters=64, filter_size=7, stride=2, act='relu')
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu',
data_format=data_format)
else:
conv = self.conv_bn_layer(
input=input, num_filters=32, filter_size=3, stride=2, act='relu', name='conv1_1')
input=input,
num_filters=32,
filter_size=3,
stride=2,
act='relu',
name='conv1_1',
data_format=data_format)
conv = self.conv_bn_layer(
input=conv, num_filters=32, filter_size=3, stride=1, act='relu', name='conv1_2')
input=conv,
num_filters=32,
filter_size=3,
stride=1,
act='relu',
name='conv1_2',
data_format=data_format)
conv = self.conv_bn_layer(
input=conv, num_filters=64, filter_size=3, stride=1, act='relu', name='conv1_3')
input=conv,
num_filters=64,
filter_size=3,
stride=1,
act='relu',
name='conv1_3',
data_format=data_format)
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
pool_type='max',
data_format=data_format)
if layers >= 50:
for block in range(len(depth)):
for i in range(depth[block]):
if layers in [101, 152, 200] and block == 2:
if i == 0:
conv_name="res"+str(block+2)+"a"
conv_name = "res" + str(block + 2) + "a"
else:
conv_name="res"+str(block+2)+"b"+str(i)
conv_name = "res" + str(block + 2) + "b" + str(i)
else:
conv_name="res"+str(block+2)+chr(97+i)
conv_name = "res" + str(block + 2) + chr(97 + i)
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
if_first=block==i==0,
if_first=block == i == 0,
reduction_ratio=reduction_ratio,
name=conv_name)
name=conv_name,
data_format=data_format)
else:
for block in range(len(depth)):
for i in range(depth[block]):
conv_name="res"+str(block+2)+chr(97+i)
conv_name = "res" + str(block + 2) + chr(97 + i)
conv = self.basic_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
if_first=block==i==0,
if_first=block == i == 0,
reduction_ratio=reduction_ratio,
name=conv_name)
name=conv_name,
data_format=data_format)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True)
input=conv,
pool_size=7,
pool_type='avg',
global_pooling=True,
data_format=data_format)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
out = fluid.layers.fc(input=pool,
out = fluid.layers.fc(
input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), name='fc6_weights'),
initializer=fluid.initializer.Uniform(-stdv, stdv),
name='fc6_weights'),
bias_attr=ParamAttr(name='fc6_offset'))
return out
def conv_bn_layer(self,
input,
num_filters,
......@@ -119,7 +149,8 @@ class SE_ResNet_vd():
stride=1,
groups=1,
act=None,
name=None):
name=None,
data_format='NCHW'):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
......@@ -129,18 +160,20 @@ class SE_ResNet_vd():
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
bias_attr=False,
data_format=data_format)
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
return fluid.layers.batch_norm(input=conv,
return fluid.layers.batch_norm(
input=conv,
act=act,
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
moving_variance_name=bn_name + '_variance',
data_layout=data_format)
def conv_bn_layer_new(self,
input,
......@@ -149,13 +182,16 @@ class SE_ResNet_vd():
stride=1,
groups=1,
act=None,
name=None):
pool = fluid.layers.pool2d(input=input,
name=None,
data_format='NCHW'):
pool = fluid.layers.pool2d(
input=input,
pool_size=2,
pool_stride=2,
pool_padding=0,
pool_type='avg',
ceil_mode=True)
ceil_mode=True,
data_format=data_format)
conv = fluid.layers.conv2d(
input=pool,
......@@ -166,130 +202,198 @@ class SE_ResNet_vd():
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
bias_attr=False,
data_format=data_format)
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
return fluid.layers.batch_norm(input=conv,
return fluid.layers.batch_norm(
input=conv,
act=act,
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
moving_variance_name=bn_name + '_variance',
data_layout=data_format)
def shortcut(self, input, ch_out, stride, name, if_first=False):
def shortcut(self,
input,
ch_out,
stride,
name,
if_first=False,
data_format='NCHW'):
if data_format == 'NCHW':
ch_in = input.shape[1]
else:
ch_in = input.shape[-1]
if ch_in != ch_out or stride != 1:
if if_first:
return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
return self.conv_bn_layer(
input,
ch_out,
1,
stride,
name=name,
data_format=data_format)
else:
return self.conv_bn_layer_new(input, ch_out, 1, stride, name=name)
return self.conv_bn_layer_new(
input,
ch_out,
1,
stride,
name=name,
data_format=data_format)
elif if_first:
return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
return self.conv_bn_layer(
input, ch_out, 1, stride, name=name, data_format=data_format)
else:
return input
def bottleneck_block(self, input, num_filters, stride, name, if_first, reduction_ratio):
def bottleneck_block(self, input, num_filters, stride, name, if_first,
reduction_ratio, data_format):
conv0 = self.conv_bn_layer(
input=input,
num_filters=num_filters,
filter_size=1,
act='relu',
name=name+"_branch2a")
name=name + "_branch2a",
data_format=data_format)
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
act='relu',
name=name+"_branch2b")
conv2 =self.conv_bn_layer(
name=name + "_branch2b",
data_format=data_format)
conv2 = self.conv_bn_layer(
input=conv1,
num_filters=num_filters * 4,
filter_size=1,
act=None,
name=name+"_branch2c")
name=name + "_branch2c",
data_format=data_format)
scale = self.squeeze_excitation(
input=conv2,
num_channels=num_filters * 4,
reduction_ratio=reduction_ratio,
name='fc_'+name)
name='fc_' + name,
data_format=data_format)
short = self.shortcut(input, num_filters * 4, stride, if_first=if_first, name=name + "_branch1")
short = self.shortcut(
input,
num_filters * 4,
stride,
if_first=if_first,
name=name + "_branch1",
data_format=data_format)
return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
def basic_block(self, input, num_filters, stride, name, if_first, reduction_ratio):
conv0 = self.conv_bn_layer(input=input,
def basic_block(self, input, num_filters, stride, name, if_first,
reduction_ratio, data_format):
conv0 = self.conv_bn_layer(
input=input,
num_filters=num_filters,
filter_size=3,
act='relu',
stride=stride,
name=name+"_branch2a")
conv1 = self.conv_bn_layer(input=conv0,
name=name + "_branch2a",
data_format=data_format)
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
act=None,
name=name+"_branch2b")
name=name + "_branch2b",
data_format=data_format)
scale = self.squeeze_excitation(
input=conv1,
num_channels=num_filters,
reduction_ratio=reduction_ratio,
name='fc_'+name)
short = self.shortcut(input,
name='fc_' + name,
data_format=data_format)
short = self.shortcut(
input,
num_filters,
stride,
if_first=if_first,
name=name + "_branch1")
name=name + "_branch1",
data_format=data_format)
return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
def squeeze_excitation(self, input, num_channels, reduction_ratio, name=None):
def squeeze_excitation(self,
input,
num_channels,
reduction_ratio,
name=None,
data_format='NCHW'):
pool = fluid.layers.pool2d(
input=input, pool_size=0, pool_type='avg', global_pooling=True)
input=input,
pool_size=0,
pool_type='avg',
global_pooling=True,
data_format=data_format)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
squeeze = fluid.layers.fc(input=pool,
squeeze = fluid.layers.fc(
input=pool,
size=num_channels // reduction_ratio,
act='relu',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(
-stdv, stdv),name=name+'_sqz_weights'),
bias_attr=ParamAttr(name=name+'_sqz_offset'))
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name + '_sqz_weights'),
bias_attr=ParamAttr(name=name + '_sqz_offset'))
stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
excitation = fluid.layers.fc(input=squeeze,
excitation = fluid.layers.fc(
input=squeeze,
size=num_channels,
act='sigmoid',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name+'_exc_weights'),
bias_attr=ParamAttr(name=name+'_exc_offset'))
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
name=name + '_exc_weights'),
bias_attr=ParamAttr(name=name + '_exc_offset'))
# scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
# return scale
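# elementwise_mul with axis=0 broadcasts the (N, C) excitation over the
# channel dimension, which assumes channels-first layout; NHWC input is
# therefore transposed to NCHW for the multiply and transposed back after.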
input_in = fluid.layers.transpose(
input, [0, 3, 1, 2]) if data_format == 'NHWC' else input
input_in.stop_gradient = input.stop_gradient
scale = fluid.layers.elementwise_mul(x=input_in, y=excitation, axis=0)
scale_out = fluid.layers.transpose(
scale, [0, 2, 3, 1]) if data_format == 'NHWC' else scale
scale_out.stop_gradient = scale.stop_gradient
return scale_out
def SE_ResNet18_vd():
model = SE_ResNet_vd(layers=18, is_3x3 = True)
model = SE_ResNet_vd(layers=18, is_3x3=True)
return model
def SE_ResNet34_vd():
model = SE_ResNet_vd(layers=34, is_3x3 = True)
model = SE_ResNet_vd(layers=34, is_3x3=True)
return model
def SE_ResNet50_vd():
model = SE_ResNet_vd(layers=50, is_3x3 = True)
model = SE_ResNet_vd(layers=50, is_3x3=True)
return model
def SE_ResNet101_vd():
model = SE_ResNet_vd(layers=101, is_3x3 = True)
model = SE_ResNet_vd(layers=101, is_3x3=True)
return model
def SE_ResNet152_vd():
model = SE_ResNet_vd(layers=152, is_3x3 = True)
model = SE_ResNet_vd(layers=152, is_3x3=True)
return model
def SE_ResNet200_vd():
model = SE_ResNet_vd(layers=200, is_3x3 = True)
model = SE_ResNet_vd(layers=200, is_3x3=True)
return model
#SE_ResNet50_vd
export CUDA_VISIBLE_DEVICES=0
export FLAGS_conv_workspace_size_limit=4000 #MB
export FLAGS_cudnn_exhaustive_search=1
export FLAGS_cudnn_batchnorm_spatial_persistent=1
DATA_DIR="Your image dataset path, e.g. /work/datasets/ILSVRC2012/"
DATA_FORMAT="NHWC"
USE_AMP=true #whether to use amp
USE_PURE_FP16=false
MULTI_PRECISION=${USE_PURE_FP16}
USE_DALI=true
USE_ADDTO=true
if ${USE_ADDTO}; then
export FLAGS_max_inplace_grad_add=8
fi
if ${USE_DALI}; then
export FLAGS_fraction_of_gpu_memory_to_use=0.8
fi
python train.py \
--model=SE_ResNet50_vd \
--data_dir=${DATA_DIR} \
--batch_size=128 \
--lr_strategy=cosine_decay \
--use_amp=${USE_AMP} \
--use_pure_fp16=${USE_PURE_FP16} \
--multi_precision=${MULTI_PRECISION} \
--data_format=${DATA_FORMAT} \
--lr=0.1 \
--num_epochs=200 \
--model_save_dir=output/ \
--l2_decay=1e-4 \
--use_mixup=False \
--use_label_smoothing=True \
--label_smoothing_epsilon=0.1 \
--enable_addto=${USE_ADDTO} \
--use_dali=${USE_DALI} \
--image_shape 4 224 224 \
--fuse_bn_act_ops=true \
--fuse_bn_add_act_ops=true \
--fuse_elewise_add_act_ops=true \
......@@ -268,6 +268,7 @@ def train(args):
#NOTE: this is for benchmark
if args.max_iter and total_batch_num == args.max_iter:
return
reader_cost_averager.record(time.time() - batch_start)
train_batch_metrics = exe.run(compiled_train_prog,
......
简体中文 | [English](./README_en.md)
# PaddleNLP
![License](https://img.shields.io/badge/license-Apache%202-red.svg)
......@@ -6,9 +8,9 @@
## Introduction
PaddleNLP aims to accelerate NLP applications through a powerful model zoo, easy-to-use APIs, and detailed tutorials. It is also the NLP best practice for the PaddlePaddle 2.0 API system.
**TODO:** Add an architecture chart for PaddleNLP
**This project is still UNDER ACTIVE DEVELOPMENT.**
## Features
......@@ -29,7 +31,7 @@ PaddleNLP aims to accelerate NLP applications by powerful model zoo, easy-to-use
* paddlepaddle >= 2.0.0-rc1
```
pip install paddlenlp
pip install "paddlenlp>=2.0.0a"
```
## Quick Start
......@@ -37,36 +39,72 @@ pip install paddlenlp
### Quick Dataset Loading
```python
dataset = paddlenlp.datasets.ChnSentiCorp(split="train")
from paddlenlp.datasets import ChnSentiCorp
train_ds, test_ds = ChnSentiCorp.get_datasets(['train','test'])
```
### Reusable Text Embedding
For more Dataset API usage, please refer to [Dataset API](./docs/datasets.md).
### Chinese Text Embedding Loading
```python
wordemb = paddlenlp.embedding.SkipGram("Text8")
wordemb("language")
>>> [1.0, 2.0, 3.0, ...., 5.0, 6.0]
from paddlenlp.embeddings import TokenEmbedding
wordemb = TokenEmbedding("w2v.baidu_encyclopedia.target.word-word.dim300")
print(wordemb.cosine_sim("国王", "王后"))
>>> 0.63395125
wordemb.cosine_sim("艺术", "火车")
>>> 0.14792643
```
### High Quality Chinese Pre-trained Model
For more token embedding usage, please refer to [examples/word_embedding](./examples/word_embedding/README.md).
### One-Line Classical Model Building
```python
from paddlenlp.transformer import ErnieModel
ernie = ErnieModel.from_pretrained("ernie-1.0-chinese")
sequence_output, pooled_output = ernie.forward(input_ids, segment_ids)
from paddlenlp.models import Ernie, Senta, SimNet
ernie = Ernie("ernie-1.0", num_classes=2, task="seq-cls")
senta = Senta(network="bow", vocab_size=1024, num_classes=2)
simnet = SimNet(network="gru", vocab_size=1024, num_classes=2)
```
### Rich Chinese Pre-trained Models
```python
from paddlenlp.transformers import ErnieModel, BertModel, RobertaModel, ElectraModel
ernie = ErnieModel.from_pretrained('ernie-1.0')
bert = BertModel.from_pretrained('bert-wwm-chinese')
roberta = RobertaModel.from_pretrained('roberta-wwm-ext')
electra = ElectraModel.from_pretrained('chinese-electra-small')
```
For more pretrained model selection, please refer to [Pretrained-Models](./docs/transformers.md).
## API Usage
* [Transformer API](./docs/transformers.md)
* [Dataset API](./docs/datasets.md)
* [Embedding API](./docs/embeddings.md)
* [Metrics API](./docs/metrics.md)
* [Models API](./docs/models.md)
## Tutorials
List our notebook tutorials based on AI Studio.
Please refer to our official AI Studio account for more interactive tutorials: [PaddleNLP on AI Studio](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/574995)
## Community
* SIG for Pretrained Model Contribution
* SIG for Dataset Integration
## FAQ
* SIG for Tutorial Writing
## License
PaddleNLP is provided under the [Apache-2.0 license](./LICENSE).
PaddleNLP is provided under the [Apache-2.0 License](./LICENSE).
English | [简体中文](./README.md)
# PaddleNLP
![License](https://img.shields.io/badge/license-Apache%202-red.svg)
![python version](https://img.shields.io/badge/python-3.6+-orange.svg)
![support os](https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-yellow.svg)
## Introduction
PaddleNLP aims to accelerate NLP applications through a powerful model zoo, easy-to-use APIs, and detailed tutorials. It is also the NLP best practice for the PaddlePaddle 2.0 API system.
**This project is still UNDER ACTIVE DEVELOPMENT.**
## Features
* **Rich and Powerful Model Zoo**
  - Our Model Zoo covers mainstream NLP applications, including Lexical Analysis, Syntactic Parsing, Machine Translation, Text Classification, Text Generation, Text Matching, General Dialogue, Question Answering, etc.
* **Easy-to-use API**
  - The API is fully integrated with the PaddlePaddle high-level API system. It minimizes the number of user actions required for common use cases like data loading, text pre-processing, training, and evaluation, which enables you to deal with text problems more productively.
* **High Performance and Large-scale Training**
  - We provide a highly optimized distributed training implementation for BERT with the Fleet API; it can fully utilize GPU clusters for large-scale model pre-training. Please refer to our [benchmark](./benchmark/bert) for more information.
* **Detailed Tutorials and Industrial Practices**
  - We offer detailed and interactive notebook tutorials to show you the best practices of PaddlePaddle 2.0.
## Installation
### Prerequisites
* python >= 3.6
* paddlepaddle >= 2.0.0-rc1
```
pip install "paddlenlp>=2.0.0a"
```
## Quick Start
### Quick Dataset Loading
```python
from paddlenlp.datasets import ChnSentiCorp
train_ds, test_ds = ChnSentiCorp.get_datasets(['train','test'])
```
### Chinese Text Embedding Loading
```python
from paddlenlp.embeddings import TokenEmbedding
wordemb = TokenEmbedding("w2v.baidu_encyclopedia.target.word-word.dim300")
print(wordemb.cosine_sim("国王", "王后"))
>>> 0.63395125
wordemb.cosine_sim("艺术", "火车")
>>> 0.14792643
```
### One-Line Classical Model Building
```python
from paddlenlp.models import Ernie
ernie = Ernie(Ernie.Task.SeqCls)
ernie.forward(input_ids, segment_ids)
```
### Rich Chinese Pre-trained Models
```python
from paddlenlp.transformers import ErnieModel, BertModel, RobertaModel, ElectraModel
ernie = ErnieModel.from_pretrained('ernie-1.0')
bert = BertModel.from_pretrained('bert-wwm-chinese')
roberta = RobertaModel.from_pretrained('roberta-wwm-ext')
electra = ElectraModel.from_pretrained('chinese-electra-small')
```
For more pretrained model selection, please refer to [PretrainedModels](./paddlenlp/transformers/README.md)
## API Usage
* [Transformer API](./docs/transformers.md)
* [Dataset API](./docs/datasets.md)
* [Embedding API](./docs/embeddings.md)
* [Metrics API](./docs/metrics.md)
* [Models API](./docs/models.md)
## Tutorials
Please refer to our official AI Studio account for more interactive tutorials: [PaddleNLP on AI Studio](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/574995)
## Community
* SIG for Pretrained Model Contribution
* SIG for Dataset Integration
* SIG for Tutorial Writing
## License
PaddleNLP is provided under the [Apache-2.0 License](./LICENSE).
# BERT Benchmark with Fleet API
BERT - Bidirectional Encoder Representations from Transformers [paper link](https://arxiv.org/abs/1810.04805)
PaddlePaddle implements both BERT pre-training (Pre-training) and downstream-task fine-tuning (Fine-tuning). For pre-training, single-node and multi-node versions are provided, along with a mixed-precision interface for acceleration; choose according to the needs of the task.
## Datasets
### Pre-training dataset
First set up the runtime environment:
```shell
export PYTHONPATH=/home/fangzeyang/PaddleNLP
export DATA_DIR=/home/fangzeyang/bert_data/wikicorpus_en
```
### Fine-tuning dataset
For fine-tuning, datasets from the GLUE benchmark are integrated. The code examples mainly provide fine-tuning for the SST-2 and QNLI downstream tasks, and the example programs download the training and test data automatically, so model training can be run directly (a loading sketch follows). For details on the GLUE data and task types, see the [GLUE tasks page](https://gluebenchmark.com/tasks).
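As a loading sketch (assuming these GLUE datasets expose the same `get_datasets` interface that `ChnSentiCorp` uses elsewhere in this repository):

```python
from paddlenlp.datasets import GlueSST2

# Hypothetical usage, mirroring the ChnSentiCorp.get_datasets pattern
# shown in the PaddleNLP README; the split names are illustrative.
train_ds, dev_ds = GlueSST2.get_datasets(["train", "dev"])
```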
## Pretraining in NLP tasks
## Pre-training task training
### Environment variable setup
1. Install paddlenlp:
```shell
pip install paddlenlp==2.0.0a2 -i https://pypi.org/simple
```
2. Set the environment variable for the pre-training data path:
```shell
export DATA_DIR=${HOME}/bert_data/wikicorpus_en
```
### Run the model training script
```shell
# 1. For single-node multi-GPU or multi-node multi-GPU training, use the following command:
......@@ -40,18 +51,32 @@ python ./run_pretrain_single.py \
--output_dir ./tmp2/ \
--logging_steps 1 \
--save_steps 20000 \
--max_steps 1000000 \
--use_amp True\
--enable_addto True
--max_steps 1000000
```
## Fine-tuning for NLP tasks
### Training speed comparison
The speed comparison uses a BERT-base model, comparing single-node single-GPU and multi-node multi-GPU (4 nodes, 32 GPUs) settings. All GPU tests are based on Tesla V100-SXM2-16GB with the following configuration:
- InfiniBand 100 Gb/sec (4X EDR), Mellanox Technologies MT27700 Family
- 48 CPU(s), Intel(R) Xeon(R) Gold 5118 CPU @ 2.30GHz
- Memory 500G
- Ubuntu 16.04.4 LTS (GNU/Linux 4.4.0-116-generic x86_64)
- CUDA Version: 10.2, Driver API Version: 10.2, Driver Version: 440.33.01
- cuDNN Version: 7.6
- PaddlePaddle version: paddlepaddle-gpu >= 2.0.0rc1
- PaddleNLP version: paddlenlp >= 2.0.0a2
Speed is measured as the number of samples the pre-training model processes per second (see the sketch after this list), where
- batch_size=64
- max_seq_length=128
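Expressed as a formula (an illustrative sketch with made-up numbers, not output of the benchmark scripts):

```python
# Throughput here = samples the pre-training job processes per second.
batch_size_per_gpu = 64   # as stated above
num_gpus = 8              # illustrative; e.g. one 8-GPU node
num_steps = 100
elapsed_seconds = 42.0    # measured wall-clock time for num_steps
throughput = batch_size_per_gpu * num_gpus * num_steps / elapsed_seconds
print("%.1f samples/sec" % throughput)
```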
### Sentence and sentence-pair classification tasks
The detailed speed comparison is as follows:
| node num | node num | gpu num/node | gpu num | batch_size/gpu |Throughput | Speedup |
|----------| -------- | -------------| ------- | -------- | ----------| ------- |
Taking the GLUE/SST-2 task as an example, fine-tuning is launched as follows (`paddlenlp` must already be installed or findable on `PYTHONPATH`):
## Fine-tuning task training
After BERT pre-training is complete, the pre-trained parameters can be used for fine-tuning on specific NLP tasks. The following shows how to fine-tune a classification task with an open-source pre-trained model.
```shell
export CUDA_VISIBLE_DEVICES=0
......@@ -68,7 +93,6 @@ python -u ./run_glue.py \
--logging_steps 1 \
--save_steps 500 \
--output_dir ./tmp/$TASK_NAME/
```
The parameters are explained as follows:
......
......@@ -44,6 +44,11 @@ def parse_args():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--select_device",
default="gpu",
type=str,
help="The device that selecting for the training, must be gpu/xpu.")
parser.add_argument(
"--task_name",
default=None,
......@@ -253,7 +258,7 @@ def convert_example(example,
def do_train(args):
# Set the paddle execution environment
paddle.enable_static()
place = paddle.CUDAPlace(0)
place = paddle.set_device(args.select_device)
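# paddle.set_device accepts a device string such as "gpu" or "xpu" and
# returns the corresponding Place used by the executor.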
set_seed(args)
# Create the main_program for the training and dev_program for the validation
......
......@@ -13,6 +13,8 @@
# limitations under the License.
import argparse
import collections
import itertools
import os
import random
import time
......@@ -21,6 +23,7 @@ from functools import partial
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import distutils.util
import paddle
import paddle.distributed.fleet as fleet
......@@ -35,6 +38,11 @@ MODEL_CLASSES = {"bert": (BertForPretraining, BertTokenizer)}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--select_device",
default="gpu",
type=str,
help="The device that selecting for the training, must be gpu/xpu.")
parser.add_argument(
"--model_type",
default=None,
......@@ -117,6 +125,22 @@ def parse_args():
help="Save checkpoint every X updates steps.")
parser.add_argument(
"--seed", type=int, default=42, help="Random seed for initialization")
parser.add_argument(
"--use_amp",
type=distutils.util.strtobool,
default=False,
help="Enable mixed precision training.")
parser.add_argument(
"--enable_addto",
type=distutils.util.strtobool,
default=False,
help="Whether to enable the addto strategy for gradient accumulation or not. This is only used for AMP training."
)
parser.add_argument(
"--scale_loss",
type=float,
default=1.0,
help="The value of scale_loss for fp16.")
args = parser.parse_args()
return args
......@@ -149,6 +173,26 @@ def reset_program_state_dict(model, state_dict):
return new_state_dict
def build_compiled_program(main_program, loss):
exec_strategy = paddle.static.ExecutionStrategy()
exec_strategy.num_threads = 1
exec_strategy.num_iteration_per_drop_scope = 10000
build_strategy = paddle.static.BuildStrategy()
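# NOTE: `args` is resolved from the enclosing module scope (the parse_args
# result); it is not a parameter of this helper.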
build_strategy.enable_addto = args.enable_addto
main_program = paddle.static.CompiledProgram(
main_program).with_data_parallel(
loss_name=loss.name,
exec_strategy=exec_strategy,
build_strategy=build_strategy)
return main_program
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
paddle.seed(seed)
class WorkerInitObj(object):
def __init__(self, seed):
self.seed = seed
......@@ -158,16 +202,10 @@ class WorkerInitObj(object):
random.seed(self.seed + id)
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
paddle.seed(seed)
def do_train(args):
# Initialize the paddle and paddle fleet execution environment
paddle.enable_static()
place = paddle.CUDAPlace(int(os.environ.get('FLAGS_selected_gpus', 0)))
place = paddle.set_device(args.select_device)
fleet.init(is_collective=True)
# Create the random seed for the worker
......@@ -175,6 +213,8 @@ def do_train(args):
worker_init = WorkerInitObj(args.seed + fleet.worker_index())
# Define the input data in the static mode
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
data_holders = create_data_holder(args)
[
......@@ -186,9 +226,10 @@ def do_train(args):
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
model = BertForPretraining(
BertModel(**model_class.pretrained_init_configuration[
args.model_name_or_path]))
config = model_class.pretrained_init_configuration[args.model_name_or_path]
if config["vocab_size"] % 8 != 0:
config["vocab_size"] += 8 - (config["vocab_size"] % 8)
model = BertForPretraining(BertModel(**config))
criterion = BertPretrainingCriterion(model.bert.config["vocab_size"])
prediction_scores, seq_relationship_score = model(
input_ids=input_ids,
......@@ -219,7 +260,14 @@ def do_train(args):
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
])
if args.use_amp:
amp_list = paddle.fluid.contrib.mixed_precision.AutoMixedPrecisionLists(
custom_white_list=['softmax', 'layer_norm', 'gelu'])
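# softmax, layer_norm and gelu are promoted to the AMP white list so they
# run in fp16; by default AMP would not place them there.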
optimizer = paddle.fluid.contrib.mixed_precision.decorate(
optimizer,
amp_list,
init_loss_scaling=args.scale_loss,
use_dynamic_loss_scaling=True)
# Use the fleet api to compile the distributed optimizer
strategy = fleet.DistributedStrategy()
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
......@@ -227,13 +275,14 @@ def do_train(args):
# Define the Executor for running the static model
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
exe.run(startup_program)
state_dict = model.state_dict()
# Use the state dict to update the parameter
reset_state_dict = reset_program_state_dict(model, state_dict)
paddle.static.set_program_state(paddle.static.default_main_program(),
reset_state_dict)
paddle.static.set_program_state(main_program, reset_state_dict)
# Construct the compiled program
main_program = build_compiled_program(main_program, loss)
pool = ThreadPoolExecutor(1)
global_step = 0
......@@ -269,7 +318,7 @@ def do_train(args):
for step, batch in enumerate(train_data_loader):
global_step += 1
loss_return = exe.run(paddle.static.default_main_program(),\
loss_return = exe.run(main_program,
feed=batch,
fetch_list=[loss])
# In the new 2.0 api, must call this function to change the learning_rate
......
......@@ -34,6 +34,11 @@ MODEL_CLASSES = {"bert": (BertForPretraining, BertTokenizer)}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--select_device",
default="gpu",
type=str,
help="The device that selecting for the training, must be gpu/xpu.")
parser.add_argument(
"--model_type",
default=None,
......@@ -132,16 +137,11 @@ def parse_args():
type=float,
default=1.0,
help="The value of scale_loss for fp16.")
parser.add_argument(
"--use_dynamic_loss_scaling",
type=distutils.util.strtobool,
default=True,
help="Whether to use dynamic loss scaling.")
args = parser.parse_args()
return args
def construct_compiled_program(main_program, loss):
def build_compiled_program(main_program, loss):
exec_strategy = paddle.static.ExecutionStrategy()
exec_strategy.num_threads = 1
exec_strategy.num_iteration_per_drop_scope = 10000
......@@ -179,7 +179,7 @@ def set_seed(seed):
def do_train(args):
# Initialize the paddle execution environment
paddle.enable_static()
place = paddle.CUDAPlace(0)
place = paddle.set_device(args.select_device)
# Set the random seed
set_seed(args.seed)
......@@ -233,12 +233,12 @@ def do_train(args):
])
if args.use_amp:
amp_list = paddle.fluid.contrib.mixed_precision.AutoMixedPrecisionLists(
custom_white_list=['layer_norm', 'softmax'])
custom_white_list=['layer_norm', 'softmax', 'gelu'])
optimizer = paddle.fluid.contrib.mixed_precision.decorate(
optimizer,
amp_list,
init_loss_scaling=args.scale_loss,
use_dynamic_loss_scaling=args.use_dynamic_loss_scaling)
use_dynamic_loss_scaling=True)
optimizer.minimize(loss)
# Define the Executor for running the static model
......@@ -250,7 +250,7 @@ def do_train(args):
reset_state_dict = reset_program_state_dict(model, state_dict)
paddle.static.set_program_state(main_program, reset_state_dict)
# Construct the compiled program
main_program = construct_compiled_program(main_program, loss)
main_program = build_compiled_program(main_program, loss)
global_step = 0
tic_train = time.time()
epoch = 0
......
......@@ -20,5 +20,5 @@ python3 train.py
``` shell
cd dygraph/
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
python3 -m paddle.distributed.launch --selected_gpus=0,1,2,3,4,5,6,7 train.py
python3 -m paddle.distributed.launch --gpus "0,1,2,3,4,5,6,7" train.py
```
......@@ -10,35 +10,21 @@ init_from_pretrain_model: ""
init_from_params: "./trained_models/step_final/"
# The directory for saving model
save_model: "trained_models"
# The directory for saving inference model.
inference_model_dir: "infer_model"
# Set seed for CE or debug
random_seed: None
# The pattern to match training data files.
training_file: "../gen_data/wmt14_ende_data_bpe/train.tok.clean.bpe.33708.en-de"
# The pattern to match validation data files.
validation_file: "../gen_data/wmt14_ende_data_bpe/newstest2013.tok.bpe.33708.en-de"
# The pattern to match test data files.
predict_file: "../gen_data/wmt14_ende_data_bpe/newstest2014.tok.bpe.33708.en-de"
# The file to output the translation results of predict_file to.
output_file: "predict.txt"
# The path of vocabulary file of source language.
src_vocab_fpath: "../gen_data/wmt14_ende_data_bpe/vocab_all.bpe.33708"
# The path of vocabulary file of target language.
trg_vocab_fpath: "../gen_data/wmt14_ende_data_bpe/vocab_all.bpe.33708"
# The <bos>, <eos> and <unk> tokens in the dictionary.
special_token: ["<s>", "<e>", "<unk>"]
# The directory to store data.
root: None
# Whether to use cuda
use_gpu: True
# Args for reader, see reader.py for details
token_delimiter: " "
use_token_batch: True
pool_size: 200000
sort_type: "global"
shuffle: False
shuffle_batch: False
batch_size: 4096
infer_batch_size: 16
......
......@@ -52,8 +52,7 @@ def do_predict(args):
paddle.set_device(place)
# Define data loader
(test_loader,
test_steps_fn), trg_idx2word = reader.create_infer_loader(args)
test_loader, to_tokens = reader.create_infer_loader(args)
# Define model
transformer = InferTransformerModel(
......@@ -90,6 +89,7 @@ def do_predict(args):
transformer.eval()
f = open(args.output_file, "w")
with paddle.no_grad():
for (src_word, ) in test_loader:
finished_seq = transformer(src_word=src_word)
finished_seq = finished_seq.numpy().transpose([0, 2, 1])
......@@ -98,7 +98,7 @@ def do_predict(args):
if beam_idx >= args.n_best:
break
id_list = post_process_seq(beam, args.bos_idx, args.eos_idx)
word_list = [trg_idx2word[id] for id in id_list]
word_list = to_tokens(id_list)
sequence = " ".join(word_list) + "\n"
f.write(sequence)
......
......@@ -51,9 +51,7 @@ def do_train(args):
paddle.seed(random_seed)
# Define data loader
(train_loader, train_steps_fn), (eval_loader,
eval_steps_fn) = reader.create_data_loader(
args, trainer_count, rank)
(train_loader), (eval_loader) = reader.create_data_loader(args)
# Define model
transformer = TransformerModel(
......@@ -176,7 +174,6 @@ def do_train(args):
if step_idx % args.save_step == 0 and step_idx != 0:
# Validation
if args.validation_file:
transformer.eval()
total_sum_cost = 0
total_token_num = 0
......
#! /usr/bin/env bash
set -e
OUTPUT_DIR=$PWD/gen_data
###############################################################################
# change these variables for other WMT data
###############################################################################
OUTPUT_DIR_DATA="${OUTPUT_DIR}/wmt14_ende_data"
OUTPUT_DIR_BPE_DATA="${OUTPUT_DIR}/wmt14_ende_data_bpe"
LANG1="en"
LANG2="de"
# each of TRAIN_DATA: data_url data_file_lang1 data_file_lang2
TRAIN_DATA=(
'http://statmt.org/wmt13/training-parallel-europarl-v7.tgz'
'europarl-v7.de-en.en' 'europarl-v7.de-en.de'
'http://statmt.org/wmt13/training-parallel-commoncrawl.tgz'
'commoncrawl.de-en.en' 'commoncrawl.de-en.de'
'http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz'
'news-commentary-v12.de-en.en' 'news-commentary-v12.de-en.de'
)
# each of DEV_TEST_DATA: data_url data_file_lang1 data_file_lang2
# source & reference
DEV_TEST_DATA=(
'http://data.statmt.org/wmt17/translation-task/dev.tgz'
'newstest2013-ref.de.sgm' 'newstest2013-src.en.sgm'
'http://statmt.org/wmt14/test-full.tgz'
'newstest2014-deen-ref.en.sgm' 'newstest2014-deen-src.de.sgm'
)
###############################################################################
###############################################################################
# change these variables for other WMT data
###############################################################################
# OUTPUT_DIR_DATA="${OUTPUT_DIR}/wmt14_enfr_data"
# OUTPUT_DIR_BPE_DATA="${OUTPUT_DIR}/wmt14_enfr_data_bpe"
# LANG1="en"
# LANG2="fr"
# # each of TRAIN_DATA: data_url data_tgz data_file
# TRAIN_DATA=(
# 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz'
# 'commoncrawl.fr-en.en' 'commoncrawl.fr-en.fr'
# 'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz'
# 'training/europarl-v7.fr-en.en' 'training/europarl-v7.fr-en.fr'
# 'http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz'
# 'training/news-commentary-v9.fr-en.en' 'training/news-commentary-v9.fr-en.fr'
# 'http://www.statmt.org/wmt10/training-giga-fren.tar'
# 'giga-fren.release2.fixed.en.*' 'giga-fren.release2.fixed.fr.*'
# 'http://www.statmt.org/wmt13/training-parallel-un.tgz'
# 'un/undoc.2000.fr-en.en' 'un/undoc.2000.fr-en.fr'
# )
# # each of DEV_TEST_DATA: data_url data_tgz data_file_lang1 data_file_lang2
# DEV_TEST_DATA=(
# 'http://data.statmt.org/wmt16/translation-task/dev.tgz'
# '.*/newstest201[45]-fren-ref.en.sgm' '.*/newstest201[45]-fren-src.fr.sgm'
# 'http://data.statmt.org/wmt16/translation-task/test.tgz'
# '.*/newstest2016-fren-ref.en.sgm' '.*/newstest2016-fren-src.fr.sgm'
# )
###############################################################################
mkdir -p $OUTPUT_DIR_DATA $OUTPUT_DIR_BPE_DATA
# Extract training data
for ((i=0;i<${#TRAIN_DATA[@]};i+=3)); do
data_url=${TRAIN_DATA[i]}
data_tgz=${data_url##*/} # training-parallel-commoncrawl.tgz
data=${data_tgz%.*} # training-parallel-commoncrawl
data_lang1=${TRAIN_DATA[i+1]}
data_lang2=${TRAIN_DATA[i+2]}
if [ ! -e ${OUTPUT_DIR_DATA}/${data_tgz} ]; then
echo "Download "${data_url}
echo "Dir "${OUTPUT_DIR_DATA}/${data_tgz}
wget -O ${OUTPUT_DIR_DATA}/${data_tgz} ${data_url}
fi
if [ ! -d ${OUTPUT_DIR_DATA}/${data} ]; then
echo "Extract "${data_tgz}
mkdir -p ${OUTPUT_DIR_DATA}/${data}
tar_type=${data_tgz:0-3}
if [ ${tar_type} == "tar" ]; then
tar -xvf ${OUTPUT_DIR_DATA}/${data_tgz} -C ${OUTPUT_DIR_DATA}/${data}
else
tar -xvzf ${OUTPUT_DIR_DATA}/${data_tgz} -C ${OUTPUT_DIR_DATA}/${data}
fi
fi
# concatenate all training data
for data_lang in $data_lang1 $data_lang2; do
for f in `find ${OUTPUT_DIR_DATA}/${data} -regex ".*/${data_lang}"`; do
data_dir=`dirname $f`
data_file=`basename $f`
f_base=${f%.*}
f_ext=${f##*.}
if [ $f_ext == "gz" ]; then
gunzip $f
l=${f_base##*.}
f_base=${f_base%.*}
else
l=${f_ext}
fi
if [ $i -eq 0 ]; then
cat ${f_base}.$l > ${OUTPUT_DIR_DATA}/train.$l
else
cat ${f_base}.$l >> ${OUTPUT_DIR_DATA}/train.$l
fi
done
done
done
# Clone mosesdecoder
if [ ! -d ${OUTPUT_DIR}/mosesdecoder ]; then
echo "Cloning moses for data processing"
git clone https://github.com/moses-smt/mosesdecoder.git ${OUTPUT_DIR}/mosesdecoder
fi
# Extract develop and test data
dev_test_data=""
for ((i=0;i<${#DEV_TEST_DATA[@]};i+=3)); do
data_url=${DEV_TEST_DATA[i]}
data_tgz=${data_url##*/} # training-parallel-commoncrawl.tgz
data=${data_tgz%.*} # training-parallel-commoncrawl
data_lang1=${DEV_TEST_DATA[i+1]}
data_lang2=${DEV_TEST_DATA[i+2]}
if [ ! -e ${OUTPUT_DIR_DATA}/${data_tgz} ]; then
echo "Download "${data_url}
wget -O ${OUTPUT_DIR_DATA}/${data_tgz} ${data_url}
fi
if [ ! -d ${OUTPUT_DIR_DATA}/${data} ]; then
echo "Extract "${data_tgz}
mkdir -p ${OUTPUT_DIR_DATA}/${data}
tar_type=${data_tgz:0-3}
if [ ${tar_type} == "tar" ]; then
tar -xvf ${OUTPUT_DIR_DATA}/${data_tgz} -C ${OUTPUT_DIR_DATA}/${data}
else
tar -xvzf ${OUTPUT_DIR_DATA}/${data_tgz} -C ${OUTPUT_DIR_DATA}/${data}
fi
fi
for data_lang in $data_lang1 $data_lang2; do
for f in `find ${OUTPUT_DIR_DATA}/${data} -regex ".*/${data_lang}"`; do
echo "input-from-sgm"
data_dir=`dirname $f`
data_file=`basename $f`
data_out=`echo ${data_file} | cut -d '-' -f 1` # newstest2016
l=`echo ${data_file} | cut -d '.' -f 2` # en
dev_test_data="${dev_test_data}\|${data_out}" # to make regexp
if [ ! -e ${OUTPUT_DIR_DATA}/${data_out}.$l ]; then
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< $f > ${OUTPUT_DIR_DATA}/${data_out}.$l
fi
done
done
done
# Tokenize data
for l in ${LANG1} ${LANG2}; do
for f in `ls ${OUTPUT_DIR_DATA}/*.$l | grep "\(train\|newstest2013\)\.$l$"`; do
f_base=${f%.*} # dir/train dir/newstest2013
f_out=$f_base.tok.$l
f_tmp=$f_base.tmp.$l
if [ ! -e $f_out ]; then
echo "Tokenize "$f
cat $f | \
${OUTPUT_DIR}/mosesdecoder/scripts/tokenizer/normalize-punctuation.perl $l | \
${OUTPUT_DIR}/mosesdecoder/scripts/tokenizer/remove-non-printing-char.perl | \
${OUTPUT_DIR}/mosesdecoder/scripts/tokenizer/tokenizer.perl -a -l $l -threads 8 >> $f_out
echo $f_out
fi
done
done
for l in ${LANG1} ${LANG2}; do
for f in `ls ${OUTPUT_DIR_DATA}/*.$l | grep "\(newstest2014\)\.$l$"`; do
f_base=${f%.*} # dir/newstest2014
f_out=$f_base.tok.$l
if [ ! -e $f_out ]; then
echo "Tokenize "$f
cat $f | \
${OUTPUT_DIR}/mosesdecoder/scripts/tokenizer/tokenizer.perl -a -l $l -threads 8 >> $f_out
echo $f_out
fi
done
done
# Clean data
for f in ${OUTPUT_DIR_DATA}/train.${LANG1} ${OUTPUT_DIR_DATA}/train.tok.${LANG1}; do
f_base=${f%.*} # dir/train dir/train.tok
f_out=${f_base}.clean
if [ ! -e $f_out.${LANG1} ] && [ ! -e $f_out.${LANG2} ]; then
echo "Clean "${f_base}
${OUTPUT_DIR}/mosesdecoder/scripts/training/clean-corpus-n.perl $f_base ${LANG1} ${LANG2} ${f_out} 1 256
fi
done
python -m pip install subword-nmt
# Generate BPE data and vocabulary
for num_operations in 33708; do
if [ ! -e ${OUTPUT_DIR_BPE_DATA}/bpe.${num_operations} ]; then
echo "Learn BPE with ${num_operations} merge operations"
cat ${OUTPUT_DIR_DATA}/train.tok.clean.${LANG1} ${OUTPUT_DIR_DATA}/train.tok.clean.${LANG2} | \
subword-nmt learn-bpe -s $num_operations > ${OUTPUT_DIR_BPE_DATA}/bpe.${num_operations}
fi
for l in ${LANG1} ${LANG2}; do
for f in `ls ${OUTPUT_DIR_DATA}/*.$l | grep "\(train${dev_test_data}\)\.tok\(\.clean\)\?\.$l$"`; do
f_base=${f%.*} # dir/train.tok dir/train.tok.clean dir/newstest2016.tok
f_base=${f_base##*/} # train.tok train.tok.clean newstest2016.tok
f_out=${OUTPUT_DIR_BPE_DATA}/${f_base}.bpe.${num_operations}.$l
if [ ! -e $f_out ]; then
echo "Apply BPE to "$f
subword-nmt apply-bpe -c ${OUTPUT_DIR_BPE_DATA}/bpe.${num_operations} < $f > $f_out
fi
done
done
if [ ! -e ${OUTPUT_DIR_BPE_DATA}/vocab.bpe.${num_operations} ]; then
echo "Create vocabulary for BPE data"
cat ${OUTPUT_DIR_BPE_DATA}/train.tok.clean.bpe.${num_operations}.${LANG1} ${OUTPUT_DIR_BPE_DATA}/train.tok.clean.bpe.${num_operations}.${LANG2} | \
subword-nmt get-vocab | cut -f1 -d ' ' > ${OUTPUT_DIR_BPE_DATA}/vocab.bpe.${num_operations}
fi
done
# Adapt to the reader
for f in ${OUTPUT_DIR_BPE_DATA}/*.bpe.${num_operations}.${LANG1}; do
f_base=${f%.*} # dir/train.tok.clean.bpe.32000 dir/newstest2016.tok.bpe.32000
f_out=${f_base}.${LANG1}-${LANG2}
if [ ! -e $f_out ]; then
paste -d '\t' $f_base.${LANG1} $f_base.${LANG2} > $f_out
fi
done
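# Prepend the special tokens <s>, <e> and <unk> so they occupy the first
# three ids of the final vocabulary.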
if [ ! -e ${OUTPUT_DIR_BPE_DATA}/vocab_all.bpe.${num_operations} ]; then
sed '1i\<s>\n<e>\n<unk>' ${OUTPUT_DIR_BPE_DATA}/vocab.bpe.${num_operations} > ${OUTPUT_DIR_BPE_DATA}/vocab_all.bpe.${num_operations}
fi
echo "All done."
......@@ -22,79 +22,105 @@ from functools import partial
import numpy as np
from paddle.io import BatchSampler, DataLoader, Dataset
from paddlenlp.data import Pad
from paddlenlp.datasets import WMT14ende
from paddlenlp.data.sampler import SamplerHelper
def create_infer_loader(args):
dataset = TransformerDataset(
fpattern=args.predict_file,
src_vocab_fpath=args.src_vocab_fpath,
trg_vocab_fpath=args.trg_vocab_fpath,
token_delimiter=args.token_delimiter,
start_mark=args.special_token[0],
end_mark=args.special_token[1],
unk_mark=args.special_token[2])
args.src_vocab_size, args.trg_vocab_size, args.bos_idx, args.eos_idx, \
args.unk_idx = dataset.get_vocab_summary()
trg_idx2word = TransformerDataset.load_dict(
dict_path=args.trg_vocab_fpath, reverse=True)
batch_sampler = TransformerBatchSampler(
dataset=dataset,
use_token_batch=False,
batch_size=args.infer_batch_size,
max_length=args.max_length)
def min_max_filer(data, max_len, min_len=0):
# 1 for special tokens.
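# keep the pair only when both lengths (special token included) fall inside [min_len, max_len]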
data_min_len = min(len(data[0]), len(data[1])) + 1
data_max_len = max(len(data[0]), len(data[1])) + 1
return (data_min_len >= min_len) and (data_max_len <= max_len)
def create_data_loader(args):
root = None if args.root == "None" else args.root
(src_vocab, trg_vocab) = WMT14ende.get_vocab(root=root)
args.src_vocab_size, args.trg_vocab_size = len(src_vocab), len(trg_vocab)
transform_func = WMT14ende.get_default_transform_func(root=root)
datasets = [
WMT14ende.get_datasets(
mode=m, transform_func=transform_func) for m in ["train", "dev"]
]
def _max_token_fn(current_idx, current_batch_size, tokens_sofar,
data_source):
return max(tokens_sofar,
len(data_source[current_idx][0]) + 1,
len(data_source[current_idx][1]) + 1)
def _key(size_so_far, minibatch_len):
return size_so_far * minibatch_len
data_loaders = [(None)] * 2
for i, dataset in enumerate(datasets):
m = dataset.mode
dataset = dataset.filter(
partial(
min_max_filer, max_len=args.max_length))
sampler = SamplerHelper(dataset)
src_key = (lambda x, data_source: len(data_source[x][0]) + 1)
if args.sort_type == SortType.GLOBAL:
buffer_size = -1
trg_key = (lambda x, data_source: len(data_source[x][1]) + 1)
# Sort twice
sampler = sampler.sort(
key=trg_key, buffer_size=buffer_size).sort(
key=src_key, buffer_size=buffer_size)
else:
sampler = sampler.shuffle()
if args.sort_type == SortType.POOL:
buffer_size = args.pool_size
sampler = sampler.sort(key=src_key, buffer_size=buffer_size)
batch_sampler = sampler.batch(
batch_size=args.batch_size,
drop_last=False,
batch_size_fn=_max_token_fn,
key=_key)
if m == "train":
batch_sampler = batch_sampler.shard()
data_loader = DataLoader(
dataset=dataset,
batch_sampler=batch_sampler,
collate_fn=partial(
prepare_infer_input,
prepare_train_input,
bos_idx=args.bos_idx,
eos_idx=args.eos_idx,
pad_idx=args.eos_idx),
pad_idx=args.bos_idx),
num_workers=0,
return_list=True)
data_loaders = (data_loader, batch_sampler.__len__)
return data_loaders, trg_idx2word
data_loaders[i] = (data_loader)
return data_loaders
def create_data_loader(args, world_size=1, rank=0):
data_loaders = [(None, None)] * 2
data_files = [args.training_file, args.validation_file
] if args.validation_file else [args.training_file]
for i, data_file in enumerate(data_files):
dataset = TransformerDataset(
fpattern=data_file,
src_vocab_fpath=args.src_vocab_fpath,
trg_vocab_fpath=args.trg_vocab_fpath,
token_delimiter=args.token_delimiter,
start_mark=args.special_token[0],
end_mark=args.special_token[1],
unk_mark=args.special_token[2])
args.src_vocab_size, args.trg_vocab_size, args.bos_idx, args.eos_idx, \
args.unk_idx = dataset.get_vocab_summary()
batch_sampler = TransformerBatchSampler(
dataset=dataset,
batch_size=args.batch_size,
pool_size=args.pool_size,
sort_type=args.sort_type,
shuffle=args.shuffle,
shuffle_batch=args.shuffle_batch,
use_token_batch=args.use_token_batch,
max_length=args.max_length,
distribute_mode=True if i == 0 else False,
world_size=world_size,
rank=rank)
def create_infer_loader(args):
root = None if args.root == "None" else args.root
(src_vocab, trg_vocab) = WMT14ende.get_vocab(root=root)
args.src_vocab_size, args.trg_vocab_size = len(src_vocab), len(trg_vocab)
transform_func = WMT14ende.get_default_transform_func(root=root)
dataset = WMT14ende.get_datasets(
mode="test", transform_func=transform_func).filter(
partial(
min_max_filer, max_len=args.max_length))
batch_sampler = SamplerHelper(dataset).batch(
batch_size=args.infer_batch_size, drop_last=False)
data_loader = DataLoader(
dataset=dataset,
batch_sampler=batch_sampler,
collate_fn=partial(
prepare_train_input,
prepare_infer_input,
bos_idx=args.bos_idx,
eos_idx=args.eos_idx,
pad_idx=args.bos_idx),
num_workers=0,
return_list=True)
data_loaders[i] = (data_loader, batch_sampler.__len__)
return data_loaders
return data_loader, trg_vocab.to_tokens
def prepare_train_input(insts, bos_idx, eos_idx, pad_idx):
......@@ -126,301 +152,3 @@ class SortType(object):
GLOBAL = 'global'
POOL = 'pool'
NONE = "none"
class Converter(object):
def __init__(self, vocab, beg, end, unk, delimiter, add_beg, add_end):
self._vocab = vocab
self._beg = beg
self._end = end
self._unk = unk
self._delimiter = delimiter
self._add_beg = add_beg
self._add_end = add_end
def __call__(self, sentence):
return ([self._beg] if self._add_beg else []) + [
self._vocab.get(w, self._unk)
for w in sentence.split(self._delimiter)
] + ([self._end] if self._add_end else [])
class ComposedConverter(object):
def __init__(self, converters):
self._converters = converters
def __call__(self, fields):
return [
converter(field)
for field, converter in zip(fields, self._converters)
]
class SentenceBatchCreator(object):
def __init__(self, batch_size):
self.batch = []
self._batch_size = batch_size
def append(self, info):
self.batch.append(info)
if len(self.batch) == self._batch_size:
tmp = self.batch
self.batch = []
return tmp
class TokenBatchCreator(object):
def __init__(self, batch_size):
self.batch = []
self.max_len = -1
self._batch_size = batch_size
def append(self, info):
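# Close the current batch once adding this sample would push the padded
# token count (max sample length * batch count) past the token budget;
# the sample then starts a new batch.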
cur_len = info.max_len
max_len = max(self.max_len, cur_len)
if max_len * (len(self.batch) + 1) > self._batch_size:
result = self.batch
self.batch = [info]
self.max_len = cur_len
return result
else:
self.max_len = max_len
self.batch.append(info)
class SampleInfo(object):
def __init__(self, i, lens):
self.i = i
# take bos and eos into account
self.min_len = min(lens[0] + 1, lens[1] + 1)
self.max_len = max(lens[0] + 1, lens[1] + 1)
self.src_len = lens[0]
self.trg_len = lens[1]
class MinMaxFilter(object):
def __init__(self, max_len, min_len, underlying_creator):
self._min_len = min_len
self._max_len = max_len
self._creator = underlying_creator
def append(self, info):
if info.max_len > self._max_len or info.min_len < self._min_len:
return
else:
return self._creator.append(info)
@property
def batch(self):
return self._creator.batch
class TransformerDataset(Dataset):
def __init__(self,
src_vocab_fpath,
trg_vocab_fpath,
fpattern,
field_delimiter="\t",
token_delimiter=" ",
start_mark="<s>",
end_mark="<e>",
unk_mark="<unk>",
trg_fpattern=None):
self._src_vocab = self.load_dict(src_vocab_fpath)
self._trg_vocab = self.load_dict(trg_vocab_fpath)
self._bos_idx = self._src_vocab[start_mark]
self._eos_idx = self._src_vocab[end_mark]
self._unk_idx = self._src_vocab[unk_mark]
self._field_delimiter = field_delimiter
self._token_delimiter = token_delimiter
self.load_src_trg_ids(fpattern, trg_fpattern)
def load_src_trg_ids(self, fpattern, trg_fpattern=None):
src_converter = Converter(
vocab=self._src_vocab,
beg=self._bos_idx,
end=self._eos_idx,
unk=self._unk_idx,
delimiter=self._token_delimiter,
add_beg=False,
add_end=False)
trg_converter = Converter(
vocab=self._trg_vocab,
beg=self._bos_idx,
end=self._eos_idx,
unk=self._unk_idx,
delimiter=self._token_delimiter,
add_beg=False,
add_end=False)
converters = ComposedConverter([src_converter, trg_converter])
self._src_seq_ids = []
self._trg_seq_ids = []
self._sample_infos = []
slots = [self._src_seq_ids, self._trg_seq_ids]
for i, line in enumerate(self._load_lines(fpattern, trg_fpattern)):
lens = []
for field, slot in zip(converters(line), slots):
slot.append(field)
lens.append(len(field))
self._sample_infos.append(SampleInfo(i, lens))
def _load_lines(self, fpattern, trg_fpattern=None):
fpaths = glob.glob(fpattern)
fpaths = sorted(fpaths) # TODO: Add custom sort
assert len(fpaths) > 0, "no matching file to the provided data path"
(f_mode, f_encoding, endl) = ("r", "utf8", "\n")
if trg_fpattern is None:
for fpath in fpaths:
with io.open(fpath, f_mode, encoding=f_encoding) as f:
for line in f:
fields = line.strip(endl).split(self._field_delimiter)
yield fields
else:
# separated source and target language data files
# assume we can get aligned data by sort the two language files
trg_fpaths = glob.glob(trg_fpattern)
trg_fpaths = sorted(trg_fpaths)
assert len(fpaths) == len(
trg_fpaths
), "the number of source language data files must equal \
with that of source language"
for fpath, trg_fpath in zip(fpaths, trg_fpaths):
with io.open(fpath, f_mode, encoding=f_encoding) as f:
with io.open(
trg_fpath, f_mode, encoding=f_encoding) as trg_f:
for line in zip(f, trg_f):
fields = [field.strip(endl) for field in line]
yield fields
@staticmethod
def load_dict(dict_path, reverse=False):
word_dict = {}
(f_mode, f_encoding, endl) = ("r", "utf8", "\n")
with io.open(dict_path, f_mode, encoding=f_encoding) as fdict:
for idx, line in enumerate(fdict):
if reverse:
word_dict[idx] = line.strip(endl)
else:
word_dict[line.strip(endl)] = idx
return word_dict
def get_vocab_summary(self):
return len(self._src_vocab), len(
self._trg_vocab), self._bos_idx, self._eos_idx, self._unk_idx
def __getitem__(self, idx):
return (self._src_seq_ids[idx], self._trg_seq_ids[idx]
) if self._trg_seq_ids else self._src_seq_ids[idx]
def __len__(self):
return len(self._sample_infos)
class TransformerBatchSampler(BatchSampler):
def __init__(self,
dataset,
batch_size,
pool_size=10000,
sort_type=SortType.NONE,
min_length=0,
max_length=100,
shuffle=False,
shuffle_batch=False,
use_token_batch=False,
clip_last_batch=False,
distribute_mode=True,
seed=0,
world_size=1,
rank=0):
for arg, value in locals().items():
if arg != "self":
setattr(self, "_" + arg, value)
self._random = np.random
self._random.seed(seed)
# for multi-devices
self._distribute_mode = distribute_mode
self._nranks = world_size
self._local_rank = rank
def __iter__(self):
# global sort or global shuffle
if self._sort_type == SortType.GLOBAL:
infos = sorted(self._dataset._sample_infos, key=lambda x: x.trg_len)
infos = sorted(infos, key=lambda x: x.src_len)
else:
if self._shuffle:
infos = self._dataset._sample_infos
self._random.shuffle(infos)
else:
infos = self._dataset._sample_infos
if self._sort_type == SortType.POOL:
reverse = True
for i in range(0, len(infos), self._pool_size):
# to avoid placing short next to long sentences
reverse = not reverse
infos[i:i + self._pool_size] = sorted(
infos[i:i + self._pool_size],
key=lambda x: x.max_len,
reverse=reverse)
batches = []
batch_creator = TokenBatchCreator(
self.
_batch_size) if self._use_token_batch else SentenceBatchCreator(
self._batch_size * self._nranks)
batch_creator = MinMaxFilter(self._max_length, self._min_length,
batch_creator)
for info in infos:
batch = batch_creator.append(info)
if batch is not None:
batches.append(batch)
if not self._clip_last_batch and len(batch_creator.batch) != 0:
batches.append(batch_creator.batch)
if self._shuffle_batch:
self._random.shuffle(batches)
if not self._use_token_batch:
# When producing batches by sequence count, neighboring batches that
# would be fed and run in parallel should have similar lengths (and thus
# similar computational cost) after shuffling, so we shuffle them as a
# whole and split them here.
batches = [[
batch[self._batch_size * i:self._batch_size * (i + 1)]
for i in range(self._nranks)
] for batch in batches]
batches = list(itertools.chain.from_iterable(batches))
self.batch_number = (len(batches) + self._nranks - 1) // self._nranks
# for multi-device
for batch_id, batch in enumerate(batches):
if not self._distribute_mode or (
batch_id % self._nranks == self._local_rank):
batch_indices = [info.i for info in batch]
yield batch_indices
if self._distribute_mode and len(batches) % self._nranks != 0:
if self._local_rank >= len(batches) % self._nranks:
# use previous data to pad
yield batch_indices
def __len__(self):
if hasattr(self, "batch_number"): #
return self.batch_number
if not self._use_token_batch:
batch_number = (
len(self._dataset) + self._batch_size * self._nranks - 1) // (
self._batch_size * self._nranks)
else:
# for uncertain batch number, the actual value is self.batch_number
batch_number = sys.maxsize
return batch_number
......@@ -63,9 +63,7 @@ def do_train(args):
paddle.seed(random_seed)
# Define data loader
# NOTE: To guarantee all data is involved, use world_size=1 and rank=0.
(train_loader, train_steps_fn), (
eval_loader, eval_steps_fn) = reader.create_data_loader(args)
(train_loader), (eval_loader) = reader.create_data_loader(args)
train_program = paddle.static.Program()
startup_program = paddle.static.Program()
......
# PaddleNLP Datasets
## 阅读理解
| 数据集名称 | 简介 | 调用方法 |
| ---- | ----- | ------ |
| [SQaAD](https://rajpurkar.github.io/SQuAD-explorer/) | 斯坦福问答数据集,包括SQaAD1.1和SQaAD2.0|`paddlenlp.datasets.SQuAD` |
| [DuReader-yesno](https://aistudio.baidu.com/aistudio/competition/detail/49) | 千言数据集:阅读理解,判断答案极性|`paddlenlp.datasets.DuReaderYesNo` |
| [DuReader-robust](https://aistudio.baidu.com/aistudio/competition/detail/49) | 千言数据集:阅读理解,答案原文抽取|`paddlenlp.datasets.DuReaderRobust` |
## Text Classification
| Dataset | Description | API |
| ---- | --------- | ------ |
| [CoLA](https://nyu-mll.github.io/CoLA/) | Single-sentence, binary classification: is the sentence grammatically acceptable | `paddlenlp.datasets.GlueCoLA`|
| [SST-2](https://nlp.stanford.edu/sentiment/index.html) | Single-sentence, binary classification: sentence sentiment polarity | `paddlenlp.datasets.GlueSST2`|
| [MRPC](https://microsoft.com/en-us/download/details.aspx?id=52398) | Sentence-pair matching, binary classification: do the two sentences mean the same thing | `paddlenlp.datasets.GlueMRPC`|
| [STSB](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark) | Sentence-pair similarity scoring, scores from 1 to 5 | `paddlenlp.datasets.GlueSTSB`|
| [QQP](https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs) | Binary classification: are the two questions equivalent | `paddlenlp.datasets.GlueQQP`|
| [MNLI](http://www.nyu.edu/projects/bowman/multinli/) | Sentence pairs of a premise and a hypothesis whose relation is entailment, contradiction, or neutral; three-way classification | `paddlenlp.datasets.GlueMNLI`|
| [QNLI](https://rajpurkar.github.io/SQuAD-explorer/) | Binary classification: does the sentence entail (answer) the question | `paddlenlp.datasets.GlueQNLI`|
| [RTE](https://aclweb.org/aclwiki/Recognizing_Textual_Entailment) | Binary classification: does sentence 1 entail sentence 2 | `paddlenlp.datasets.GlueRTE`|
| [WNLI](https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html) | Binary classification: are the two sentences related | `paddlenlp.datasets.GlueWNLI`|
| [LCQMC](http://icrc.hitsz.edu.cn/Article/show/171.html) | A Large-scale Chinese Question Matching Corpus, a semantic matching dataset | `paddlenlp.datasets.LCQMC`|
| [ChnSentiCorp](https://github.com/SophonPlus/ChineseNlpCorpus/blob/master/datasets/ChnSentiCorp_htl_all/intro.ipynb) | Chinese review sentiment analysis corpus | `paddlenlp.datasets.ChnSentiCorp`|
| [IMDB](https://www.imdb.com/interfaces/) | IMDB movie review sentiment analysis dataset | `paddle.text.datasets.Imdb`|
| [Movielens](https://grouplens.org/datasets/movielens/) | MovieLens 1M movie rating dataset | `paddle.text.datasets.Movielens`|
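All of these datasets load through a uniform interface. For example, the ChnSentiCorp splits can be fetched with `get_datasets`, the same call used in the fine-tuning example later in this document (a minimal sketch; the exact return type depends on the PaddleNLP version):
```python
from paddlenlp.datasets import ChnSentiCorp

# Fetch the train/dev/test splits of the Chinese sentiment corpus.
train_ds, dev_ds, test_ds = ChnSentiCorp.get_datasets(['train', 'dev', 'test'])
print(len(train_ds), train_ds.get_labels())
```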
## Sequence Labeling
| Dataset | Description | API |
| ---- | --------- | ------ |
| [Conll05](https://www.cs.upc.edu/~srlconll/spec.html) | Semantic role labeling dataset | `paddle.text.datasets.Conll05st`|
| [MSRA_NER](https://github.com/lemonhu/NER-BERT-pytorch/tree/master/data/msra) | MSRA named entity recognition dataset | `paddlenlp.datasets.MSRA_NER`|
| [Express_Ner](https://aistudio.baidu.com/aistudio/projectdetail/131360?channelType=0&channel=-1) | Express waybill named entity recognition dataset | [express_ner](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/named_entity_recognition/express_ner/data)|
## Machine Translation
| Dataset | Description | API |
| ---- | --------- | ------ |
| [IWSLT15](https://workshop2015.iwslt.org/) | IWSLT'15 English-Vietnamese translation dataset | `paddlenlp.datasets.IWSLT15`|
| [WMT14](http://www.statmt.org/wmt14/translation-task.html) | WMT14 EN-DE English-German translation dataset | `paddlenlp.datasets.WMT14ende`|
## Time Series Forecasting
| Dataset | Description | API |
| ---- | --------- | ------ |
| [CSSE COVID-19](https://github.com/CSSEGISandData/COVID-19) | COVID-19 case data from the Johns Hopkins University Center for Systems Science and Engineering | [time_series](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/time_series)|
| [UCIHousing](https://archive.ics.uci.edu/ml/datasets/Housing) | Boston housing price prediction dataset | `paddle.text.datasets.UCIHousing`|
## Corpora
| Dataset | Description | API |
| ---- | --------- | ------ |
| [yahoo](https://webscope.sandbox.yahoo.com/catalog.php?datatype=l&guccounter=1) | Yahoo English corpus | [VAE](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/text_generation/vae-seq2seq)|
| [PTB](http://www.fit.vutbr.cz/~imikolov/rnnlm/) | Penn Treebank Dataset | `paddlenlp.datasets.PTB`|
| [1 Billion words](https://opensource.google/projects/lm-benchmark) | 1 Billion Word Language Model Benchmark R13 Output benchmark corpus | [ELMo](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/language_model/elmo)|
# Embedding Model Summary
PaddleNLP provides a number of open-source pretrained embedding models. To load one, simply pass the pretrained model's name to `paddlenlp.embeddings.TokenEmbedding`. The pretrained embedding models supported by PaddleNLP are listed below; their names are used as the argument to `paddlenlp.embeddings.TokenEmbedding`. Names follow the scheme ${training model}.${corpus}.${embedding type}.${co-occurrence type}.dim${dimension}. Three training models are available: Word2Vec (w2v, trained with skip-gram), GloVe (glove), and FastText (fasttext).
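For instance, loading one of the Word2Vec models listed below takes a single call (a hedged sketch assuming the PaddleNLP 2.0-style `TokenEmbedding` API with its `search` and `cosine_sim` helpers):
```python
from paddlenlp.embeddings import TokenEmbedding

# Load the 300-dim skip-gram vectors trained on Baidu Encyclopedia.
token_embedding = TokenEmbedding(
    embedding_name="w2v.baidu_encyclopedia.target.word-word.dim300")
print(token_embedding.search("中国").shape)        # look up one word's vector
print(token_embedding.cosine_sim("中国", "中华"))  # similarity of two words
```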
## Chinese Word Embeddings
The pretrained models below are provided by [Chinese-Word-Vectors](https://github.com/Embedding/Chinese-Word-Vectors).
For each corpus, multiple target word embeddings are trained with different types of context; the columns from the second onward indicate the context type. The context categories are:
* Word: the context used to predict the target word is a single word.
* Word + Ngram: the context is a word or an n-gram; bigram means 2-grams, and ngram.1-2 means 1-grams or 2-grams.
* Word + Character: the context is a word or characters; word-character.char1-2 means a context of 1 or 2 characters.
* Word + Character + Ngram: the context is a word, characters, or an n-gram; bigram-char means 2-grams or a single character.
| Corpus | Word | Word + Ngram | Word + Character | Word + Character + Ngram |
| ------------------------------------------- | ---- | ---- | ---- | ---- |
| Baidu Encyclopedia 百度百科 | w2v.baidu_encyclopedia.target.word-word.dim300 | w2v.baidu_encyclopedia.target.word-ngram.1-2.dim300 | w2v.baidu_encyclopedia.target.word-character.char1-2.dim300 | w2v.baidu_encyclopedia.target.bigram-char.dim300 |
| Wikipedia_zh 中文维基百科 | w2v.wiki.target.word-word.dim300 | w2v.wiki.target.word-bigram.dim300 | w2v.wiki.target.word-char.dim300 | w2v.wiki.target.bigram-char.dim300 |
| People's Daily News 人民日报 | w2v.people_daily.target.word-word.dim300 | w2v.people_daily.target.word-bigram.dim300 | w2v.people_daily.target.word-char.dim300 | w2v.people_daily.target.bigram-char.dim300 |
| Sogou News 搜狗新闻 | w2v.sogou.target.word-word.dim300 | w2v.sogou.target.word-bigram.dim300 | w2v.sogou.target.word-char.dim300 | w2v.sogou.target.bigram-char.dim300 |
| Financial News 金融新闻 | w2v.financial.target.word-word.dim300 | w2v.financial.target.word-bigram.dim300 | w2v.financial.target.word-char.dim300 | w2v.financial.target.bigram-char.dim300 |
| Zhihu_QA 知乎问答 | w2v.zhihu.target.word-word.dim300 | w2v.zhihu.target.word-bigram.dim300 | w2v.zhihu.target.word-char.dim300 | w2v.zhihu.target.bigram-char.dim300 |
| Weibo 微博 | w2v.weibo.target.word-word.dim300 | w2v.weibo.target.word-bigram.dim300 | w2v.weibo.target.word-char.dim300 | w2v.weibo.target.bigram-char.dim300 |
| Literature 文学作品 | w2v.literature.target.word-word.dim300 | w2v.literature.target.word-bigram.dim300 | w2v.literature.target.word-char.dim300 | w2v.literature.target.bigram-char.dim300 |
| Complete Library in Four Sections 四库全书 | w2v.sikuquanshu.target.word-word.dim300 | w2v.sikuquanshu.target.word-bigram.dim300 | N/A | N/A |
| Mixed-large 综合 | w2v.mixed-large.target.word-word.dim300 | N/A | w2v.mixed-large.target.word-word.dim300 | N/A |
In particular, for the Baidu Encyclopedia corpus, both target and context word vectors are provided under different co-occurrence types:
| Co-occurrence Type | Target Word Vectors | Context Word Vectors |
| --------------------------- | ------ | ---- |
| Word → Word | w2v.baidu_encyclopedia.target.word-word.dim300 | w2v.baidu_encyclopedia.context.word-word.dim300 |
| Word → Ngram (1-2) | w2v.baidu_encyclopedia.target.word-ngram.1-2.dim300 | N/A |
| Word → Ngram (1-3) | N/A | N/A |
| Ngram (1-2) → Ngram (1-2)| N/A | N/A |
| Word → Character (1) | w2v.baidu_encyclopedia.target.word-character.char1-1.dim300 | w2v.baidu_encyclopedia.context.word-character.char1-1.dim300 |
| Word → Character (1-2) | w2v.baidu_encyclopedia.target.word-character.char1-2.dim300 | w2v.baidu_encyclopedia.context.word-character.char1-2.dim300 |
| Word → Character (1-4) | w2v.baidu_encyclopedia.target.word-character.char1-4.dim300 | w2v.baidu_encyclopedia.context.word-character.char1-4.dim300 |
| Word → Word (left/right) | N/A | N/A |
| Word → Word (distance) | N/A | N/A |
## English Word Embeddings
To be updated.
# paddlenlp.metrics API Summary
paddlenlp currently provides the following evaluation metrics:
| Metric | Description | API |
| -------------------------------------------------------- | :----------------------------------------------------------- | ------------------------------------------------------------ |
| Perplexity | Perplexity, commonly used to measure the quality of language models; also applicable to machine translation, text generation, and similar tasks. | `paddlenlp.metrics.Perplexity` |
| BLEU (bilingual evaluation understudy) | A common evaluation metric for machine translation | `paddlenlp.metrics.BLEU` |
| Rouge (Recall-Oriented Understudy for Gisting Evaluation) | A metric for evaluating automatic summarization and machine translation | `paddlenlp.metrics.RougeL`, `paddlenlp.metrics.RougeN` |
| AccuracyAndF1 | Accuracy and F1-score, usable for the MRPC and QQP tasks in GLUE | `paddlenlp.metrics.AccuracyAndF1` |
| PearsonAndSpearman | Pearson and Spearman correlation coefficients, usable for the STS-B task in GLUE | `paddlenlp.metrics.PearsonAndSpearman` |
| Mcc (Matthews correlation coefficient) | Matthews correlation coefficient, a measure of binary classification performance, usable for the CoLA task in GLUE | `paddlenlp.metrics.Mcc` |
| ChunkEvaluator | Computes precision, recall, and F1-score for chunk detection; commonly used in sequence labeling tasks such as named entity recognition (NER) | `paddlenlp.metrics.ChunkEvaluator` |
| Squad | Evaluation metrics for SQuAD and DuReader-robust | `paddlenlp.metrics.compute_predictions`, `paddlenlp.metrics.squad_evaluate` |
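Most of these metrics follow the compute/update/accumulate protocol of `paddle.metric.Metric`. Below is a minimal, hedged sketch with `Perplexity`, assuming `compute` takes the model logits and the gold label ids (exact argument shapes vary by metric and version; check each metric's docstring):
```python
import paddle
from paddlenlp.metrics import Perplexity

metric = Perplexity()
logits = paddle.rand([4, 10, 100])        # (batch, seq_len, vocab_size)
labels = paddle.randint(0, 100, [4, 10])  # gold token ids

correct = metric.compute(logits, labels)  # per-token cross entropy (assumed)
metric.update(correct.numpy())
print(metric.accumulate())                # perplexity accumulated so far
```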
# paddlenlp.models
This module provides high-level APIs for models developed at Baidu, such as the text classification model Senta, the text matching model SimNet, and the general pretrained model ERNIE.
```python
class paddlenlp.models.Ernie(model_name, num_classes, task=None, **kwargs):
    """
    Pretrained model ERNIE.
    For more information see: ERNIE: Enhanced Representation through Knowledge Integration (https://arxiv.org/abs/1904.09223)

    Args:
        `model_name (obj:`str`)`: model name, e.g. `ernie-1.0`, `ernie-tiny`, `ernie-2.0-en`, `ernie-2.0-large-en`.
        `num_classes (obj:`int`)`: number of classes.
        `task (obj:`str`)`: name of the downstream task ERNIE is used for; one of `seq-cls`, `token-cls`, `qa`. Defaults to None.
            - task='seq-cls': ERNIE for text classification. Extracts the sentence feature from the ERNIE model and feeds it into a final fully connected layer for classification.
              See `paddlenlp.transformers.ErnieForSequenceClassification` for details.
            - task='token-cls': ERNIE for sequence labeling. Extracts the feature of every token from the ERNIE model and feeds them into a final fully connected layer for token classification.
              See `paddlenlp.transformers.ErnieForTokenClassification` for details.
            - task='qa': ERNIE for reading comprehension. Extracts the feature of every token from the ERNIE model and feeds them into a final fully connected layer to predict the answer's position in the passage.
              See `paddlenlp.transformers.ErnieForQuestionAnswering` for details.
            - task=None: the bare pretrained ERNIE model. Can be used as a backbone to extract the sentence feature pooled_output and the token features sequence_output.
              See `paddlenlp.transformers.ErnieModel` for details.
    """
    def forward(input_ids, token_type_ids=None, position_ids=None, attention_mask=None):
        """
        Args:
            `input_ids (obj:`paddle.Tensor`)`: token ids of the text, shape (batch_size, sequence_length).
            `token_type_ids (obj:`paddle.Tensor`)`: ids marking which text each token belongs to (text 1 or text 2), shape (batch_size, sequence_length).
                Defaults to None, meaning all tokens belong to text 1.
            `position_ids (obj:`paddle.Tensor`)`: position of each token in the input sequence, shape (batch_size, sequence_length). Defaults to None.
            `attention_mask (obj:`paddle.Tensor`)`: to avoid attending to padding tokens, a mask matrix marking whether each token is padding,
                shape (batch_size, sequence_length). Mask values are 0 or 1: 1 marks a padding token, 0 marks a real input token. Defaults to None.

        Returns:
            - When `task` is not None, returns the classification probabilities of the corresponding downstream task, `probs (obj:`paddle.Tensor`)`, shape (batch_size, num_classes).
            - When `task=None`, returns the pretrained ERNIE model's sentence feature pooled_output and token features sequence_output.
                * pooled_output (obj:`paddle.Tensor`): shape (batch_size, hidden_size)
                * sequence_output (obj:`paddle.Tensor`): shape (batch_size, sequence_length, hidden_size)
        """
```
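A hedged usage sketch for the class above (the `ernie-1.0` weights download automatically on first use; the token ids below are placeholders):
```python
import paddle
from paddlenlp.models import Ernie

# Binary text classification head on top of ERNIE (task='seq-cls').
model = Ernie("ernie-1.0", num_classes=2, task="seq-cls")

input_ids = paddle.randint(1, 18000, [2, 16])  # placeholder token ids, batch of 2
probs = model(input_ids)                       # (2, 2) class probabilities
```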
```python
class paddlenlp.models.Senta(network, vocab_size, num_classes, emb_dim=128, pad_token_id=0):
    """
    Text classification model Senta.

    Args:
        `network (obj:`str`)`: network name; one of bow, bilstm, bilstm_attn, bigru, birnn, cnn, lstm, gru, rnn, and textcnn.
            - network='bow': sums the input word embeddings as the text representation.
              See `paddlenlp.seq2vec.BoWEncoder` for details.
            - network='bilstm': runs a bidirectional LSTM over the input word embeddings and takes the last step's representation as the text representation.
              See `paddlenlp.seq2vec.LSTMEncoder` for details.
            - network='bilstm_attn': runs a bidirectional LSTM with attention over the input word embeddings and takes the last step's representation as the text representation.
              See `paddlenlp.seq2vec.LSTMEncoder` for details.
            - network='bigru': runs a bidirectional GRU over the input word embeddings and takes the last step's representation as the text representation.
              See `paddlenlp.seq2vec.GRUEncoder` for details.
            - network='birnn': runs a bidirectional RNN over the input word embeddings and takes the last step's representation as the text representation.
              See `paddlenlp.seq2vec.RNNEncoder` for details.
            - network='cnn': applies a convolution followed by max-pooling over the input word embeddings as the text representation.
              See `paddlenlp.seq2vec.CNNEncoder` for details.
            - network='lstm': runs an LSTM over the input word embeddings followed by max-pooling as the text representation.
              See `paddlenlp.seq2vec.LSTMEncoder` for details.
            - network='gru': runs a GRU over the input word embeddings followed by max-pooling as the text representation.
              See `paddlenlp.seq2vec.GRUEncoder` for details.
            - network='rnn': runs an RNN over the input word embeddings followed by max-pooling as the text representation.
              See `paddlenlp.seq2vec.RNNEncoder` for details.
            - network='textcnn': applies multiple convolutions and max-pooling over the input word embeddings as the text representation.
              See `paddlenlp.seq2vec.CNNEncoder` for details.
        `vocab_size (obj:`int`)`: vocabulary size.
        `num_classes (obj:`int`)`: number of classes.
        `emb_dim (obj:`int`)`: word embedding dimension, default 128.
        `pad_token_id (obj:`int`)`: index of the padding token in the vocabulary, default 0.
    """
    def forward(text, seq_len):
        """
        Args:
            `text (obj:`paddle.Tensor`)`: token ids of the text, shape (batch_size, sequence_length).
            `seq_len (obj:`paddle.Tensor`)`: sequence lengths, shape (batch_size).

        Returns:
            `probs (obj:`paddle.Tensor`)`: classification probabilities, shape (batch_size, num_classes).
        """
```
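A hedged usage sketch following the signature above (`vocab_size` must match your own vocabulary; the ids here are placeholders):
```python
import paddle
from paddlenlp.models import Senta

model = Senta(network="bilstm", vocab_size=30000, num_classes=2)

text = paddle.randint(1, 30000, [4, 20])       # placeholder token ids
seq_len = paddle.full([4], 20, dtype="int64")  # true length of each sample
probs = model(text, seq_len)                   # (4, 2) class probabilities
```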
```python
class paddlenlp.models.SimNet(nn.Layer):
    """
    Text matching model SimNet.

    Args:
        `network (obj:`str`)`: network name; one of bow, cnn, lstm, and gru.
            - network='bow': sums the input word embeddings as the text representation.
              See `paddlenlp.seq2vec.BoWEncoder` for details.
            - network='cnn': applies a convolution followed by max-pooling over the input word embeddings as the text representation.
              See `paddlenlp.seq2vec.CNNEncoder` for details.
            - network='lstm': runs an LSTM over the input word embeddings and takes the last step's representation as the text representation.
              See `paddlenlp.seq2vec.LSTMEncoder` for details.
            - network='gru': runs a GRU over the input word embeddings and takes the last step's representation as the text representation.
              See `paddlenlp.seq2vec.GRUEncoder` for details.
        `vocab_size (obj:`int`)`: vocabulary size.
        `num_classes (obj:`int`)`: number of classes.
        `emb_dim (obj:`int`)`: word embedding dimension, default 128.
        `pad_token_id (obj:`int`)`: index of the padding token in the vocabulary, default 0.
    """
    def forward(query, title, query_seq_len=None, title_seq_len=None):
        """
        Args:
            `query (obj:`paddle.Tensor`)`: token ids of the query text, shape (batch_size, query_sequence_length).
            `title (obj:`paddle.Tensor`)`: token ids of the title text, shape (batch_size, title_sequence_length).
            `query_seq_len (obj:`paddle.Tensor`)`: sequence lengths of the query text, shape (batch_size).
            `title_seq_len (obj:`paddle.Tensor`)`: sequence lengths of the title text, shape (batch_size).

        Returns:
            `probs (obj:`paddle.Tensor`)`: classification probabilities, shape (batch_size, num_classes).
        """
```
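And a matching hedged sketch for SimNet (placeholder ids again; a real pipeline would tokenize query/title pairs):
```python
import paddle
from paddlenlp.models import SimNet

model = SimNet(network="lstm", vocab_size=30000, num_classes=2)

query = paddle.randint(1, 30000, [4, 12])
title = paddle.randint(1, 30000, [4, 15])
query_seq_len = paddle.full([4], 12, dtype="int64")
title_seq_len = paddle.full([4], 15, dtype="int64")
probs = model(query, title, query_seq_len, title_seq_len)  # (4, 2) probabilities
```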
# PaddleNLP Transformer Pretrained Models
With the development of deep learning, the NLP field has produced a wave of high-quality transformer pretrained models that have repeatedly set new SOTA results across NLP tasks. PaddleNLP provides commonly used pretrained models such as BERT and ERNIE, so users can conveniently apply a variety of transformer models to their own tasks.
## Transformer Model Summary
The table below summarizes the pretrained models currently supported by PaddleNLP. Users can apply them to tasks such as question answering, sequence classification, and token classification. We also provide 22 sets of pretrained parameter weights, including weights for 11 Chinese language models.
| Model | Tokenizer| Supported Task| Pretrained Weight|
|---|---|---|---|
| [BERT](https://arxiv.org/abs/1810.04805) | BertTokenizer|BertModel<br> BertForQuestionAnswering<br> BertForSequenceClassification<br>BertForTokenClassification| `bert-base-uncased`<br> `bert-large-uncased` <br>`bert-base-multilingual-uncased` <br>`bert-base-cased`<br> `bert-base-chinese`<br> `bert-base-multilingual-cased`<br> `bert-large-cased`<br> `bert-wwm-chinese`<br> `bert-wwm-ext-chinese` |
|[ERNIE](https://arxiv.org/abs/1904.09223)|ErnieTokenizer<br>ErnieTinyTokenizer|ErnieModel<br> ErnieForQuestionAnswering<br> ErnieForSequenceClassification<br> ErnieForTokenClassification<br> ErnieForGeneration| `ernie-1.0`<br> `ernie-tiny`<br> `ernie-2.0-en`<br> `ernie-2.0-large-en`<br>`ernie-gen-base-en`<br>`ernie-gen-large-en`<br>`ernie-gen-large-en-430g`|
|[RoBERTa](https://arxiv.org/abs/1907.11692)|RobertaTokenizer| RobertaModel<br>RobertaForQuestionAnswering<br>RobertaForSequenceClassification<br>RobertaForTokenClassification| `roberta-wwm-ext`<br> `roberta-wwm-ext-large`<br> `rbt3`<br> `rbtl3`|
|[ELECTRA](https://arxiv.org/abs/2003.10555) |ElectraTokenizer| ElectraModel<br>ElectraForSequenceClassification<br>ElectraForTokenClassification<br>|`electra-small`<br> `electra-base`<br> `electra-large`<br> `chinese-electra-small`<br> `chinese-electra-base`<br>|
|[Transformer](https://arxiv.org/abs/1706.03762) |- | TransformerModel | - |
Note: the Chinese pretrained models are `bert-base-chinese, bert-wwm-chinese, bert-wwm-ext-chinese, ernie-1.0, ernie-tiny, roberta-wwm-ext, roberta-wwm-ext-large, rbt3, rbtl3, chinese-electra-base, chinese-electra-small`. The generation models `ernie-gen-base-en, ernie-gen-large-en, ernie-gen-large-en-430g` only support the `ErnieForGeneration` task.
## Using the Pretrained Models
While providing a rich set of pretrained models, PaddleNLP also keeps them easy to use. With just a dozen or so lines of code, users can load a model and fine-tune it on a downstream task.
```python
import paddle
from paddlenlp.datasets import ChnSentiCorp
from paddlenlp.transformers import BertForSequenceClassification, BertTokenizer
train_dataset, dev_dataset, test_dataset = ChnSentiCorp.get_datasets(
['train', 'dev', 'test'])
model = BertForSequenceClassification.from_pretrained(
"bert-wwm-chinese", num_classes=len(train_dataset.get_labels()))
tokenizer = BertTokenizer.from_pretrained("bert-wwm-chinese")
# please define your dataloader from dataset and tokenizer
optimizer = paddle.optimizer.AdamW(learning_rate=0.001,
parameters=model.parameters())
criterion = paddle.nn.loss.CrossEntropyLoss()
for batch in train_data_loader:
input_ids, segment_ids, labels = batch
logits = model(input_ids, segment_ids)
loss = criterion(logits, labels)
probs = paddle.nn.functional.softmax(logits, axis=1)
loss.backward()
optimizer.step()
optimizer.clear_gradients()
```
The code above is a brief example of using a pretrained model; for more complete and detailed example code, see [Fine-tuning a pretrained model for Chinese text classification](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/text_classification/pretrained_models)
1. Load the dataset: PaddleNLP has many built-in datasets that can be imported with a single call.
2. Load the pretrained model: PaddleNLP pretrained models are loaded easily via the `from_pretrained` method. The first argument is the corresponding `Pretrained Weight` from the summary table, which loads the matching pretrained weights. Other arguments required by `BertForSequenceClassification`'s `__init__`, such as `num_classes`, are also passed through `from_pretrained`. The `Tokenizer` is loaded with the same `from_pretrained` method.
3. Use the tokenizer to turn the dataset into model inputs; a hedged sketch follows this list, and the detailed example code referenced above covers it fully.
4. Define the optimizer, loss function, and other training pieces, then start fine-tuning the model.
For more detailed usage, see the [examples](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples)
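As promised in step 3, here is a hedged sketch of the data loader omitted from the snippet above. `convert_example` is a hypothetical helper, and the exact tokenizer call (`encode` and its returned keys) varies slightly across PaddleNLP versions:
```python
from functools import partial

import paddle
from paddlenlp.data import Pad, Stack, Tuple

def convert_example(example, tokenizer, max_seq_length=128):
    # Hypothetical helper: ChnSentiCorp examples are (text, label) pairs.
    text, label = example
    encoded = tokenizer.encode(text, max_seq_len=max_seq_length)
    return encoded["input_ids"], encoded["segment_ids"], int(label)

train_dataset = train_dataset.apply(
    partial(convert_example, tokenizer=tokenizer), lazy=True)
batchify_fn = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input_ids
    Pad(axis=0, pad_val=tokenizer.pad_token_id),  # segment_ids
    Stack(dtype="int64"),                         # labels
): fn(samples)
train_data_loader = paddle.io.DataLoader(
    dataset=train_dataset,
    batch_sampler=paddle.io.BatchSampler(
        train_dataset, batch_size=32, shuffle=True),
    collate_fn=batchify_fn,
    return_list=True)
```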
# PaddleNLP Model Zoo
Examples are still work in progress...
[**PaddleNLP**](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP) is an open-source project of natural language processing (NLP) tools, algorithms, models, and data built on the PaddlePaddle deep learning framework. Baidu's decade-plus accumulation in NLP provides the core strength behind PaddleNLP. PaddleNLP offers a fairly rich model zoo that covers mainstream NLP tasks; because the model zoo builds on PaddleNLP's basic NLP utilities, such as dataset processing and high-level APIs, its algorithms are concise and easy to follow. The tasks supported by PaddleNLP are detailed below, covering **basic NLP techniques**, **core NLP techniques**, and **core NLP applications**.
### Basic Technique Models
| Task | Directory | Description |
| ----------------------------------| ------------------------------------------------------------ | ------------------------------------------------------------ |
| Chinese lexical analysis | [LAC (Lexical Analysis of Chinese)](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/lexical_analysis) | Baidu's in-house lexical analysis model for Chinese, integrating word segmentation, part-of-speech tagging, and named entity recognition. The input is a string; the output is the sentence's word boundaries together with POS tags and entity categories. |
| Pretrained word embeddings | [WordEmbedding](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/word_embedding) | Provides rich Chinese pretrained word embeddings; with a simple configuration, embeddings can warm-start training for many Chinese tasks and speed up convergence. |
### Core Technique Models
| Task | Directory | Description |
| -------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
| ERNIE-GEN text generation | [ERNIE-GEN (An Enhanced Multi-Flow Pre-training and Fine-tuning Framework for Natural Language Generation)](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/text_generation/ernie-gen) | ERNIE-GEN is Baidu's generative pretrained model, a pretraining and fine-tuning framework with a multi-flow structure. With fewer parameters and less data, ERNIE-GEN achieves SOTA results on five datasets across four tasks: summarization, question generation, dialogue, and generative question answering. |
| BERT pretraining & GLUE downstream tasks | [BERT (Bidirectional Encoder Representation from Transformers)](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/bert) | BERT is currently the most popular semantic-representation pretrained model. PaddleNLP provides a concise and effective implementation, and different BERT variants can be selected simply by switching parameters. |
| Electra pretraining & GLUE downstream tasks | [Electra (Pre-training Text Encoders as Discriminators Rather Than Generators)](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/electra) | ELECTRA is a new pretraining framework that combines a generator with a discriminator; compared with BERT it improves computational efficiency and alleviates the inconsistency between BERT's training and inference. |
### Core Application Models
#### Machine Translation
Machine translation is a branch of computational linguistics and one of the ultimate goals of artificial intelligence, with significant scientific research value. For machine translation, two families of models are provided: the traditional Sequence to Sequence (Seq2Seq) approach, which encodes and decodes with RNN models, and Transformer models, which use self-attention to strengthen the encoder and decoder; see the paper [Attention Is All You Need](https://arxiv.org/abs/1706.03762) for details on the Transformer. The models are listed below.
| Model | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [Seq2Seq](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/machine_translation/seq2seq) | Uses an encoder-decoder structure with an attention mechanism to strengthen the information flow between decoder and encoder. Seq2Seq is widely used in machine translation, chatbots, document summarization, image captioning, and similar tasks. |
| [Transformer](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/machine_translation/transformer) | A machine translation model built with the Transformer architecture on the PaddlePaddle framework. The Transformer is highly parallelizable and handles long-range dependencies well. The framework integrates training, validation, and prediction, with complete functionality and strong results. |
#### Named Entity Recognition
Named entity recognition (NER) is a very fundamental NLP task and an important building block for information extraction, question answering, syntactic parsing, machine translation, and many other NLP tasks. NER accuracy determines the quality of downstream tasks, making it a key foundational problem in NLP.
Two solutions are provided for NER. One uses LSTM/GRU + CRF (Conditional Random Field): RNN models extract the underlying text features, while the CRF learns the dependencies between tokens. The other uses pretrained models such as ERNIE and BERT to predict token labels directly.
Because these models are fairly abstract, a training script for express waybill information extraction is provided: the task uses both kinds of models to extract the key fields of a waybill, such as address, name, and phone number. See the [express waybill task](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/named_entity_recognition/express_ner)
The models are listed below.
| Model | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [BiGRU+CRF](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/named_entity_recognition/express_ner) | A traditional sequence labeling model: a bidirectional GRU extracts the information and dependencies of the text sequence, and a CRF learns the dependencies between tokens. This model integrates PaddleNLP's own CRF implementation, with a clear and easy-to-follow structure. |
| [ERNIE/BERT Fine-tuning](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/named_entity_recognition) | Uses the strong semantic representations of pretrained models and the self-attention of ERNIE/BERT to capture token dependencies, predicting each token's label directly with the BERT/ERNIE sequence classification head; the structure is simple and the results are excellent. |
#### Text Classification
Text classification is a common NLP task. Two families of models are provided: traditional lightweight RNN-based classifiers, and classifiers based on pretrained models. For RNN models, Baidu's own Senta model is provided, with a classic structure and strong results; for pretrained models, a large selection is available with automatically downloaded parameters, easy to use and very effective at improving text classification.
| Model | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [RNN/GRU/LSTM](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/text_classification/rnn) | A general-purpose text classification model whose network plugs in common RNN variants such as LSTM, GRU, and RNN. The overall structure builds on Baidu's own Senta sentiment classification model, with strong results and simple usage. |
| [ERNIE/BERT Fine-tuning](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/text_classification/pretrained_models) | Text classification based on pretrained models, with as many as 11 pretrained models available, including many Chinese ones. Switching between pretrained models is simple, and results on sentiment analysis are excellent. |
#### Text Generation
Text generation is an important research area in natural language processing with broad application prospects. Systems such as Automated Insights and Narrative Science are already in use, generating news, financial reports, and other explanatory text from structured data or natural language input. The two most common text generation tasks are text writing and text summarization. Here we mainly provide Baidu's own generation model ERNIE-GEN, a pretraining and fine-tuning framework with a multi-flow structure. With fewer parameters and less data, ERNIE-GEN achieves SOTA results on five datasets across four tasks: summarization, question generation, dialogue, and generative question answering. Based on ERNIE-GEN, we provide an automatic poem-writing example to showcase its generation quality. The model is listed below.
| Model | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [ERNIE-GEN (An Enhanced Multi-Flow Pre-training and Fine-tuning Framework for Natural Language Generation)](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/text_generation/ernie-gen) | ERNIE-GEN is Baidu's generative pretrained model. It addresses the exposure bias between training and inference through global attention, uses a multi-flow attention mechanism to exchange global and context information separately, and generates text span by span to strengthen semantic coherence. |
#### Text Matching
Text matching has long been a fundamental and important direction in natural language processing (NLP), generally studying the relationship between two pieces of text. Text similarity computation, natural language inference, question answering, and information retrieval can all be seen as text matching applied to different data and scenarios. For text matching, the traditional SimNet (Similarity Net) and SentenceBERT models are provided. SimNet is a framework for computing short-text similarity whose core network structures include BOW, CNN, RNN, and MMDNN. SimNet is widely used across Baidu products, providing a training and prediction framework for semantic similarity that fits information retrieval, news recommendation, intelligent customer service, and other scenarios, helping enterprises solve semantic matching problems. SentenceBERT represents sentence semantics with a pretrained model's strong semantic representations and judges whether two sentences match by comparing them.
The models are listed below.
| Model | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [SimNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/text_matching/simnet) | PaddleNLP's SimNet model is part of the official PaddleNLP API; users can build a SimNet model with a direct API call. At the model level it provides the common Bow/CNN/LSTM/GRU feature extractors, offering high flexibility and easy usage. |
| [SentenceTransformer](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/text_matching/sentence_transformers) | Calls a simple pretrained model interface to obtain sentence-level semantic representations, with many Chinese pretrained models to choose from according to the task. |
#### Language Model
In natural language processing (NLP), language model pretraining has delivered solid gains on many tasks and attracted wide attention. Two language models are currently provided: RNNLM, which predicts sequences with an RNN network, and ELMo, which uses bidirectional LSTMs as the basic network component and language modeling as the training objective, learning general-purpose semantic representations through pretraining. The models are listed below.
| Model | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [RNNLM](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/language_model/rnnlm) | A standard RNN for sequence tasks: a two-layer LSTM whose output predicts the probability of the next word. A conventional RNN-based language model. |
| [ELMo](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/language_model/elmo) | ELMo is a bidirectional LSTM language model consisting of a forward and a backward language model, with the objective of maximizing the likelihood of both directions. ELMo addresses the single static representation of traditional word embeddings by enriching semantic representations with context. |
#### Text Graph
Many industrial applications feature a special kind of graph: the Text Graph. As the name suggests, its node attributes consist of text while the edges provide structural information. In a search scenario, for example, nodes can be search queries, web page titles, and web page bodies, while user feedback and hyperlinks form the edges. The Baidu graph learning team, PGL (Paddle Graph Learning), proposed the ERNIESage (ERNIE SAmple aggreGatE) model, which jointly models text semantics and graph structure to improve Text Graph applications. Graph learning is a current research hotspot in deep learning; to learn more, visit the [PGL GitHub repository](https://github.com/PaddlePaddle/PGL/)
Details of the ERNIESage model are below.
| Model | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [ERNIESage (ERNIE SAmple aggreGatE)](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/text_graph/erniesage) | Builds the connections between a node and its neighbors through the graph, combines them into a joint sample fed to ERNIE, and uses ERNIE as the aggregator to represent the semantic relationship between a node and its neighbors, ultimately strengthening the semantic representation of graph nodes. ERNIESage performs very well on Text Graph tasks. |
#### Machine Reading Comprehension
Machine reading comprehension is one of the recent research hotspots in natural language processing and a long-term goal of artificial intelligence in processing and understanding human language. Thanks to advances in deep learning and large-scale annotated datasets, end-to-end neural approaches to reading comprehension have made great progress. The model is listed below.
| Model | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [BERT Fine-tuning](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/machine_reading_comprehension/) | Uses the strong semantic representations of pretrained models such as ERNIE/BERT for downstream reading comprehension tasks. This module provides several datasets to validate BERT on reading comprehension, including SQuAD, DuReader, DuReader-robust, and DuReader-yesno, along with the relevant reading comprehension metrics, which users can call to quickly validate model quality. |
#### Dialogue System
Dialogue systems often need to handle a wide variety of tasks as application scenarios change. The diversity of tasks (intent detection, slot filling, act recognition, state tracking, and so on) and the scarcity of in-domain training data pose many challenges for the field. To address this, a BERT-based Dialogue General Understanding model (DGU: DialogueGeneralUnderstanding) is provided; this training paradigm matches or even surpasses the best in-domain models on dialogue understanding tasks, showing the great potential of learning a general dialogue understanding model. The model is listed below.
| Model | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [BERT-DGU](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/dialogue/dgu) | Uses the strong semantic representations of pretrained models such as ERNIE/BERT to extract the semantics of dialogue text; tasks such as intent detection, act recognition, and state tracking can then be completed with text classification and similar operations. |
#### Time Series
A time series is a sequence ordered by time, such as daily power generation or hourly revenue. By analyzing the progression, direction, and trend within a time series, we can forecast what may happen next. To help readers understand time series forecasting, an example task based on forecasting the 2019 COVID-19 outbreak is provided for study.
Details of the time series model are below.
| Model | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [TCN (Temporal convolutional network)](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/time_series) | TCN is a convolution-based time series model. Through a specific combination of causal convolution and dilated convolution, it overcomes the mismatch between plain convolutions and time series tasks. TCN offers high parallelism and low memory use, and on some time series tasks it already outperforms traditional RNN models. |
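The key TCN ingredient mentioned above, dilated causal convolution, can be sketched in a few lines of Paddle (a minimal illustration, not the model zoo's implementation):
```python
import paddle
import paddle.nn as nn

class CausalConv1D(nn.Layer):
    """Dilated causal 1-D convolution: output at step t sees only inputs <= t."""

    def __init__(self, in_channels, out_channels, kernel_size, dilation=1):
        super(CausalConv1D, self).__init__()
        # Left-pad so the convolution never looks into the future.
        self.pad = nn.Pad1D([(kernel_size - 1) * dilation, 0])
        self.conv = nn.Conv1D(in_channels, out_channels, kernel_size,
                              dilation=dilation)

    def forward(self, x):  # x: [batch, channels, time]
        return self.conv(self.pad(x))

layer = CausalConv1D(in_channels=8, out_channels=16, kernel_size=3, dilation=2)
out = layer(paddle.rand([4, 8, 100]))  # [4, 16, 100]: temporal length preserved
```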
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import itertools
import os
import random
import time
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
from paddlenlp.datasets.dataset import *
from paddlenlp.datasets.glue import *
from paddlenlp.data import *
from paddlenlp.data.sampler import SamplerHelper
from paddlenlp.transformers.model_bert import *
from paddlenlp.transformers.tokenizer_bert import BertTokenizer
from run_glue import convert_example, TASK_CLASSES
MODEL_CLASSES = {"bert": (BertForSequenceClassification, BertTokenizer), }
def parse_args():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " +
", ".join(TASK_CLASSES.keys()), )
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " +
", ".join(MODEL_CLASSES.keys()), )
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(
sum([
list(classes[-1].pretrained_init_configuration.keys())
for classes in MODEL_CLASSES.values()
], [])), )
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.", )
parser.add_argument(
"--batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for prediction.", )
parser.add_argument(
"--eager_run", type=eval, default=True, help="Use dygraph mode.")
parser.add_argument(
"--use_gpu", type=eval, default=True, help="Whether to use gpu.")
args = parser.parse_args()
return args
def do_predict(args):
if not args.eager_run:
paddle.enable_static()
paddle.set_device("gpu" if args.use_gpu else "cpu")
args.task_name = args.task_name.lower()
dataset_class, _ = TASK_CLASSES[args.task_name]
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
test_dataset = dataset_class.get_datasets(["test"])
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=test_dataset.get_labels(),
max_seq_length=args.max_seq_length,
is_test=True)
test_dataset = test_dataset.apply(trans_func, lazy=True)
test_batch_sampler = paddle.io.BatchSampler(
test_dataset, batch_size=args.batch_size, shuffle=False)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.vocab[tokenizer.pad_token]), # input
Pad(axis=0, pad_val=tokenizer.vocab[tokenizer.pad_token]), # segment
Stack(), # length
): fn(samples)[:2]
test_data_loader = DataLoader(
dataset=test_dataset,
batch_sampler=test_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
model = model_class.from_pretrained(args.model_name_or_path)
model.eval()
for batch in test_data_loader:
input_ids, segment_ids = batch
logits = model(input_ids, segment_ids)
# argmax over the class axis; without an axis, paddle.argmax flattens the batch
preds = paddle.argmax(logits, axis=-1).numpy()
for i, rs in enumerate(preds):
print(input_ids[i].numpy(), rs)
if __name__ == "__main__":
args = parse_args()
do_predict(args)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import itertools
import logging
import os
import random
import time
import h5py
from functools import partial
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import paddle
import paddle.distributed as dist
from paddle.io import DataLoader, Dataset
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.transformers import BertForPretraining, BertModel, BertPretrainingCriterion
from paddlenlp.transformers import BertTokenizer
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {"bert": (BertForPretraining, BertTokenizer), }
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " +
", ".join(MODEL_CLASSES.keys()), )
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(
sum([
list(classes[-1].pretrained_init_configuration.keys())
for classes in MODEL_CLASSES.values()
], [])), )
parser.add_argument(
"--input_dir",
default=None,
type=str,
required=True,
help="The input directory where the data will be read from.", )
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--max_predictions_per_seq",
default=80,
type=int,
help="The maximum total of masked tokens in input sequence")
parser.add_argument(
"--batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.", )
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Weight decay if we apply some.")
parser.add_argument(
"--adam_epsilon",
default=1e-8,
type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs",
default=3,
type=int,
help="Total number of training epochs to perform.", )
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps",
default=0,
type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument(
"--logging_steps",
type=int,
default=500,
help="Log every X updates steps.")
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save checkpoint every X updates steps.")
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--eager_run", type=eval, default=True, help="Use dygraph mode.")
parser.add_argument(
"--n_gpu",
type=int,
default=1,
help="number of gpus to use, 0 for cpu.")
args = parser.parse_args()
return args
def set_seed(args):
random.seed(args.seed + paddle.distributed.get_rank())
np.random.seed(args.seed + paddle.distributed.get_rank())
paddle.seed(args.seed + paddle.distributed.get_rank())
class WorkerInitObj(object):
def __init__(self, seed):
self.seed = seed
def __call__(self, id):
np.random.seed(seed=self.seed + id)
random.seed(self.seed + id)
def create_pretraining_dataset(input_file, max_pred_length, shared_list, args,
worker_init):
train_data = PretrainingDataset(
input_file=input_file, max_pred_length=max_pred_length)
# files have been sharded, no need to dispatch again
train_batch_sampler = paddle.io.BatchSampler(
train_data, batch_size=args.batch_size, shuffle=True)
# DataLoader cannot be pickled because of its place.
# If it can be pickled, use global function instead of lambda and use
# ProcessPoolExecutor instead of ThreadPoolExecutor to prefetch.
def _collate_data(data, stack_fn=Stack()):
num_fields = len(data[0])
out = [None] * num_fields
# input_ids, segment_ids, input_mask, masked_lm_positions,
# masked_lm_labels, next_sentence_labels, mask_token_num
for i in (0, 1, 2, 5):
out[i] = stack_fn([x[i] for x in data])
batch_size, seq_length = out[0].shape
size = num_mask = sum(len(x[3]) for x in data)
# Padding for divisibility by 8 for fp16 or int8 usage
if size % 8 != 0:
size += 8 - (size % 8)
# masked_lm_positions
# Organize as a 1D tensor for gather or use gather_nd
out[3] = np.full(size, 0, dtype=np.int64)
# masked_lm_labels
out[4] = np.full([size, 1], -1, dtype=np.int64)
mask_token_num = 0
for i, x in enumerate(data):
for j, pos in enumerate(x[3]):
out[3][mask_token_num] = i * seq_length + pos
out[4][mask_token_num] = x[4][j]
mask_token_num += 1
# mask_token_num
out.append(np.asarray([mask_token_num], dtype=np.float32))
return out
train_data_loader = DataLoader(
dataset=train_data,
batch_sampler=train_batch_sampler,
collate_fn=_collate_data,
num_workers=0,
worker_init_fn=worker_init,
return_list=True)
return train_data_loader, input_file
class PretrainingDataset(Dataset):
def __init__(self, input_file, max_pred_length):
self.input_file = input_file
self.max_pred_length = max_pred_length
f = h5py.File(input_file, "r")
keys = [
'input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions',
'masked_lm_ids', 'next_sentence_labels'
]
self.inputs = [np.asarray(f[key][:]) for key in keys]
f.close()
def __len__(self):
'Denotes the total number of samples'
return len(self.inputs[0])
def __getitem__(self, index):
[
input_ids, input_mask, segment_ids, masked_lm_positions,
masked_lm_ids, next_sentence_labels
] = [
input[index].astype(np.int64)
if indice < 5 else np.asarray(input[index].astype(np.int64))
for indice, input in enumerate(self.inputs)
]
# TODO: whether to use reversed mask by changing 1s and 0s to be
# consistent with nv bert
input_mask = (1 - np.reshape(
input_mask.astype(np.float32), [1, 1, input_mask.shape[0]])) * -1e9
index = self.max_pred_length
# store number of masked tokens in index
# note: torch.nonzero and numpy.nonzero return differently shaped outputs
padded_mask_indices = (masked_lm_positions == 0).nonzero()[0]
if len(padded_mask_indices) != 0:
index = padded_mask_indices[0].item()
mask_token_num = index
else:
index = 0
mask_token_num = 0
# masked_lm_labels = np.full(input_ids.shape, -1, dtype=np.int64)
# masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index]
masked_lm_labels = masked_lm_ids[:index]
masked_lm_positions = masked_lm_positions[:index]
# softmax_with_cross_entropy enforce last dim size equal 1
masked_lm_labels = np.expand_dims(masked_lm_labels, axis=-1)
next_sentence_labels = np.expand_dims(next_sentence_labels, axis=-1)
return [
input_ids, segment_ids, input_mask, masked_lm_positions,
masked_lm_labels, next_sentence_labels
]
def do_train(args):
if not args.eager_run:
paddle.enable_static()
paddle.set_device("gpu" if args.n_gpu else "cpu")
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
set_seed(args)
worker_init = WorkerInitObj(args.seed + paddle.distributed.get_rank())
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
model = BertForPretraining(
BertModel(**model_class.pretrained_init_configuration[
args.model_name_or_path]))
criterion = BertPretrainingCriterion(
getattr(model, BertForPretraining.base_model_prefix).config[
"vocab_size"])
if paddle.distributed.get_world_size() > 1:
model = paddle.DataParallel(model)
# If the default last_epoch were used, the lr of the first iteration would be 0.
# Use `last_epoch = 0` to be consistent with nv bert. The lambda below scales the
# base lr linearly up during warmup and then linearly down to 0 at
# num_training_steps (it assumes args.max_steps > 0 here, since train_data_loader
# is not yet defined when the default argument is evaluated).
lr_scheduler = paddle.optimizer.lr.LambdaDecay(
args.learning_rate,
lambda current_step, num_warmup_steps=args.warmup_steps,
num_training_steps=args.max_steps if args.max_steps > 0 else
(len(train_data_loader) * args.num_train_epochs): float(
current_step) / float(max(1, num_warmup_steps))
if current_step < num_warmup_steps else max(
0.0,
float(num_training_steps - current_step) / float(
max(1, num_training_steps - num_warmup_steps))),
last_epoch=0)
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
epsilon=args.adam_epsilon,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
])
pool = ThreadPoolExecutor(1)
global_step = 0
tic_train = time.time()
for epoch in range(args.num_train_epochs):
files = [
os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)
if os.path.isfile(os.path.join(args.input_dir, f)) and "training" in
f
]
files.sort()
num_files = len(files)
random.Random(args.seed + epoch).shuffle(files)
f_start_id = 0
shared_file_list = {}
if paddle.distributed.get_world_size() > num_files:
remainder = paddle.distributed.get_world_size() % num_files
data_file = files[(
f_start_id * paddle.distributed.get_world_size() +
paddle.distributed.get_rank() + remainder * f_start_id) %
num_files]
else:
data_file = files[(f_start_id * paddle.distributed.get_world_size()
+ paddle.distributed.get_rank()) % num_files]
previous_file = data_file
train_data_loader, _ = create_pretraining_dataset(
data_file, args.max_predictions_per_seq, shared_file_list, args,
worker_init)
for f_id in range(f_start_id + 1, len(files)):
if paddle.distributed.get_world_size() > num_files:
data_file = files[(
f_id * paddle.distributed.get_world_size() +
paddle.distributed.get_rank() + remainder * f_id) %
num_files]
else:
data_file = files[(f_id * paddle.distributed.get_world_size() +
paddle.distributed.get_rank()) % num_files]
previous_file = data_file
dataset_future = pool.submit(create_pretraining_dataset, data_file,
args.max_predictions_per_seq,
shared_file_list, args, worker_init)
for step, batch in enumerate(train_data_loader):
global_step += 1
(input_ids, segment_ids, input_mask, masked_lm_positions,
masked_lm_labels, next_sentence_labels,
masked_lm_scale) = batch
prediction_scores, seq_relationship_score = model(
input_ids=input_ids,
token_type_ids=segment_ids,
attention_mask=input_mask,
masked_positions=masked_lm_positions)
loss = criterion(prediction_scores, seq_relationship_score,
masked_lm_labels, next_sentence_labels,
masked_lm_scale)
if global_step % args.logging_steps == 0:
if (not args.n_gpu > 1
) or paddle.distributed.get_rank() == 0:
logger.info(
"global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
% (global_step, epoch, step, loss,
args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_gradients()
if global_step % args.save_steps == 0:
if (not args.n_gpu > 1
) or paddle.distributed.get_rank() == 0:
output_dir = os.path.join(args.output_dir,
"model_%d" % global_step)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# need better way to get inner model of DataParallel
model_to_save = model._layers if isinstance(
model, paddle.DataParallel) else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
paddle.save(
optimizer.state_dict(),
os.path.join(output_dir, "model_state.pdopt"))
if global_step >= args.max_steps:
del train_data_loader
return
del train_data_loader
train_data_loader, data_file = dataset_future.result(timeout=None)
if __name__ == "__main__":
args = parse_args()
if args.n_gpu > 1:
paddle.distributed.spawn(do_train, args=(args, ), nprocs=args.n_gpu)
else:
do_train(args)
......@@ -2,4 +2,4 @@
## Dialogue General Understanding
## PLATO
## PLATO-2
......@@ -7,28 +7,28 @@
The DGU model comprises six tasks, all trained and evaluated on public datasets with Paddle 2.0, detailed as follows:
```
DRS: uses the UDC (Ubuntu Corpus V1) dataset for the Dialogue Response Selection task;
DST: uses the DSTC2 (Dialog State Tracking Challenge 2) dataset for the Dialogue State Tracking task;
DSF: uses the ATIS (Airline Travel Information System) dataset for the Dialogue Slot Filling task;
DID: uses the ATIS (Airline Travel Information System) dataset for the Dialogue Intent Detection task;
MRDA: uses the MRDAC (Meeting Recorder Dialogue Act Corpus) dataset for the Dialogue Act Detection task;
SwDA: uses the SwDAC (Switchboard Dialogue Act Corpus) dataset for the Dialogue Act Detection task;
udc: uses the UDC (Ubuntu Corpus V1) dataset for the Dialogue Response Selection task;
dstc2: uses the DSTC2 (Dialog State Tracking Challenge 2) dataset for the Dialogue State Tracking task;
atis_slot: uses the ATIS (Airline Travel Information System) dataset for the Dialogue Slot Filling task;
atis_intent: uses the ATIS (Airline Travel Information System) dataset for the Dialogue Intent Detection task;
mrda: uses the MRDAC (Meeting Recorder Dialogue Act Corpus) dataset for the Dialogue Act Detection task;
swda: uses the SwDAC (Switchboard Dialogue Act Corpus) dataset for the Dialogue Act Detection task;
```
## Model Performance
The six DGU tasks are evaluated on their test sets with different metrics; the results are as follows:
<table border="1">
<table>
<tr><th style="text-align:center">任务</th><th style="text-align:center">评估指标</th><th style="text-align:center">DGU</th></tr>
<tr align="center"><td rowspan="3" style="vertical-align:middle;">DRS</td><td>R1@10</td><td>81.04%</td></tr>
<tr align="center"><td rowspan="3" style="vertical-align:middle;">udc</td><td>R1@10</td><td>81.04%</td></tr>
<tr align="center"><td>R2@10</td><td>89.85%</td></tr>
<tr align="center"><td>R5@10</td><td>97.59%</td></tr>
<tr align="center"><td>DST</td><td>Joint_Acc</td><td>90.43%</td></tr>
<tr align="center"><td>DSF</td><td>F1_Micro</td><td>97.98%</td></tr>
<tr align="center"><td>DID</td><td>Acc</td><td>97.42%</td></tr>
<tr align="center"><td>MRDA</td><td>Acc</td><td>90.94%</td></tr>
<tr align="center"><td>SwDA</td><td>Acc</td><td>80.61%</td></tr>
<tr align="center"><td>dstc2</td><td>Joint_Acc</td><td>90.43%</td></tr>
<tr align="center"><td>atis_slot</td><td>F1_Micro</td><td>97.98%</td></tr>
<tr align="center"><td>atis_intent</td><td>Acc</td><td>97.42%</td></tr>
<tr align="center"><td>mrda</td><td>Acc</td><td>90.94%</td></tr>
<tr align="center"><td>swda</td><td>Acc</td><td>80.61%</td></tr>
</table>
**NOTE:** The results above were all obtained by training and evaluating on a single GPU with the default configuration; to reproduce them, train and evaluate on a single card with the default configuration.
......@@ -69,7 +69,7 @@ The six DGU tasks are evaluated on their test sets with different
After downloading and extracting the dataset archive, the DGU_datasets directory contains six subdirectories, each holding a task's training set train.txt, dev set dev.txt, and test set test.txt.
```shell
wget wget https://paddlenlp.bj.bcebos.com/datasets/DGU_datasets.tar.gz
wget https://paddlenlp.bj.bcebos.com/datasets/DGU_datasets.tar.gz
tar -zxf DGU_datasets.tar.gz
```
......@@ -77,22 +77,22 @@ DGU_datasets directory structure:
```text
DGU_datasets/
├── did
├── atis_intent
│   ├── dev.txt
│   ├── map_tag_intent_id.txt
│   ├── test.txt
│   └── train.txt
├── drs
├── udc
│   ├── dev.txt
│   ├── dev.txt-small
│   ├── test.txt
│   └── train.txt
├── dsf
├── atis_slot
│   ├── dev.txt
│   ├── map_tag_slot_id.txt
│   ├── test.txt
│   └── train.txt
├── dst
├── dstc2
│   ├── dev.txt
│   ├── map_tag_id.txt
│   ├── test.txt
......@@ -112,16 +112,16 @@ DGU_datasets/
Each line of the data consists of multiple columns separated by "\t"; the detailed formats are as follows:
```
drs: a label, a multi-turn dialogue conv, and a response
udc: a label, a multi-turn dialogue conv, and a response
Format: label \t conv1 \t conv2 \t conv3 \t ... \t response
dst: a multi-turn dialogue id, the current-turn QA pair (joined with \1), and a dialogue state sequence state_list (states separated by spaces)
dstc2: a multi-turn dialogue id, the current-turn QA pair (joined with \1), and a dialogue state sequence state_list (states separated by spaces)
Format: conversation_id \t question \1 answer \t state1 state2 state3 ...
dsf: the dialogue content conversation_content and a label sequence label_list (labels separated by spaces), where the labels correspond one-to-one with the words in the dialogue content
atis_slot: the dialogue content conversation_content and a label sequence label_list (labels separated by spaces), where the labels correspond one-to-one with the words in the dialogue content
Format: conversation_content \t label1 label2 label3 ...
did: a label and the dialogue content conversation_content
atis_intent: a label and the dialogue content conversation_content
Format: label \t conversation_content
mrda: a multi-turn dialogue id, a label, the caller, and the dialogue content conversation_content
......@@ -140,14 +140,14 @@ swda: a multi-turn dialogue id, a label, the caller, and the dialogue content conversat
```shell
export CUDA_VISIBLE_DEVICES=0,1
# Launch on GPU; n_gpu sets the number of GPUs used for training, single or multiple cards. Training, validation, and evaluation all run by default
python -u main.py --task_name=drs --data_dir=./DGU_datasets/drs --output_dir=./checkpoints/drs --n_gpu=2
python -u main.py --task_name=udc --data_dir=./DGU_datasets/udc --output_dir=./checkpoints/udc --n_gpu=2
# To run evaluation only, set do_train to False; init_from_ckpt must then be specified
# python -u main.py --task_name=drs --data_dir=./DGU_datasets/drs --do_train=False --init_from_ckpt=./checkpoints/drs/best
# python -u main.py --task_name=udc --data_dir=./DGU_datasets/udc --do_train=False --init_from_ckpt=./checkpoints/udc/best
```
The parameters above are:
* task_name: task name; one of drs, dst, dsf, did, mrda, or swda.
* task_name: task name; one of udc, dstc2, atis_slot, atis_intent, mrda, or swda.
* data_dir: path to the training data.
* output_dir: path where trained models are saved.
* n_gpu: number of GPUs used for training; defaults to 1.
......
......@@ -108,7 +108,7 @@ def parse_args():
def set_default_args(args):
args.task_name = args.task_name.lower()
if args.task_name == 'drs':
if args.task_name == 'udc':
if not args.save_steps:
args.save_steps = 1000
if not args.logging_steps:
......@@ -119,7 +119,7 @@ def set_default_args(args):
args.max_seq_len = 210
if not args.test_batch_size:
args.test_batch_size = 100
elif args.task_name == 'dst':
elif args.task_name == 'dstc2':
if not args.save_steps:
args.save_steps = 400
if not args.logging_steps:
......@@ -132,14 +132,14 @@ def set_default_args(args):
args.max_seq_len = 256
if not args.test_max_seq_len:
args.test_max_seq_len = 512
elif args.task_name == 'dsf':
elif args.task_name == 'atis_slot':
if not args.save_steps:
args.save_steps = 100
if not args.logging_steps:
args.logging_steps = 10
if not args.epochs:
args.epochs = 50
elif args.task_name == 'did':
elif args.task_name == 'atis_intent':
if not args.save_steps:
args.save_steps = 100
if not args.logging_steps:
......
......@@ -20,7 +20,7 @@ def get_label_map(label_list):
class UDCv1(Dataset):
"""
The UDCv1 dataset is used in the task DRS (Dialogue Response Selection).
The UDCv1 dataset is used in the task Dialogue Response Selection.
The source dataset is UDCv1(Ubuntu Dialogue Corpus v1.0). See detail at
http://dataset.cs.mcgill.ca/ubuntu-corpus-1.0/
"""
......@@ -107,7 +107,7 @@ class UDCv1(Dataset):
class DSTC2(Dataset):
"""
The dataset DSTC2 is used in the task DST (Dialogue State Tracking).
The dataset DSTC2 is used in the task Dialogue State Tracking.
The source dataset is DSTC2(Dialog State Tracking Challenges 2). See detail at
https://github.com/matthen/dstc
"""
......@@ -207,7 +207,7 @@ class DSTC2(Dataset):
class ATIS_DSF(Dataset):
"""
The dataset ATIS_DSF is used in the task DSF (Dialogue Slot Filling).
The dataset ATIS_DSF is used in the task Dialogue Slot Filling.
The source dataset is ATIS(Airline Travel Information System). See detail at
https://www.kaggle.com/siddhadev/ms-cntk-atis
"""
......@@ -281,7 +281,7 @@ class ATIS_DSF(Dataset):
class ATIS_DID(Dataset):
"""
The dataset ATIS_DID is used in the task DID (Dialogue Intent Detection).
The dataset ATIS_DID is used in the task Dialogue Intent Detection.
The source dataset is ATIS(Airline Travel Information System). See detail at
https://www.kaggle.com/siddhadev/ms-cntk-atis
"""
......@@ -441,7 +441,7 @@ def truncate_and_concat(pre_txt: List[str],
class MRDA(Dataset):
"""
The dataset MRDA is used in the task DA (Dialogue Act).
The dataset MRDA is used in the task Dialogue Act.
The source dataset is MRDA(Meeting Recorder Dialogue Act). See detail at
https://www.aclweb.org/anthology/W04-2319.pdf
"""
......@@ -479,7 +479,7 @@ class MRDA(Dataset):
class SwDA(Dataset):
"""
The dataset SwDA is used in the task DA (Dialogue Act).
The dataset SwDA is used in the task Dialogue Act.
The source dataset is SwDA(Switchboard Dialog Act). See detail at
http://compprag.christopherpotts.net/swda.html
"""
......
......@@ -22,10 +22,10 @@ import data
import metric
TASK_CLASSES = {
'drs': (data.UDCv1, metric.RecallAtK),
'dst': (data.DSTC2, metric.JointAccuracy),
'dsf': (data.ATIS_DSF, metric.F1Score),
'did': (data.ATIS_DID, Accuracy),
'udc': (data.UDCv1, metric.RecallAtK),
'dstc2': (data.DSTC2, metric.JointAccuracy),
'atis_slot': (data.ATIS_DSF, metric.F1Score),
'atis_intent': (data.ATIS_DID, Accuracy),
'mrda': (data.MRDA, Accuracy),
'swda': (data.SwDA, Accuracy)
}
......@@ -70,18 +70,20 @@ class DGULossFunction(nn.Layer):
self.loss_fn = self.get_loss_fn()
def get_loss_fn(self):
if self.task_name in ['drs', 'dsf', 'did', 'mrda', 'swda']:
if self.task_name in [
'udc', 'atis_slot', 'atis_intent', 'mrda', 'swda'
]:
return F.softmax_with_cross_entropy
elif self.task_name == 'dst':
elif self.task_name == 'dstc2':
return nn.BCEWithLogitsLoss(reduction='sum')
def forward(self, logits, labels):
if self.task_name in ['drs', 'did', 'mrda', 'swda']:
if self.task_name in ['udc', 'atis_intent', 'mrda', 'swda']:
loss = self.loss_fn(logits, labels)
loss = paddle.mean(loss)
elif self.task_name == 'dst':
elif self.task_name == 'dstc2':
loss = self.loss_fn(logits, paddle.cast(labels, dtype=logits.dtype))
elif self.task_name == 'dsf':
elif self.task_name == 'atis_slot':
labels = paddle.unsqueeze(labels, axis=-1)
loss = self.loss_fn(logits, labels)
loss = paddle.mean(loss)
......@@ -89,8 +91,8 @@ class DGULossFunction(nn.Layer):
def print_logs(args, step, logits, labels, loss, total_time, metric):
if args.task_name in ['drs', 'did', 'mrda', 'swda']:
if args.task_name == 'drs':
if args.task_name in ['udc', 'atis_intent', 'mrda', 'swda']:
if args.task_name == 'udc':
metric = Accuracy()
metric.reset()
correct = metric.compute(logits, labels)
......@@ -98,13 +100,13 @@ def print_logs(args, step, logits, labels, loss, total_time, metric):
acc = metric.accumulate()
print('step %d - loss: %.4f - acc: %.4f - %.3fs/step' %
(step, loss, acc, total_time / args.logging_steps))
elif args.task_name == 'dst':
elif args.task_name == 'dstc2':
metric.reset()
metric.update(logits, labels)
joint_acc = metric.accumulate()
print('step %d - loss: %.4f - joint_acc: %.4f - %.3fs/step' %
(step, loss, joint_acc, total_time / args.logging_steps))
elif args.task_name == 'dsf':
elif args.task_name == 'atis_slot':
metric.reset()
metric.update(logits, labels)
f1_micro = metric.accumulate()
......@@ -181,13 +183,14 @@ def train(args, model, train_data_loader, dev_data_loader, metric, rank):
batch_start_time = time.time()
@paddle.no_grad()
def evaluation(args, model, data_loader, metric):
model.eval()
metric.reset()
for batch in data_loader:
input_ids, segment_ids, labels = batch
logits = model(input_ids, segment_ids)
if args.task_name in ['did', 'mrda', 'swda']:
if args.task_name in ['atis_intent', 'mrda', 'swda']:
correct = metric.compute(logits, labels)
metric.update(correct)
else:
......@@ -195,17 +198,17 @@ def evaluation(args, model, data_loader, metric):
model.train()
metric_out = metric.accumulate()
print('Total samples: %d' % (len(data_loader) * args.test_batch_size))
if args.task_name == 'drs':
if args.task_name == 'udc':
print('R1@10: %.4f - R2@10: %.4f - R5@10: %.4f\n' %
(metric_out[0], metric_out[1], metric_out[2]))
return metric_out[0]
elif args.task_name == 'dst':
elif args.task_name == 'dstc2':
print('Joint_acc: %.4f\n' % metric_out)
return metric_out
elif args.task_name == 'dsf':
elif args.task_name == 'atis_slot':
print('F1_micro: %.4f\n' % metric_out)
return metric_out
elif args.task_name in ['did', 'mrda', 'swda']:
elif args.task_name in ['atis_intent', 'mrda', 'swda']:
print('Acc: %.4f\n' % metric_out)
return metric_out
......@@ -248,7 +251,7 @@ def main(args):
max_seq_length=args.test_max_seq_len)
metric = metric_class()
if args.task_name in ('drs', 'dst', 'did', 'mrda', 'swda'):
if args.task_name in ('udc', 'dstc2', 'atis_intent', 'mrda', 'swda'):
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_id), # segment
......@@ -256,7 +259,7 @@ def main(args):
): fn(samples)
model = BertForSequenceClassification.from_pretrained(
args.model_name_or_path, num_classes=dataset_class.num_classes())
elif args.task_name == 'dsf':
elif args.task_name == 'atis_slot':
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_id), # segment
......
# PLATO-2
## Model Overview
Building a high-quality open-domain chatbot that can converse freely with people in natural language has long been one of the ultimate goals of natural language processing.
To make building a high-quality open-domain chatbot easy, this project implements the PLATO-2 inference model on Paddle 2.0 and provides simple terminal-based human-machine interaction. Users can download the pretrained model to quickly build an open-domain chatbot.
PLATO-2's network structure and evaluation results are shown in the figures below:
![image](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/dialogue/plato-2/imgs/network.png)
![image](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/dialogue/plato-2/imgs/eval_en.png)
![image](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/dialogue/plato-2/imgs/eval_cn.png)
See [Knover](https://github.com/PaddlePaddle/Knover) for PLATO-2's training procedure and other details
## Quick Start
### Installation
* Installing PaddlePaddle
This project requires PaddlePaddle 2.0 or later; see the [installation guide](http://www.paddlepaddle.org/#quick-start) for instructions
* Installing PaddleNLP
```shell
pip install paddlenlp>=2.0.0b
```
* Environment dependencies
Python 3.6+ is required.
This project depends on sentencepiece and termcolor; install them before running the project
```shell
pip install sentencepiece termcolor
```
### Code Structure
The main code structure of this project is as follows:
```text
.
├── interaction.py # interactive entry point
├── model.py # model network
├── readers
│   ├── dialog_reader.py # model input data generation
│   ├── nsp_reader.py # model input data generation
│   └── plato_reader.py # model input data generation
├── utils
│   ├── __init__.py # basic functions
│   ├── args.py # runtime argument configuration
│   ├── masking.py # mask-related functions
│   └── tokenization.py # tokenization functions
├── imgs # example images
└── README.md # documentation
```
### Data Preparation
You can download the pretrained model files from the following locations:
* PLATO-2, 24-layers, 16-heads, 1024-hidden, EN: [pretrained model](https://paddlenlp.bj.bcebos.com/models/transformers/plato2/24L.pdparams)
* PLATO-2, 32-layers, 32-heads, 2048-hidden, EN: [pretrained model](https://paddlenlp.bj.bcebos.com/models/transformers/plato2/32L.pdparams)
Taking the 24-layer pretrained model as an example:
```shell
wget https://paddlenlp.bj.bcebos.com/models/transformers/plato2/24L.pdparams
```
**NOTE:** PLATO-2 has a large number of parameters: the 24-layer network needs at least 16 GB of GPU memory and the 32-layer network at least 22 GB. Choose a suitable network depth and pretrained model.
Download the sentencepiece tokenizer model and the vocabulary file:
```shell
wget https://paddlenlp.bj.bcebos.com/models/transformers/plato2/data.tar.gz
tar -zxf data.tar.gz
```
### Interactive Chat
Run the following command to start a simple English conversation with the chatbot
```shell
export CUDA_VISIBLE_DEVICES=0
python interaction.py --vocab_path ./data/vocab.txt --spm_model_file ./data/spm.model --num_layers 24 --init_from_ckpt ./24L.pdparams
```
The parameters above are:
* vocab_path: path to the vocabulary file.
* spm_model_file: path to the pretrained sentencepiece tokenizer model.
* num_layers: number of layers in the PLATO-2 network.
* init_from_ckpt: path to the PLATO-2 pretrained model.
An interaction example with the 32-layer PLATO-2 network:
![image](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/examples/dialogue/plato-2/imgs/case.jpg)
import json
import argparse
from collections import namedtuple
from termcolor import colored, cprint
import paddle
from utils.args import parse_args, str2bool
from utils import gen_inputs
from readers.nsp_reader import NSPReader
from readers.plato_reader import PlatoReader
from model import Plato2InferModel
def setup_args():
"""Setup arguments."""
parser = argparse.ArgumentParser()
group = parser.add_argument_group("Model")
group.add_argument("--init_from_ckpt", type=str, default="")
group.add_argument("--vocab_size", type=int, default=8001)
group.add_argument("--latent_type_size", type=int, default=20)
group.add_argument("--num_layers", type=int, default=24)
group = parser.add_argument_group("Task")
group.add_argument("--is_cn", type=str2bool, default=False)
args, _ = parser.parse_known_args()
NSPReader.add_cmdline_args(parser)
args = parse_args(parser)
args.batch_size *= args.latent_type_size
#print(json.dumps(args, indent=2))
return args
def load_params(model, init_from_ckpt):
state_dict = paddle.load(init_from_ckpt)
model.set_state_dict(state_dict)
def interact(args):
"""Inference main function."""
plato_reader = PlatoReader(args)
nsp_reader = NSPReader(args)
if args.num_layers == 24:
n_head = 16
hidden_size = 1024
elif args.num_layers == 32:
n_head = 32
hidden_size = 2048
else:
        raise ValueError('The pre-trained model only supports 24 or 32 layers, '
                         'but received num_layers=%d.' % args.num_layers)
model = Plato2InferModel(nsp_reader, args.num_layers, n_head, hidden_size)
load_params(model, args.init_from_ckpt)
model.eval()
Example = namedtuple("Example", ["src", "data_id"])
context = []
start_info = "Enter [EXIT] to quit the interaction, [NEXT] to start a new conversation."
cprint(start_info, "yellow", attrs=["bold"])
while True:
user_utt = input(colored("[Human]: ", "red", attrs=["bold"])).strip()
if user_utt == "[EXIT]":
break
elif user_utt == "[NEXT]":
context = []
cprint(start_info, "yellow", attrs=["bold"])
else:
context.append(user_utt)
example = Example(src=" [SEP] ".join(context), data_id=0)
record = plato_reader._convert_example_to_record(
example, is_infer=True)
data = plato_reader._pad_batch_records([record], is_infer=True)
inputs = gen_inputs(data, args.latent_type_size)
pred = model(inputs)[0]
bot_response = pred["response"]
print(
colored(
"[Bot]:", "blue", attrs=["bold"]),
colored(
bot_response, attrs=["bold"]))
context.append(bot_response)
return
if __name__ == "__main__":
args = setup_args()
interact(args)
from collections import namedtuple
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
def post_process_context(token_ids, reader, merge=True):
"""Post-process the context sequence."""
context = []
utt = []
for tok_id in token_ids[1:]:
if tok_id == reader.eos_id:
utt = reader.tokenizer.convert_ids_to_tokens(utt)
if merge:
utt = reader.tokenizer.merge_subword(utt)
context.append(utt)
utt = []
else:
utt.append(tok_id)
return context
def post_process_response(token_ids, reader, merge=True):
"""
Post-process the decoded sequence. Truncate from the first
<eos> and remove the <bos> and <eos> tokens currently.
"""
eos_pos = len(token_ids)
for i, tok_id in enumerate(token_ids):
if tok_id == reader.eos_id:
eos_pos = i
break
token_ids = token_ids[1:eos_pos]
response = reader.tokenizer.convert_ids_to_tokens(token_ids)
if merge:
response = reader.tokenizer.merge_subword(response)
return token_ids, response
def get_cross_turn_repetition(context, pred_tokens, eos_idx, is_cn=False):
"""Get cross-turn repetition."""
if len(pred_tokens) == 0:
return 1.0
if is_cn:
context = ["".join(utt) for utt in context]
pred_tokens = "".join(pred_tokens)
pred_tri_grams = set()
for i in range(len(pred_tokens) - 2):
tri_gram = tuple(pred_tokens[i:i + 3])
pred_tri_grams.add(tri_gram)
for utt in context:
for i in range(len(utt) - 2):
tri_gram = tuple(utt[i:i + 3])
if tri_gram in pred_tri_grams:
return 1.0
return 0.0
def get_in_turn_repetition(pred, is_cn=False):
"""Get in-turn repetition."""
if len(pred) == 0:
return 1.0
if isinstance(pred[0], str):
pred = [tok.lower() for tok in pred]
if is_cn:
pred = "".join(pred)
tri_grams = set()
for i in range(len(pred) - 2):
tri_gram = tuple(pred[i:i + 3])
if tri_gram in tri_grams:
return 1.0
tri_grams.add(tri_gram)
return 0.0
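# Illustrative examples for the repetition helpers above (made-up token lists,
# not from the repo): the tri-gram ("i", "like", "tea") occurs twice within
# the first response, so it is flagged as in-turn repetition.
#   get_in_turn_repetition(["i", "like", "tea", "i", "like", "tea"])  # -> 1.0
#   get_in_turn_repetition(["hello", "there", "friend"])              # -> 0.0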
class Plato2EncoderLayer(nn.Layer):
def __init__(self, n_head, hidden_size, attn_dropout, act_dropout):
super(Plato2EncoderLayer, self).__init__()
self.self_attn = nn.MultiHeadAttention(hidden_size, n_head,
attn_dropout)
self.pre_norm_layer = nn.LayerNorm(hidden_size)
self.post_norm_layer = nn.LayerNorm(hidden_size)
self.fc1 = nn.Linear(hidden_size, hidden_size * 4)
self.fc2 = nn.Linear(hidden_size * 4, hidden_size)
self.dropout_layer = nn.Dropout(act_dropout)
self.gelu_layer = nn.GELU()
def forward(self, x, attn_mask, cache):
query = self.pre_norm_layer(x)
attn_output, new_cache = self.self_attn(query, None, None, attn_mask,
cache)
attn_output = self.dropout_layer(attn_output)
attn_output = attn_output + x
ffd_input = self.post_norm_layer(attn_output)
ffd_output = self.fc1(ffd_input)
ffd_output = self.gelu_layer(ffd_output)
ffd_output = self.dropout_layer(ffd_output)
ffd_output = self.fc2(ffd_output)
ffd_output = self.dropout_layer(ffd_output)
out = ffd_output + attn_output
return out, new_cache
def gen_cache(self, key):
return self.self_attn.gen_cache(key)
class Plato2Encoder(nn.Layer):
def __init__(self, vocab_size, type_size, max_position_seq_len, num_layers,
n_head, hidden_size, attn_dropout, act_dropout):
super(Plato2Encoder, self).__init__()
self.n_head = n_head
self.word_embedding_layer = nn.Embedding(vocab_size, hidden_size)
self.sent_embedding_layer = nn.Embedding(type_size, hidden_size)
self.pos_embedding_layer = nn.Embedding(max_position_seq_len,
hidden_size)
self.encoder_layers = []
for i in range(num_layers):
encoder_layer = Plato2EncoderLayer(n_head, hidden_size,
attn_dropout, act_dropout)
self.encoder_layers.append(encoder_layer)
self.add_sublayer('layers.' + str(i), encoder_layer)
self.post_encoder_layer_norm = nn.LayerNorm(hidden_size)
self.dropout_layer = nn.Dropout(act_dropout)
def forward(self,
caches,
token_ids,
type_ids,
pos_ids,
generation_mask,
aux_emb=None):
out, self_attn_mask = self.gen_input(token_ids, type_ids, pos_ids,
generation_mask, aux_emb)
new_caches = []
for i, encoder_layer in enumerate(self.encoder_layers):
out, new_cache = encoder_layer(out, self_attn_mask, caches[i])
new_caches.append(new_cache)
enc_output = self.post_encoder_layer_norm(out)
return enc_output, new_caches
def gen_input(self, token_ids, type_ids, pos_ids, input_mask, aux_emb=None):
token_emb_out = self.word_embedding_layer(token_ids)
type_emb_out = self.sent_embedding_layer(type_ids)
pos_emb_out = self.pos_embedding_layer(pos_ids)
emb_out = token_emb_out + type_emb_out + pos_emb_out
# auxiliary memory embeddings
if aux_emb is not None:
emb_out = paddle.concat([aux_emb, emb_out], axis=1)
emb_out = self.dropout_layer(emb_out)
# generate n-head self-attention mask
self_attn_mask = input_mask
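        # (mask - 1) * 1e4: visible positions (mask == 1) become 0 and masked
        # positions (mask == 0) become -1e4, i.e. a large negative additive
        # attention bias.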
self_attn_mask = paddle.scale(
x=self_attn_mask, scale=1e4, bias=-1.0, bias_after_scale=False)
n_head_self_attn_mask = paddle.stack(
x=[self_attn_mask] * self.n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
return emb_out, n_head_self_attn_mask
def gen_caches(self, key):
caches = [
encoder_layer.gen_cache(key)
for encoder_layer in self.encoder_layers
]
return caches
class NSP(nn.Layer):
def __init__(self, vocab_size, type_size, max_position_seq_len, num_layers,
n_head, hidden_size, attn_dropout, act_dropout):
super(NSP, self).__init__()
self.n_head = n_head
self.hidden_size = hidden_size
self.word_embedding_layer = nn.Embedding(vocab_size, hidden_size)
self.sent_embedding_layer = nn.Embedding(type_size, hidden_size)
self.pos_embedding_layer = nn.Embedding(max_position_seq_len,
hidden_size)
encoder_layer = nn.TransformerEncoderLayer(
hidden_size, n_head, hidden_size * 4, act_dropout, 'gelu',
            attn_dropout, act_dropout, True)
encoder_norm = nn.LayerNorm(hidden_size)
self.encoder = nn.TransformerEncoder(encoder_layer, num_layers,
encoder_norm)
self.fc1 = nn.Linear(hidden_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, 2)
self.dropout_layer = nn.Dropout(act_dropout)
self.tanh_layer = nn.Tanh()
self.softmax = nn.Softmax()
def forward(self, inputs):
token_ids = inputs['token_ids']
type_ids = inputs['type_ids']
pos_ids = inputs['pos_ids']
attention_mask = inputs['attention_mask']
label_pos = inputs["label_pos"]
out, self_attn_mask = self.gen_input(token_ids, type_ids, pos_ids,
attention_mask)
# [-1, seq_len, hidden_size]
enc_out = self.encoder(out, self_attn_mask)
enc_out = paddle.reshape(enc_out, [-1, self.hidden_size])
label_pos = paddle.cast(label_pos, 'int64')
out = paddle.gather(enc_out, label_pos)
pooled_out = self.fc1(out)
pooled_out = self.tanh_layer(pooled_out)
# [-1, 2]
logits = self.fc2(pooled_out)
probs = self.softmax(logits)
return probs
def gen_input(self, token_ids, type_ids, pos_ids, input_mask, aux_emb=None):
token_emb_out = self.word_embedding_layer(token_ids)
type_emb_out = self.sent_embedding_layer(type_ids)
pos_emb_out = self.pos_embedding_layer(pos_ids)
emb_out = token_emb_out + type_emb_out + pos_emb_out
# auxiliary memory embeddings
if aux_emb is not None:
emb_out = paddle.concat([aux_emb, emb_out], axis=1)
emb_out = self.dropout_layer(emb_out)
# generate n-head self-attention mask
self_attn_mask = input_mask
self_attn_mask = paddle.scale(
x=self_attn_mask, scale=1e4, bias=-1.0, bias_after_scale=False)
n_head_self_attn_mask = paddle.stack(
x=[self_attn_mask] * self.n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
return emb_out, n_head_self_attn_mask
class Plato2InferModel(nn.Layer):
def __init__(self,
nsp_reader,
num_layers,
n_head,
hidden_size,
vocab_size=8001,
type_size=2,
latent_type_size=20,
max_position_seq_len=256,
act_dropout=0.1,
attn_dropout=0.1,
max_dec_len=64,
min_dec_len=1,
topk=10):
super(Plato2InferModel, self).__init__()
self.nsp_reader = nsp_reader
self.num_layers = num_layers
self.latent_type_size = latent_type_size
self.max_dec_len = max_dec_len
self.min_dec_len = min_dec_len
self.topk = topk
self.unk_id = 0
self.bos_id = 1
self.eos_id = 2
self.mask_id = 8000
self.after_eos = paddle.ones([vocab_size]) * -1e9
self.after_eos[self.eos_id] = 0
self.is_cn = False
self.batch_size = 1
self.latent_weight = paddle.create_parameter(
[hidden_size, latent_type_size], 'float32')
self.plato2_encoder = Plato2Encoder(
vocab_size, type_size, max_position_seq_len, num_layers, n_head,
hidden_size, attn_dropout, act_dropout)
self.logits_fc_layer = nn.Linear(hidden_size, hidden_size)
self.logits_layer_norm = nn.LayerNorm(hidden_size)
self.logits_bias = paddle.create_parameter(
[vocab_size], 'float32', is_bias=True)
self.nsp_predictor = NSP(vocab_size, type_size, max_position_seq_len,
num_layers, n_head, hidden_size, attn_dropout,
act_dropout)
self.gelu_layer = nn.GELU()
self.softmax = nn.Softmax()
@paddle.no_grad()
def forward(self, inputs):
token_ids = inputs['token_ids']
type_ids = inputs['type_ids']
pos_ids = inputs['pos_ids']
generation_mask = inputs['generation_mask']
latent_id = inputs['latent_id']
data_id = inputs['data_id']
# [-1, 1, latent_type_size]
latent_id = F.one_hot(latent_id, self.latent_type_size)
# [-1, 1, hidden_size]
latent_emb = paddle.matmul(
latent_id, self.latent_weight, transpose_y=True)
caches = self.plato2_encoder.gen_caches(token_ids)
# [-1, seq_len + 1, hidden_size]
enc_out, new_caches = self.plato2_encoder(
caches, token_ids, type_ids, pos_ids, generation_mask, latent_emb)
pred_ids = self.decode(inputs, new_caches)
nsp_inputs = self.gen_nsp_input(token_ids, pred_ids)
# [-1, 2]
probs = self.nsp_predictor(nsp_inputs)
return self.get_results(data_id, token_ids, pred_ids, probs)
def decode(self, inputs, caches):
tgt_ids = inputs['tgt_ids']
tgt_pos = inputs['tgt_pos']
tgt_generation_mask = inputs['tgt_generation_mask']
predictions = tgt_ids
# TODO
step = 0
while step < self.max_dec_len:
# [-1, 1]
append_mask = paddle.cast(
tgt_ids != self.eos_id, dtype=tgt_generation_mask.dtype)
tgt_generation_mask = paddle.concat(
[tgt_generation_mask, paddle.unsqueeze(append_mask, 1)],
axis=-1)
tgt_sent = paddle.ones(
[tgt_generation_mask.shape[0], 1], dtype=tgt_ids.dtype)
# [-1, 1, hidden_size]
out, caches = self.plato2_encoder(caches, tgt_ids, tgt_sent,
tgt_pos, tgt_generation_mask)
out = paddle.squeeze(out, axis=1)
# [-1, hidden_size]
trans = self.logits_fc_layer(out)
trans = self.gelu_layer(trans)
trans = self.logits_layer_norm(trans)
# [-1, vocab_size]
logits = paddle.matmul(
trans,
self.plato2_encoder.word_embedding_layer.weight,
transpose_y=True) + self.logits_bias
logits[:, self.unk_id] = -1e9
logits[:, self.bos_id] = -1e9
logits[:, self.mask_id] = -1e9
if step < self.min_dec_len:
logits[:, self.eos_id] = -1e9
logits = logits * append_mask + (1 - append_mask) * self.after_eos
probs = self.softmax(logits)
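            # Top-k sampling: keep only the k highest-probability tokens,
            # renormalize within that set, and draw the next token from it.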
# [-1, topk]
topk_probs, _ = paddle.topk(probs, k=self.topk)
mask = paddle.cast(probs >= topk_probs[:, -1:], 'float32')
sums = paddle.sum(topk_probs, axis=-1, keepdim=True)
new_probs = probs * mask / sums
# [-1, 1]
sampling_ids = paddle.multinomial(new_probs)
step = step + 1
tgt_ids = sampling_ids
tgt_pos = tgt_pos + 1
predictions = paddle.concat([predictions, tgt_ids], axis=1)
return predictions
def gen_nsp_input(self, token_ids, pred_ids):
token_ids = token_ids.numpy()
pred_ids = pred_ids.numpy()
def __reader__():
headers = ["src", "tgt", "data_id"]
Example = namedtuple("Example", headers)
for i, (raw, pred) in enumerate(zip(token_ids, pred_ids)):
context = post_process_context(
raw, self.nsp_reader, merge=False)
_, response = post_process_response(
pred, self.nsp_reader, merge=False)
context_tokenized_input = " [SEP] ".join(" ".join(utt)
for utt in context)
response_tokenized_input = " ".join(response)
example = Example(
src=context_tokenized_input,
tgt=response_tokenized_input,
data_id=i)
data = self.nsp_reader._convert_example_to_record(
example, is_infer=True)
yield data
return
generator = self.nsp_reader.data_generator(
reader=__reader__,
is_infer=True,
phase="test", )
inputs = next(generator())
#print('\nnsp_inputs:')
for key in inputs:
inputs[key] = paddle.to_tensor(inputs[key])
if key in ['token_ids', 'type_ids', 'pos_ids']:
inputs[key] = paddle.squeeze(inputs[key], axis=-1)
#print(key, inputs[key].shape)
#print(inputs[key])
return inputs
def get_results(self, data_id, token_ids, pred_ids, probs):
data_id = data_id.numpy()
token_ids = token_ids.numpy()
pred_ids = pred_ids.numpy()
probs = probs.numpy()
infos = []
for raw, pred, prob in zip(token_ids, pred_ids, probs):
tokens = post_process_context(raw, self.nsp_reader)
pred_token_ids, pred_tokens = post_process_response(pred,
self.nsp_reader)
info = {}
info['response'] = ' '.join(pred_tokens)
cross_turn_repetition = get_cross_turn_repetition(
tokens, pred_tokens, self.nsp_reader.eos_id, self.is_cn)
in_turn_repetition = max(
get_in_turn_repetition(pred_tokens, self.is_cn),
get_in_turn_repetition(pred_token_ids))
info['score'] = float(prob[1])
if len(pred_token_ids) >= self.max_dec_len:
info['score'] -= 1e3
elif cross_turn_repetition > 0:
info['score'] -= 1e3
elif in_turn_repetition > 0:
info['score'] -= 1e3
infos.append(info)
results = []
pre_idx = 0
sample = []
for idx, info in zip(data_id, infos):
if idx != pre_idx:
sample = sorted(sample, key=lambda info: -info["score"])
result = sample[0]
result['data_id'] = pre_idx
                results.append(result)
sample = []
pre_idx = idx
sample.append(info)
if sample:
sample = sorted(sample, key=lambda info: -info["score"])
result = sample[0]
result['data_id'] = pre_idx
results.append(result)
return results
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dialogue Reader."""
import csv
from collections import namedtuple
from contextlib import contextmanager
import gzip
import numpy as np
from utils import pad_batch_data
from utils.args import str2bool
from utils.masking import mask
import utils.tokenization as tokenization
class DialogReader(object):
"""The implement of DialogReader."""
@classmethod
def add_cmdline_args(cls, parser):
"""Add cmdline argurments."""
group = parser.add_argument_group("Reader")
group.add_argument("--max_src_len", type=int, default=128)
group.add_argument("--max_tgt_len", type=int, default=128)
group.add_argument(
"--truncate_first_turn", type=str2bool, default=False)
group.add_argument(
"--file_format",
type=str,
default="file",
choices=["file", "filelist"])
group.add_argument(
"--data_format",
type=str,
default="raw",
choices=["raw", "tokenized", "numerical"])
group.add_argument("--in_tokens", type=str2bool, default=False)
group.add_argument("--batch_size", type=int, default=16)
group.add_argument("--continuous_position", type=str2bool, default=True)
group.add_argument("--random_seed", type=int, default=11)
group.add_argument("--sort_pool_size", type=int, default=2**16)
group = parser.add_argument_group("Tokenizer")
group.add_argument(
"--tokenizer", type=str, default="SentencePieceTokenizer")
args, _ = parser.parse_known_args()
tokenizer_cls = getattr(tokenization, args.tokenizer)
tokenizer_cls.add_cmdline_args(parser)
return group
def __init__(self, args):
tokenizer_cls = getattr(tokenization, args.tokenizer)
self.tokenizer = tokenizer_cls(args)
self.vocab = self.tokenizer.vocab
self.pad_id = args.pad_id = self.vocab["[PAD]"]
self.bos_id = args.bos_id = self.vocab["[CLS]"]
self.eos_id = args.eos_id = self.vocab["[SEP]"]
self.unk_id = args.unk_id = self.vocab["[UNK]"]
self.mask_id = args.mask_id = self.vocab["[MASK]"]
self.vocab_size = args.get("vocab_size", 0)
self.max_src_len = args.max_src_len
self.max_tgt_len = args.max_tgt_len
self.truncate_first_turn = args.truncate_first_turn
self.file_format = args.file_format
self.data_format = args.data_format
self.in_tokens = args.in_tokens
self.batch_size = args.batch_size
self.continuous_position = args.continuous_position
self.sort_pool_size = args.sort_pool_size
# random_seed must be set for data slicing when using multi-gpu
self.global_rng = np.random.RandomState(args.random_seed)
# training progress
self.current_example = 0
self.current_epoch = 0
self.num_examples = 0
# model related
self.fields = ["token_ids", "type_ids", "pos_ids"]
self.num_numerical_fields = len(self.fields)
self.fields += ["tgt_start_idx", "data_id"]
self.sort_key = lambda record: [len(record.token_ids)]
self.Record = namedtuple(
"Record", self.fields, defaults=(None, ) * len(self.fields))
self.features = {}
return
def get_train_progress(self):
"""Gets progress for training phase."""
return self.current_epoch, self.current_file_index, self.total_file
def _convert_example_to_record(self, example, is_infer):
# process src
src_token_ids = []
src_pos_ids = []
# tokenize src
s_token_ids_list = []
for s in example.src.split("[SEP]"):
s = tokenization.convert_to_unicode(s).strip()
if self.data_format == "tokenized":
s_tokens = s.split(" ")
else:
s_tokens = self.tokenizer.tokenize(s)
s_token_ids = self.tokenizer.convert_tokens_to_ids(
s_tokens) + [self.eos_id]
s_token_ids_list.append(s_token_ids)
# trim src
idx = len(s_token_ids_list) - 1
total_token_num = 1
while idx >= 0:
total_token_num += len(s_token_ids_list[idx])
if total_token_num > self.max_src_len:
if self.truncate_first_turn and idx == 0:
truncated_ids = s_token_ids_list[idx][:self.max_src_len -
total_token_num]
if len(truncated_ids) > 1:
s_token_ids_list[
idx] = truncated_ids[:-1] + [self.eos_id]
idx -= 1
break
idx -= 1
for i, s_token_ids in enumerate(s_token_ids_list[idx + 1:], idx + 1):
src_token_ids += s_token_ids
src_pos_ids += list(range(1, len(s_token_ids) + 1))
src_token_ids = [self.bos_id] + src_token_ids
src_type_ids = [0] * len(src_token_ids)
src_pos_ids = [0] + src_pos_ids
assert len(src_token_ids) == len(src_type_ids) == len(src_pos_ids), \
"not len(src_token_ids) == len(src_type_ids) == len(src_pos_ids)"
token_ids = src_token_ids
type_ids = src_type_ids
pos_ids = src_pos_ids
tgt_start_idx = len(token_ids)
if not is_infer:
# process tgt
# tokenize tgt
tgt = tokenization.convert_to_unicode(example.tgt).strip()
if self.data_format == "tokenized":
tgt_tokens = tgt.split(" ")
else:
tgt_tokens = self.tokenizer.tokenize(tgt)
tgt_token_ids = self.tokenizer.convert_tokens_to_ids(tgt_tokens)
tgt_token_ids.append(self.eos_id)
# trim tgt
if len(tgt_token_ids) > self.max_tgt_len - 1:
tgt_token_ids = tgt_token_ids[:self.max_tgt_len - 1]
tgt_token_ids = [self.bos_id] + tgt_token_ids
tgt_type_ids = [1] * len(tgt_token_ids)
tgt_pos_ids = list(range(1, len(tgt_token_ids) + 1))
assert len(tgt_token_ids) == len(tgt_type_ids) == len(tgt_pos_ids), \
"not len(tgt_token_ids) == len(tgt_type_ids) == len(tgt_pos_ids)"
token_ids += tgt_token_ids
type_ids += tgt_type_ids
pos_ids += tgt_pos_ids
assert len(token_ids) == len(type_ids) == len(pos_ids), \
"not len(token_ids) == len(type_ids) == len(pos_ids)"
if self.continuous_position:
src_pos_ids = list(range(len(src_token_ids)))
if not is_infer:
tgt_pos_ids = list(range(len(tgt_token_ids)))
pos_ids = list(range(len(token_ids)))
field_values = {
"token_ids": src_token_ids,
"type_ids": src_type_ids,
"pos_ids": src_pos_ids
}
field_values["tgt_start_idx"] = tgt_start_idx
field_values["data_id"] = example.data_id
record = self.Record(**field_values)
return record
def _read_tsv(self, fp, phase, is_infer, delimiter="\t", quotechar=None):
"""Reads a tab separated value file."""
csv.field_size_limit(2**20)
reader = csv.reader(fp, delimiter=delimiter, quotechar=quotechar)
headers = next(reader)
headers.append("data_id")
Example = namedtuple("Example", headers)
for i, line in enumerate(reader):
example = Example(*line, data_id=i)
if is_infer or phase.endswith("test"):
self.features[phase][i] = example
record = self._convert_example_to_record(example, is_infer)
yield record
def _read_numerical_file(self, fp, delimiter=";"):
for i, line in enumerate(fp):
cols = tokenization.convert_to_unicode(line).strip().split(
delimiter)
cols = list(map(lambda x: list(map(int, x.split(" "))), cols))
if len(cols) > self.num_numerical_fields:
cols = cols[:self.num_numerical_fields]
tgt_start_idx = cols[0].index(self.bos_id, 1)
record = self.Record(*cols, tgt_start_idx=tgt_start_idx, data_id=i)
yield record
def _read_file(self, input_file, phase, is_infer):
def __wrapper__():
with open_file(input_file) as fp:
if self.data_format == "numerical":
records = self._read_numerical_file(fp)
else:
records = self._read_tsv(fp, phase, is_infer)
for record in records:
yield record
return __wrapper__
def _read_files(self, filelist, phase, is_infer, shuffle_files):
input_files = open(filelist).readlines()
def __wrapper__():
if shuffle_files:
self.global_rng.shuffle(input_files)
if phase == "train":
self.total_file = len(input_files)
for file_index, input_file in enumerate(input_files, 1):
if phase == "train":
self.current_file_index = file_index
self.current_file = input_file
file_reader = self._read_file(input_file.strip(), phase,
is_infer)
for record in file_reader():
yield record
return __wrapper__
def _batch_reader(self,
reader,
phase=None,
is_infer=False,
sort_pool_size=2**16):
"""Construct a batch reader."""
def update_max_lens(max_lens, record):
"""Update max_lens."""
if max_lens is None:
return self.sort_key(record)
else:
return [
max(max_len, l) for max_len, l in zip(max_lens,
self.sort_key(record))
]
def get_batch(reader):
"""Generate batches from reader."""
batch, max_lens = [], None
for record in reader():
if record is None:
yield batch
batch, max_lens = [], None
continue
self.current_example += 1
max_lens = update_max_lens(max_lens, record)
if self.in_tokens:
to_append = (len(batch) + 1
) * sum(max_lens) <= self.batch_size
else:
to_append = len(batch) < self.batch_size
if to_append:
batch.append(record)
else:
yield batch
batch, max_lens = [record], self.sort_key(record)
if len(batch) > 0:
yield batch
def get_sorted_batch(pool):
"""Generate sorted batches from pool."""
pool = sorted(pool, key=self.sort_key)
batches = []
batch, max_lens = [], None
for record in pool:
self.current_example += 1
max_lens = update_max_lens(max_lens, record)
if self.in_tokens:
to_append = (len(batch) + 1
) * sum(max_lens) <= self.batch_size
else:
to_append = len(batch) < self.batch_size
if to_append:
batch.append(record)
else:
batches.append(batch)
batch, max_lens = [record], self.sort_key(record)
if len(batch) > 0:
batches.append(batch)
self.global_rng.shuffle(batches)
for batch in batches:
yield batch
def __wrapper__():
if sort_pool_size > 0:
pool = []
for record in reader():
pool.append(record)
if len(pool) == sort_pool_size:
for batch in get_sorted_batch(pool):
yield batch
pool = []
if len(pool) > 0:
for batch in get_sorted_batch(pool):
yield batch
else:
for batch in get_batch(reader):
yield batch
return __wrapper__
def _distributed_batch_reader(self,
batch_reader,
num_part,
part_id,
is_test=False):
def __wrapper__():
batches = []
for batch in batch_reader():
batches.append(batch)
if len(batches) == num_part:
yield batches[part_id]
batches = []
if is_test and 0 <= part_id < len(batches):
yield batches[part_id]
return
return __wrapper__
def data_generator(self,
input_file=None,
reader=None,
num_epochs=1,
num_part=1,
part_id=0,
phase=None,
is_infer=False):
"""Data generator."""
def __wrapper__():
if is_infer or phase.endswith("test"):
self.features[phase] = {}
nonlocal reader
if reader is None:
if self.file_format == "filelist":
reader = self._read_files(input_file, phase, is_infer,
not phase.endswith("test"))
else:
if phase == "train":
self.total_file = 1
self.current_file_index = 1
self.current_file = input_file
reader = self._read_file(input_file, phase, is_infer)
batch_reader = self._batch_reader(
reader,
phase,
is_infer,
sort_pool_size=self.sort_pool_size if not is_infer else 0)
if phase == "train":
batch_reader = self._distributed_batch_reader(batch_reader,
num_part, part_id)
elif phase.startswith("distributed"):
batch_reader = self._distributed_batch_reader(
batch_reader, num_part, part_id, is_test=True)
for epoch_index in range(num_epochs):
if phase == "train":
self.current_example = 0
self.current_epoch = epoch_index + 1
for batch in batch_reader():
yield self._pad_batch_records(batch, is_infer)
return __wrapper__
def _gen_self_attn_mask(self,
batch_token_ids,
batch_tgt_start_idx=None,
is_unidirectional=True,
shift_len=0):
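        # Builds a UniLM-style seq2seq mask when unidirectional: every
        # position may attend to the full source context (columns before
        # tgt_start_idx), while target positions attend only to the
        # already-generated target prefix (the lower-triangular block below).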
max_len = max(map(len, batch_token_ids))
input_mask_data = np.zeros(
(len(batch_token_ids), max_len + shift_len, max_len + shift_len))
if is_unidirectional:
for index, mask_data in enumerate(input_mask_data):
start = 0 if batch_tgt_start_idx is None else batch_tgt_start_idx[
index]
end = len(batch_token_ids[index])
mask_data[:end + shift_len, :start + shift_len] = 1.0
                # Build the lower-triangular matrix over the target span
b = np.tril(np.ones([end - start, end - start]), 0)
mask_data[start + shift_len:end + shift_len, start + shift_len:
end + shift_len] = b
else:
for index, token_ids in enumerate(batch_token_ids):
input_mask_data[index, :len(token_ids) + shift_len, :len(
token_ids) + shift_len] = 1.0
return input_mask_data.astype("float32")
def _pad_batch_records(self, batch_records, is_infer):
"""
Padding batch records and construct model's inputs.
"""
batch_size = len(batch_records)
batch = {}
batch_token_ids = [record.token_ids for record in batch_records]
batch_type_ids = [record.type_ids for record in batch_records]
batch_pos_ids = [record.pos_ids for record in batch_records]
batch["token_ids"] = pad_batch_data(batch_token_ids, pad_id=self.pad_id)
batch["type_ids"] = pad_batch_data(batch_type_ids, pad_id=self.pad_id)
batch["pos_ids"] = pad_batch_data(batch_pos_ids, pad_id=self.pad_id)
batch_tgt_start_idx = [record.tgt_start_idx for record in batch_records]
batch["generation_mask"] = self._gen_self_attn_mask(
batch_token_ids, batch_tgt_start_idx=batch_tgt_start_idx)
if is_infer:
tgt_ids = np.array(
[[[self.bos_id]]] * len(batch_token_ids), dtype="int64")
if self.continuous_position:
tgt_pos = np.array(batch_tgt_start_idx, dtype="int64")
else:
tgt_pos = np.zeros_like(batch_tgt_start_idx, dtype="int64")
tgt_pos = tgt_pos.reshape(-1, 1, 1)
batch["init_score"] = np.zeros_like(
tgt_ids, dtype="float32").reshape(-1, 1).tolist()
batch["tgt_ids"] = tgt_ids.tolist()
batch["tgt_pos"] = tgt_pos.tolist()
batch["tgt_generation_mask"] = batch[
"generation_mask"][:, 0:1, :].astype("float32")
else:
batch["tgt_label"], batch["tgt_pos"] = mask(
batch_tokens=batch_token_ids,
vocab_size=self.vocab_size,
sent_b_starts=batch_tgt_start_idx,
is_unidirectional=True)
batch_data_id = [record.data_id for record in batch_records]
batch["data_id"] = np.array(batch_data_id).astype("int64").reshape(
[-1, 1])
return batch
@contextmanager
def open_file(filename):
"""Open file."""
if filename.endswith(".gz"):
fp = gzip.open(filename, "rt")
else:
fp = open(filename)
yield fp
fp.close()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NSP Reader."""
from collections import namedtuple
import numpy as np
from readers.dialog_reader import DialogReader
from utils import pad_batch_data
from utils.args import str2bool
from utils.masking import mask
class NSPReader(DialogReader):
"""NSP Reader."""
@classmethod
def add_cmdline_args(cls, parser):
"""Add cmdline argurments."""
group = DialogReader.add_cmdline_args(parser)
group.add_argument(
"--attention_style",
type=str,
default="bidirectional",
choices=["bidirectional", "unidirectional"])
group.add_argument(
"--mix_negative_sample", type=str2bool, default=False)
return group
def __init__(self, args):
super(NSPReader, self).__init__(args)
self.fields.append("label")
self.Record = namedtuple(
"Record", self.fields, defaults=(None, ) * len(self.fields))
self.attention_style = args.attention_style
self.mix_negative_sample = args.mix_negative_sample
return
def _convert_example_to_record(self, example, is_infer):
record = super(NSPReader, self)._convert_example_to_record(example,
False)
if "label" in example._fields:
record = record._replace(label=int(example.label))
return record
def _mix_negative_sample(self, reader, neg_pool_size=2**16):
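        # Negatives are built by rotation: each record keeps its own response
        # as a positive pair (label=1) and is also paired with the response of
        # record (i + 1) % num_samples as a mismatched negative (label=0).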
def gen_from_pool(pool):
num_samples = len(pool)
if num_samples == 1:
# only one sample: it is impossible to generate negative sample
yield pool[0]._replace(label=1)
return
self.global_rng.shuffle(pool)
for i in range(num_samples):
pool[i] = pool[i]._replace(label=1)
j = (i + 1) % num_samples
idx_i = pool[i].tgt_start_idx
idx_j = pool[j].tgt_start_idx
field_values = {}
field_values["token_ids"] = pool[i].token_ids[:idx_i] + pool[
j].token_ids[idx_j:]
field_values["type_ids"] = pool[i].type_ids[:idx_i] + pool[
j].type_ids[idx_j:]
field_values["pos_ids"] = list(
range(len(field_values["token_ids"])))
neg_record = self.Record(
**field_values, tgt_start_idx=idx_i, data_id=-1, label=0)
pool.append(neg_record)
assert len(neg_record.token_ids) <= self.max_seq_len
self.global_rng.shuffle(pool)
for record in pool:
yield record
def __wrapper__():
pool = []
for record in reader():
pool.append(record)
if len(pool) == neg_pool_size:
for record in gen_from_pool(pool):
yield record
pool = []
if len(pool) > 0:
for record in gen_from_pool(pool):
yield record
return __wrapper__
def _batch_reader(self,
reader,
phase=None,
is_infer=False,
sort_pool_size=2**16):
if self.mix_negative_sample:
reader = self._mix_negative_sample(reader)
return super(NSPReader, self)._batch_reader(
reader,
phase=phase,
is_infer=is_infer,
sort_pool_size=sort_pool_size)
def _pad_batch_records(self, batch_records, is_infer):
"""
Padding batch records and construct model's inputs.
"""
batch = {}
batch_token_ids = [record.token_ids for record in batch_records]
batch_type_ids = [record.type_ids for record in batch_records]
batch_pos_ids = [record.pos_ids for record in batch_records]
batch_tgt_start_idx = [record.tgt_start_idx for record in batch_records]
batch_label = [record.label for record in batch_records]
if self.attention_style == "unidirectional":
batch["token_ids"] = pad_batch_data(
batch_token_ids, pad_id=self.pad_id)
batch["type_ids"] = pad_batch_data(
batch_type_ids, pad_id=self.pad_id)
batch["pos_ids"] = pad_batch_data(batch_pos_ids, pad_id=self.pad_id)
tgt_label, tgt_pos, label_pos = mask(
batch_tokens=batch_token_ids,
vocab_size=self.vocab_size,
bos_id=self.bos_id,
sent_b_starts=batch_tgt_start_idx,
labels=batch_label,
is_unidirectional=True)
attention_mask = self._gen_self_attn_mask(batch_token_ids,
batch_tgt_start_idx)
else:
batch_mask_token_ids, tgt_label, tgt_pos, label_pos = mask(
batch_tokens=batch_token_ids,
vocab_size=self.vocab_size,
bos_id=self.bos_id,
eos_id=self.eos_id,
mask_id=self.mask_id,
sent_b_starts=batch_tgt_start_idx,
labels=batch_label,
is_unidirectional=False)
if not is_infer:
batch_token_ids = batch_mask_token_ids
batch["token_ids"] = pad_batch_data(
batch_token_ids, pad_id=self.pad_id)
batch["type_ids"] = pad_batch_data(
batch_type_ids, pad_id=self.pad_id)
batch["pos_ids"] = pad_batch_data(batch_pos_ids, pad_id=self.pad_id)
attention_mask = self._gen_self_attn_mask(
batch_token_ids, is_unidirectional=False)
batch["attention_mask"] = attention_mask
batch["label_pos"] = label_pos
if not is_infer:
batch_label = np.array(batch_label).astype("int64").reshape([-1, 1])
batch["label"] = batch_label
batch["tgt_label"] = tgt_label
batch["tgt_pos"] = tgt_pos
batch_data_id = [record.data_id for record in batch_records]
batch["data_id"] = np.array(batch_data_id).astype("int64").reshape(
[-1, 1])
return batch
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plato Reader."""
import numpy as np
from readers.dialog_reader import DialogReader
from utils import pad_batch_data
from utils.masking import mask
class PlatoReader(DialogReader):
"""The implement of PlatoReader"""
def __init__(self, args):
super(PlatoReader, self).__init__(args)
self.latent_type_size = args.latent_type_size
self.use_bow = args.use_bow
def _pad_batch_records(self, batch_records, is_infer):
"""
Padding batch records and construct model's inputs.
"""
batch = {}
batch_token_ids = [record.token_ids for record in batch_records]
batch_type_ids = [record.type_ids for record in batch_records]
batch_pos_ids = [record.pos_ids for record in batch_records]
batch_tgt_start_idx = [record.tgt_start_idx for record in batch_records]
batch_size = len(batch_token_ids)
# padding
batch["token_ids"] = pad_batch_data(batch_token_ids, pad_id=self.pad_id)
batch["type_ids"] = pad_batch_data(batch_type_ids, pad_id=self.pad_id)
batch["pos_ids"] = pad_batch_data(batch_pos_ids, pad_id=self.pad_id)
batch["generation_mask"] = self._gen_self_attn_mask(
batch_token_ids,
batch_tgt_start_idx=batch_tgt_start_idx,
is_unidirectional=True,
shift_len=1)
if not is_infer:
batch["recognition_mask"] = self._gen_self_attn_mask(
batch_token_ids, is_unidirectional=False, shift_len=1)
if is_infer:
tgt_ids = np.array([[[self.bos_id]]] * batch_size, dtype="int64")
if self.continuous_position:
tgt_pos = np.array(batch_tgt_start_idx, dtype="int64")
else:
tgt_pos = np.zeros_like(batch_tgt_start_idx, dtype="int64")
tgt_pos = tgt_pos.reshape(-1, 1, 1)
batch["init_score"] = np.zeros_like(
tgt_ids, dtype="float32").reshape(-1, 1).tolist()
batch["tgt_ids"] = tgt_ids.tolist()
batch["tgt_pos"] = tgt_pos.tolist()
batch["parent_idx"] = np.array(range(batch_size), dtype="int32")
batch["tgt_generation_mask"] = batch[
"generation_mask"][:, 0:1, :].astype("float32")
else:
mask_return_list = mask(
batch_tokens=batch_token_ids,
vocab_size=self.vocab_size,
sent_b_starts=batch_tgt_start_idx,
is_unidirectional=True,
use_latent=True,
use_bow=self.use_bow)
batch["tgt_label"] = mask_return_list[0]
batch["tgt_pos"] = mask_return_list[1]
if self.use_bow:
batch["bow_label"] = mask_return_list[2]
batch["bow_pos"] = mask_return_list[3]
batch_data_id = [record.data_id for record in batch_records]
batch["data_id"] = np.array(batch_data_id).astype("int64").reshape(
[-1, 1])
return batch
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils."""
from itertools import chain
import numpy as np
import paddle
def repeat_array(array, times):
"""Repeate numpy array."""
if isinstance(array, list):
return list(chain(*([array] * times)))
else:
return np.concatenate([array] * times, axis=0)
def gen_inputs(inputs, latent_type_size):
batch_size = len(inputs["data_id"])
new_bsz = batch_size * latent_type_size
inputs = {
name: repeat_array(array, latent_type_size)
for name, array in inputs.items()
}
# Add latent_id
inputs["latent_id"] = np.array(
[i for i in range(latent_type_size) for _ in range(batch_size)],
dtype="int64").reshape([-1, 1])
#print('\nplato_inputs:')
for key in inputs:
inputs[key] = paddle.to_tensor(inputs[key])
if key in [
'token_ids', 'type_ids', 'pos_ids', 'tgt_ids', 'tgt_pos',
'data_id'
]:
inputs[key] = paddle.squeeze(inputs[key], axis=-1)
#print(key, inputs[key].shape, inputs[key].dtype)
return inputs
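# Example (illustrative): with batch_size=2 and latent_type_size=3, every
# input array is tiled to 6 rows and latent_id becomes [0, 0, 1, 1, 2, 2],
# so a single forward pass scores the batch under every latent value.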
def pad_batch_data(insts, pad_id=0):
"""Pad the instances to the max sequence length in batch. """
max_len = max(map(len, insts))
inst_data = np.array(
[list(inst) + [pad_id] * (max_len - len(inst)) for inst in insts])
return inst_data.astype("int64").reshape([-1, max_len, 1])
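# Example (illustrative):
#   pad_batch_data([[5, 6], [7, 8, 9]], pad_id=0)
#   # -> int64 array of shape (2, 3, 1) holding [[5, 6, 0], [7, 8, 9]]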
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse argument."""
import argparse
import json
def str2bool(v):
""" Support bool type for argparse. """
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Unsupported value encountered.")
class Args(dict):
""" Arguments class
Store arguments in training / infer / ... scripts.
"""
def __getattr__(self, name):
if name in self.keys():
return self[name]
for v in self.values():
if isinstance(v, Args):
if name in v:
return v[name]
return None
def get(self, key, default_value=None):
"""Get the value of corresponding key."""
if key in self.keys():
return self[key]
for v in self.values():
if isinstance(v, Args):
if key in v:
return v[key]
return default_value
def __setattr__(self, name, value):
self[name] = value
def save(self, filename):
with open(filename, "w") as fp:
json.dump(self, fp, ensure_ascii=False, indent=4, sort_keys=False)
def load(self, filename, group_name=None):
if group_name is not None:
if group_name not in self:
self[group_name] = Args()
self[group_name].load(filename)
return
with open(filename, "r") as fp:
params_dict = json.load(fp)
for k, v in params_dict.items():
if isinstance(v, dict):
self[k].update(Args(v))
else:
self[k] = v
def parse_args(parser: argparse.ArgumentParser, allow_unknown=False) -> Args:
""" Parse hyper-parameters from cmdline. """
if allow_unknown:
parsed, _ = parser.parse_known_args()
else:
parsed = parser.parse_args()
args = Args()
optional_args = parser._action_groups[1]
for action in optional_args._group_actions[1:]:
arg_name = action.dest
args[arg_name] = getattr(parsed, arg_name)
for group in parser._action_groups[2:]:
group_args = Args()
for action in group._group_actions:
arg_name = action.dest
group_args[arg_name] = getattr(parsed, arg_name)
if len(group_args) > 0:
if group.title in args:
args[group.title].update(group_args)
else:
args[group.title] = group_args
return args
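# Example (illustrative): an option registered in an argparse group titled
# "Model" ends up under args["Model"], and the recursive lookup in
# Args.__getattr__ also exposes it directly, so args.Model.num_layers and
# args.num_layers return the same value.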
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reader utils."""
import numpy as np
def mask(batch_tokens,
vocab_size,
bos_id=1,
eos_id=2,
mask_id=3,
sent_b_starts=None,
labels=None,
is_unidirectional=False,
use_latent=False,
use_bow=False):
"""
Add mask for batch_tokens, return out, mask_label, mask_pos;
Note: mask_pos responding the batch_tokens after padded;
"""
batch_tokens = np.copy(batch_tokens)
max_len = max(map(len, batch_tokens))
mask_label = []
mask_pos = []
if labels is not None:
label_pos = []
if is_unidirectional:
# unidirectional language model
if use_latent:
max_len += 1
shift_len = 1
else:
shift_len = 0
for sent_index, sent in enumerate(batch_tokens):
sent_b_index = sent_b_starts[
sent_index] if sent_b_starts is not None else 0
need_cal = True
if labels is not None:
label_pos.append(sent_index * max_len + len(sent) - 1 +
shift_len)
if labels[sent_index] == 0:
need_cal = False
mask_label.extend(sent[sent_b_index + 1:])
mask_pos.extend([
sent_index * max_len + i + shift_len
for i in range(sent_b_index, len(sent) - 1)
])
mask_label = np.array(mask_label).astype("int64").reshape([-1, 1])
mask_pos = np.array(mask_pos).astype("int64").reshape([-1, 1])
return_list = [mask_label, mask_pos]
# latent related (bow label and pos)
if use_latent and use_bow:
bow_label = []
bow_pos = []
for sent_index, sent in enumerate(batch_tokens):
sent_b_index = sent_b_starts[
sent_index] if sent_b_starts is not None else 0
def __filter__(tok_id):
# TODO: exclude [EOS] from bow loss
return True
bow_pos.extend([
sent_index for i in range(sent_b_index + 1, len(sent))
if __filter__(sent[i])
])
bow_label.extend([
sent[i] for i in range(sent_b_index + 1, len(sent))
if __filter__(sent[i])
])
bow_label = np.array(bow_label).astype("int64").reshape([-1, 1])
bow_pos = np.array(bow_pos).astype("int64").reshape([-1, 1])
return_list += [bow_label, bow_pos]
else:
# bidirectional mask language model
total_token_num = sum(map(len, batch_tokens))
prob_mask = np.random.rand(total_token_num)
# TODO: fix replace_ids, include [UNK]
replace_ids = np.random.randint(
3, high=vocab_size, size=total_token_num)
prob_index = 0
for sent_index, sent in enumerate(batch_tokens):
# add pair label position
if labels is not None:
label_pos.append(sent_index * max_len)
# add mask label and position
for token_index, token in enumerate(sent):
if token == eos_id or token == bos_id:
continue
prob = prob_mask[prob_index + token_index]
if prob > 0.15:
continue
elif 0.03 < prob <= 0.15:
# mask
mask_label.append(sent[token_index])
sent[token_index] = mask_id
mask_pos.append(sent_index * max_len + token_index)
elif 0.015 < prob <= 0.03:
# random replace
mask_label.append(sent[token_index])
sent[token_index] = replace_ids[prob_index + token_index]
mask_pos.append(sent_index * max_len + token_index)
else:
# keep the original token
mask_label.append(sent[token_index])
mask_pos.append(sent_index * max_len + token_index)
prob_index += len(sent)
mask_label = np.array(mask_label).astype("int64").reshape([-1, 1])
mask_pos = np.array(mask_pos).astype("int64").reshape([-1, 1])
return_list = [batch_tokens, mask_label, mask_pos]
if labels is not None:
label_pos = np.array(label_pos).astype("int64").reshape([-1, 1])
assert len(labels) == len(label_pos)
return_list.append(label_pos)
return return_list
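# Example (illustrative): one unidirectional-LM record laid out as
# [BOS] src [BOS] tgt tgt [EOS], with the target starting at index 2:
#   mask_label, mask_pos = mask(
#       batch_tokens=[[1, 11, 1, 12, 13, 2]],
#       vocab_size=100,
#       sent_b_starts=[2],
#       is_unidirectional=True)
#   # mask_label -> [[12], [13], [2]]  (gold next tokens of the target span)
#   # mask_pos   -> [[2], [3], [4]]    (their positions in the padded batch)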
# ELECTRA
# ELECTRA with PaddleNLP
TBD
[ELECTRA](https://openreview.net/pdf?id=r1xMH1BtvB) builds on [BERT](https://arxiv.org/abs/1810.04805) and improves its pre-training process: pre-training consists of two networks, called the Generator and the Discriminator, each containing one BERT model. The Generator is pre-trained with the same Masked Language Model (MLM) task as BERT, while the Discriminator is pre-trained with the Replaced Token Detection (RTD) task (the main improvement). After pre-training, the Discriminator serves as the model for fine-tuning, and the Generator is no longer used. According to the experiments in the paper, with the same model parameters and the same pre-training compute as BERT, ELECTRA scores clearly higher on GLUE: 79.9 vs. 75.1 for the small model, 85.1 vs. 82.2 for the base model, and 89.0 vs. 87.2 for the large model. The authors attribute this to:
1. While the MLM task only looks at the 15% of input tokens that are masked, ELECTRA's RTD task attends to the entire input, giving the model a more global view.
2. It applies the adversarial idea of GANs: tokens are replaced with plausible substitutes produced by the Generator rather than at random, which speeds up Discriminator convergence.
3. Although pre-training involves both a Generator and a Discriminator, it does not simply copy the GAN setup; it differs from a conventional GAN in that:
- the input is real text, whereas a conventional GAN takes random noise;
- the Generator's inputs and outputs are sentences, whose tokens are discrete, so the Discriminator's gradient cannot flow back to the Generator, whereas in a conventional GAN it can;
- if the Generator happens to produce the original token, it counts as a positive example, whereas everything a conventional GAN generates is a negative example.
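As a rough sketch of the RTD idea (this is not this repository's code; all names and values below are made up for illustration): the Generator fills in the masked positions, and the Discriminator then predicts, for every token of the resulting sequence, whether it was replaced:
```python
import paddle

def rtd_labels(original_ids, corrupted_ids):
    """Per-token RTD label: 1 where the Generator's sampled token differs
    from the original token, 0 where it happens to reproduce it."""
    return paddle.cast(original_ids != corrupted_ids, "int64")

# Two made-up sequences of length 4; positions (0, 2) and (1, 0) were replaced.
original_ids = paddle.to_tensor([[5, 8, 9, 2], [7, 3, 1, 2]])
corrupted_ids = paddle.to_tensor([[5, 8, 4, 2], [6, 3, 1, 2]])
print(rtd_labels(original_ids, corrupted_ids))
# -> [[0, 0, 1, 0], [1, 0, 0, 0]]; the Discriminator is trained with a
#    per-token binary cross-entropy against these labels over the full input.
```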
This project is an open-source implementation of ELECTRA on Paddle 2.0.
## Release Highlights
1. A dynamic-graph ELECTRA model with fine-tuning support, validated on all GLUE tasks.
2. Support for ELECTRA pre-training.
## Fine-tuning on NLP Tasks
Fine-tuning is run with ../glue/run_glue.py (see ../glue/README.md for details), in one of two ways:
1. Run fine-tuning from an existing pre-trained model.
2. Pre-train an ELECTRA model yourself, then fine-tune from that model (this requires substantial resources).
The example below uses the first approach.
### Sentence and Sentence-Pair Classification Tasks
Taking the GLUE/SST-2 task as an example, fine-tuning is started as follows (`paddlenlp` must already be installed or discoverable via `PYTHONPATH`):
```shell
export CUDA_VISIBLE_DEVICES=0,1
export TASK_NAME=SST-2
cd ../glue/ && python -u ./run_glue.py \
--model_type electra \
--model_name_or_path electra-small \
--task_name $TASK_NAME \
--max_seq_length 128 \
--batch_size 32 \
--learning_rate 1e-4 \
--num_train_epochs 3 \
--logging_steps 1 \
--save_steps 500 \
--output_dir ./tmp/$TASK_NAME/ \
    --n_gpu 1
```
The parameters are:
- `model_type`: the model type; BERT and ELECTRA are currently supported.
- `model_name_or_path`: which pre-trained model to use, together with its tokenizer; electra-small, electra-base, and electra-large are currently supported. If the model files are stored locally, a directory path can be given instead.
- `task_name`: the fine-tuning task; CoLA, SST-2, MRPC, STS-B, QQP, MNLI, QNLI, and RTE are currently supported.
- `max_seq_length`: the maximum sentence length; longer inputs are truncated.
- `batch_size`: the number of samples **per card** per iteration.
- `learning_rate`: the base learning rate; it is multiplied by the value produced by the learning rate scheduler to obtain the current learning rate.
- `num_train_epochs`: the number of training epochs.
- `logging_steps`: the logging interval, in steps.
- `save_steps`: the model saving and evaluation interval, in steps.
- `output_dir`: the directory where models are saved.
- `n_gpu`: the number of GPUs to use; set it to the desired count for multi-card training, or to 0 to train on CPU.
Training prints logs like the following, according to the `logging_steps` and `save_steps` settings:
```
global step 6310/6315, epoch: 2, batch: 2099, rank_id: 0, loss: 0.035772, lr: 0.0000000880, speed: 3.1527 step/s
global step 6311/6315, epoch: 2, batch: 2100, rank_id: 0, loss: 0.056789, lr: 0.0000000704, speed: 3.4201 step/s
global step 6312/6315, epoch: 2, batch: 2101, rank_id: 0, loss: 0.096717, lr: 0.0000000528, speed: 3.4694 step/s
global step 6313/6315, epoch: 2, batch: 2102, rank_id: 0, loss: 0.044982, lr: 0.0000000352, speed: 3.4513 step/s
global step 6314/6315, epoch: 2, batch: 2103, rank_id: 0, loss: 0.139579, lr: 0.0000000176, speed: 3.4566 step/s
global step 6315/6315, epoch: 2, batch: 2104, rank_id: 0, loss: 0.046043, lr: 0.0000000000, speed: 3.4590 step/s
eval loss: 0.549763, acc: 0.9151376146788991, eval done total : 1.8206987380981445 s
```
Single-card fine-tuning with the electra-small pre-trained model gives the following results on the dev sets:
| Task | Metric | Result |
|-------|------------------------------|-------------|
| CoLA | Matthews corr | 58.22 |
| SST-2 | acc. | 91.85 |
| MRPC | acc./F1 | 88.24 |
| STS-B | Pearson/Spearman corr | 87.24 |
| QQP | acc./F1 | 88.83 |
| MNLI | matched acc./mismatched acc. | 82.45 |
| QNLI | acc. | 88.61 |
| RTE | acc. | 66.78 |
Note: acc. is short for Accuracy; the Metric names in the table are taken from the [GLUE paper](https://openreview.net/pdf?id=rJ4km2R5t7).
## Pre-training
Pre-training uses the BookCorpus data, which is no longer publicly available; any plain-text corpus can be used instead,
for example the [Gutenberg Dataset](https://web.eecs.umich.edu/~lahiri/gutenberg_dataset.html).
The example below assumes the data is in ./BookCorpus/ with a single plain-text file train.data:
```shell
export CUDA_VISIBLE_DEVICES=0,1
export DATA_DIR=./BookCorpus/
python -u ./run_pretrain.py \
--model_type electra \
--model_name_or_path electra-small \
--train_batch_size 96 \
--learning_rate 5e-4 \
--weight_decay 1e-2 \
--adam_epsilon 1e-6 \
--warmup_steps 10000 \
--num_train_epochs 4 \
--input_dir $DATA_DIR \
--output_dir ./tmp2/ \
--logging_steps 1 \
--save_steps 20000 \
--max_steps 1000000 \
--n_gpu 2
```
......@@ -3,7 +3,7 @@ from functools import partial
from paddle.io import DistributedBatchSampler, DataLoader
from paddle.static import InputSpec
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.transformers import BertForSequenceClassification, BertTokenizer
from paddlenlp.transformers import ErnieTokenizer
import numpy as np
import paddle
import paddlenlp
......@@ -18,10 +18,13 @@ def convert_example(example, tokenizer, max_seq_length=128):
return input_ids, segment_ids, label
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertForSequenceClassification.from_pretrained('bert-base-chinese')
paddle.set_device('gpu')
# Dataset prepare
train_ds = paddlenlp.datasets.ChnSentiCorp.get_datasets(['train'])
tokenizer = ErnieTokenizer.from_pretrained('ernie-1.0')
model = paddlenlp.models.Ernie('ernie-1.0', task='seq-cls', num_classes=2)
train_ds, = paddlenlp.datasets.ChnSentiCorp.get_datasets(['train'])
trans_func = partial(convert_example, tokenizer=tokenizer)
train_ds = train_ds.apply(trans_func)
batchify_fn = lambda samples, fn=Tuple(
......
# Language Model
## RNN-LM (xiaopeng)
## ELMo (moyuan)
......@@ -87,13 +87,6 @@ class CrossEntropyLossForLm(nn.Layer):
class UpdateModel(paddle.callbacks.Callback):
# This callback reset model hidden states and update learning rate before each epoch begins
def __init__(self, base_lr, lr_decay, epoch_start_decay):
self.base_lr = base_lr
self.lr_decay = lr_decay
self.epoch_start_decay = epoch_start_decay
def on_epoch_begin(self, epoch=None, logs=None):
self.model.network.reset_states()
new_lr = self.base_lr * (self.lr_decay
**max(epoch + 1 - self.epoch_start_decay, 0.0))
self.model._optimizer.set_lr(new_lr)
# PaddleMRC
Fine-tune the BERT-style pre-trained models provided by PaddleNLP on the SQuAD and DuReader datasets.