From b42dffe71fc259eb7f3a8c42615e58ddf24b90f0 Mon Sep 17 00:00:00 2001
From: xixiaoyao
Date: Tue, 21 Jan 2020 16:31:52 +0800
Subject: [PATCH] add api docs

---
 api_README.md                   | 825 +-------------------------------
 backbone/README.md              |   0
 backbone/__init__.py            |   0
 backbone/bert.py                | 155 ------
 backbone/ernie.py               | 175 -------
 backbone/utils/__init__.py      |   0
 backbone/utils/transformer.py   | 371 --------------
 demo/demo2/run.py               |   7 +-
 paddlepalm/backbone/bert.py     |   4 +-
 paddlepalm/backbone/ernie.py    |   4 +-
 paddlepalm/head/base_head.py    |   2 +-
 paddlepalm/head/cls.py          |   4 +-
 paddlepalm/multihead_trainer.py |  84 ++--
 paddlepalm/trainer.py           | 400 ++++++++++------
 14 files changed, 320 insertions(+), 1711 deletions(-)
 delete mode 100644 backbone/README.md
 delete mode 100644 backbone/__init__.py
 delete mode 100644 backbone/bert.py
 delete mode 100644 backbone/ernie.py
 delete mode 100644 backbone/utils/__init__.py
 delete mode 100644 backbone/utils/transformer.py

diff --git a/api_README.md b/api_README.md
index 2bccb9a..ed86548 100644
--- a/api_README.md
+++ b/api_README.md
@@ -1,9 +1,13 @@
 # PaddlePALM
 
-PaddlePALM (Paddle for Multi-task) 是一个强大快速、灵活易用的NLP大规模多任务学习框架。通过PaddlePALM,用户可以轻松完成复杂的多任务学习与参数复用,无缝集成「**单任务训练**」、「**多任务辅助训练**」和「**多目标任务联合训练**」这 *3* 种训练方式和灵活的保存与预测机制,且仅需书写极少量代码即可“一键启动”高性能单机单卡和分布式训练与推理。
+PaddlePALM (Paddle for Multi-task) 是一个灵活通用且易用的NLP大规模预训练与多任务学习框架。通过PaddlePALM,用户可以轻松完成复杂的多任务学习与参数复用,无缝集成「**单任务训练**」、「**多任务辅助训练**」和「**多目标任务联合训练**」这 *3* 种训练方式和灵活的保存与预测机制,且仅需书写极少量代码即可“一键启动”高性能单机单卡和分布式训练与推理。
 
 框架中内置了丰富的[主干网络](#附录b内置主干网络backbone)及其[预训练模型](#预训练模型)(BERT、ERNIE等)、常见的[任务范式](#附录c内置任务范式paradigm)(分类、匹配、机器阅读理解等)和相应的[数据集读取与处理工具](#附录a内置数据集载入与处理工具reader)。同时框架提供了用户自定义接口,若内置工具、主干网络和任务无法满足需求,开发者可以轻松完成相关组件的自定义。各个组件均为零耦合设计,用户仅需完成组件本身的特性开发即可完成与框架的融合。
 
+PaddlePALM (PArallel Learning from Multi-tasks) is a flexible, general and easy-to-use framework for large-scale NLP pretraining and multi-task learning. PALM is a high-level framework aiming at **rapidly** developing **high-performance** NLP models. With PALM, a typical NLP task can be achieved in just 8 steps.
+
+然后给出一些成功案例和一些公开数据集的各个backbone的实验结果(BERT、ERNIE、RoBERTa)和一些成功的多任务学习示例。
+
 ## 目录
 
 - [安装](#安装)
@@ -125,824 +130,6 @@
 The save/load and predict operations of a multi_head_trainer are the same as those of a trainer.
 
 More implementation details of running multi-task learning with multi_head_trainer can be found [here]().
-
-
-
-V
-## 框架代码结构
-
-```text
-.
-├── mtl_controller.py # 任务控制器,负责创建和调度各个任务实例来完成多任务学习 -├── task_instance.py # 任务实例类,完成任务实例的配置管理、训练进程管理、保存与载入等 -├── default_settings.py # 默认的环境变量和框架配置 -├── utils # 框架核心工具集 -│ ├── config_helper.py # 配置工具类,完成命令行与json、yaml的联合解析 -│ ├── reader_helper.py # 完成多任务数据集iterators的合并、采样、调度和归一化,连接python生成器与计算图 -│ ├── saver.py # 模型保存与载入 -│ ├── print_helper.py # 日志打印规范化工具 -│ ├── plot_helper.py # 命令行绘图工具 -│ └── textprocess_helper.py # 文本数据处理工具函数 -├── backbone # 框架预置的主干网络 -│ ├── ernie.py # ERNIE模型 -│ ├── bert.py # BERT模型 -│ └── utils # 实现主干网络的一些可复用的工具函数 -├── reader # 框架内置的数据集载入与处理工具 -│ ├── cls.py # 文本分类数据集工具 -│ ├── match.py # 文本匹配数据集工具 -│ ├── mrc.py # 机器阅读理解数据集工具 -│ └── mlm.py # 掩码语言模型(mask language model)数据集生成与处理工具 -└── paradigm # 任务范式 - ├── cls.py # 文本分类 - ├── match.py # 文本匹配 - ├── mrc.py # 机器阅读理解 - └── mlm.py # 掩码语言模型(mask language model) -``` - - - - - -#### 转换 -注意,预训练模型不能直接被框架使用。我们提供了转换脚本可以将其转换成paddlepalm的模型格式。如下,通过运行`script/convert_params.sh`可将预训练模型bert转换成框架的模型格式。 - -```shell -bash script/convert_params.sh pretrain_model/bert/params -``` - -注意,以下恢复操作在执行后述DEMO流程中**无需执行**。 -若用户需将转换成的paddlepalm模型恢复为原始的预训练模型,可以运行`script/recover_params.sh`进行恢复。 - -```shell -bash script/recover_params.sh pretrain_model/bert/params -``` - - -## 三个DEMO入门PaddlePALM - -### DEMO1:单任务训练 - -框架支持对任何一个内置任务进行传统的单任务训练。接下来我们启动一个复杂的机器阅读理解任务的训练,我们在`data/mrqa`文件夹中提供了[EMNLP2019 MRQA机器阅读理解评测](https://mrqa.github.io/shared)的部分比赛数据。下面我们利用该数据尝试完成一个基于BERT的机器阅读理解任务MRQA的单任务学习。 - -用户可通过运行如下脚本一键开始本节任务的训练 - -```shell -bash run_demo1.sh -``` - -下面以该任务为例,讲解如何基于paddlepalm框架轻松实现该任务。 - -**1. 配置任务实例** - -首先,我们编写该任务实例的配置文件`mrqa.yaml`,若该任务实例参与训练或预测,则框架将自动解析该配置文件并创建相应的任务实例。配置文件需符合yaml格式的要求。一个任务实例的配置文件最少应包含`train_file`,`reader`和`paradigm`这三个字段,分别代表训练集的文件路径`train_file`、使用的数据集载入与处理工具`reader`、任务范式`paradigm`。 - -```yaml -train_file: data/mrqa/train.json -reader: mrc -paradigm: mrc -``` - -*注:框架内置的其他数据集载入与处理工具见[这里](#附录a内置数据集载入与处理工具reader),任务范式列表见[这里](#附录c内置任务范式paradigm)* - -此外,我们还需要配置reader的预处理规则,各个预置reader支持的预处理配置和规则请参考[这里](#附录a内置数据集载入与处理工具reader)。预处理规则同样直接写入`mrqa.yaml`中。 - -```yaml -max_seq_len: 512 -max_query_len: 64 -doc_stride: 128 # 在MRQA数据集中,存在较长的文档,因此我们这里使用滑动窗口处理样本,滑动步长设置为128 -do_lower_case: True -vocab_path: "pretrain_model/bert/vocab.txt" -``` - -更详细的任务实例配置方法(为任务实例选择合适的reader、paradigm和backbone)可参考[这里](#readerbackbone与paradigm的选择) - -**2.配置backbone和训练规则** - -然后我们编写全局配置文件`config_demo1.yaml`。在这里可以完成对主干网络(backbone)、多任务学习规则以及[广播到任务实例](#配置广播机制)的配置。同样使用yaml格式描述,例如在这里我们可以配置一下需要学习的任务`task_instance`、模型的保存路径`save_path`、基于的主干网络`backbone`、优化器`optimizer`等。 - -```yaml -task_instance: "mrqa" - -save_path: "output_model/firstrun" - -backbone: "bert" -backbone_config_path: "pretrain_model/bert/bert_config.json" - -optimizer: "adam" -learning_rate: 3e-5 -batch_size: 4 - -num_epochs: 2 -warmup_proportion: 0.1 -``` - -这里的task_instance即填写我们刚刚编写的任务实例配置文件的文件名`mrqa`**(注意不要包括.yaml后缀!)**。框架启动多任务学习后会根据`task_instance`中指定的任务实例来寻找相关配置文件,并创建任务实例。 - -此外,backbone的相关配置除了可以直接写入全局配置文件以外,还可以在额外的一个json文件中进行描述,并在全局配置文件中通过`backbone_config_path`进行该配置文件路径的指定。 - -*注:框架支持的其他内置全局参数见[这里](#附录d可配置的全局参数列表)* - -**3.开始训练** - -下面我们开始尝试启动MRQA任务的训练(该代码位于`demo1.py`中)。如[框架原理](#框架原理)所述,框架的核心组件是`Controller`,负责多任务学习的启动。 - -```python -# Demo 1: single task training of MRQA -import paddlepalm as palm - -if __name__ == '__main__': - controller = palm.Controller('config_demo1.yaml', task_dir='demo1_tasks') - controller.load_pretrain('pretrain_model/bert/params') - controller.train() -``` - -训练日志如下,可以看到loss值随着训练收敛。在训练结束后,`Controller`自动为mrqa任务保存预测模型。 - -``` -Global step: 10. 
Task: mrqa, step 10/135 (epoch 0), loss: 5.928, speed: 0.67 steps/s -Global step: 20. Task: mrqa, step 20/135 (epoch 0), loss: 4.594, speed: 0.75 steps/s -Global step: 30. Task: mrqa, step 30/135 (epoch 0), loss: 1.663, speed: 0.75 steps/s -... -Global step: 250. Task: mrqa, step 115/135 (epoch 1), loss: 1.391, speed: 0.75 steps/s -Global step: 260. Task: mrqa, step 125/135 (epoch 1), loss: 1.871, speed: 0.75 steps/s -Global step: 270. Task: mrqa, step 135/135 (epoch 1), loss: 1.544, speed: 0.75 steps/s -mrqa: train finished! -mrqa: inference model saved at output_model/firstrun/mrqa/infer_model -``` - -### DEMO2:多任务辅助训练与目标任务预测 - -本节我们考虑更加复杂的学习目标,我们引入一个掩码语言模型(Mask Language Model,MLM)问答匹配(QA Match)任务来辅助上一节MRQA任务的训练,相关训练数据分别位于`data/mlm4mrqa`和`data/match4mrqa`。并且我们这里换用ERNIE模型作为主干网络,来获得更佳的效果。在多任务训练结束后,我们使用训练好的模型来对MRQA任务的测试集进行预测。 - -用户可通过运行如下脚本直接开始本节任务的训练 - -```shell -bash run_demo2.sh -``` - -下面以该任务为例,讲解如何基于paddlepalm框架轻松实现这个复杂的多任务学习。 - -**1. 配置任务实例** - -首先,我们像上一节一样为MLM任务和Matching任务分别创建任务实例的配置文件`mlm4mrqa.yaml`和`match4mrqa.yaml`: - -```yaml ------ mlm4mrqa.yaml ----- -train_file: "data/mlm4mrqa/train.tsv" -reader: mlm -paradigm: mlm - ------ match4mrqa.yaml ----- -train_file: "data/match/train.tsv" -reader: match -paradigm: match -``` - -由于我们在训练结束后要对MRQA任务的测试集进行预测,因此我们要在之前写好的`mrqa.yaml`中追加预测相关的配置 -```yaml -pred_file: data/mrqa/dev.json -pred_output_path: 'mrqa_output' -max_answer_len: 30 -n_best_size: 20 -``` - -**2.配置全局参数** - -由于MRQA、MLM和Matching任务有相同的字典、大小写配置、截断长度等,因此我们可以将这些各个任务中相同的参数写入到全局配置文件`mtl_config.yaml`中,**框架会自动将该文件中的配置广播(broadcast)到各个任务实例。** - -```yaml -task_instance: "mrqa, mlm4mrqa, match4mrqa" -target_tag: 1,0,0 - -save_path: "output_model/secondrun" - -backbone: "ernie" -backbone_config_path: "pretrain_model/ernie/ernie_config.json" - -vocab_path: "pretrain_model/ernie/vocab.txt" -do_lower_case: True -max_seq_len: 512 # 写入全局配置文件的参数会被自动广播到各个任务实例 - -batch_size: 4 -num_epochs: 2 -optimizer: "adam" -learning_rate: 3e-5 -warmup_proportion: 0.1 -weight_decay: 0.1 -``` - -这里我们可以使用`target_tag`来标记目标任务和辅助任务,各个任务的tag使用逗号`,`隔开。target_tag与task_instance中的元素一一对应,当某任务的tag设置为1时,表示对应的任务被设置为目标任务;设置为0时,表示对应的任务被设置为辅助任务,默认情况下所以任务均被设置为目标任务(即默认`target_tag`为全1)。 - -辅助任务不会保存预测模型,且不会影响训练的终止,仅仅起到“陪同训练”的作用以期提高模型的泛化能力。当所有的目标任务达到预期的训练步数后多任务学习终止,框架自动为每个目标任务保存预测模型(inference model)到设置的`save_path`位置。 - -同时需要注意的是,这里`num_epochs`指代目标任务`mrqa`的训练epoch数量(训练集遍历次数)。 - -在训练过程中,默认每个训练step会从各个任务等概率采样,来决定当前step训练哪个任务。但包括辅助任务在内,各个任务的采样概率是可以被控制的。若用户希望改变采样比率,可以通过`mix_ratio`字段来进行设置,例如 - -```yaml -mix_ratio: 1.0, 0.5, 0.5 -``` - -若将如上设置加入到全局配置文件中,则辅助任务`mlm4mrqa`和`match4mrqa`的采样概率/预估的训练步数仅为`mrqa`任务的一半。关于采样概率的更多介绍请参考进阶篇。 - - - -**3.开始多任务训练** - -```python -import paddlepalm as palm - -if __name__ == '__main__': - controller = palm.Controller('config_demo2.yaml', task_dir='demo2_tasks') - controller.load_pretrain('pretrain_model/ernie/params') - controller.train() - -``` - -训练日志如下,在训练过程中可以看到每个任务的loss下降 -``` -Global step: 10. Task: mrqa, step 4/135 (epoch 0), loss: 6.235, speed: 0.75 steps/s -Global step: 20. Task: mrqa, step 8/135 (epoch 0), loss: 5.652, speed: 0.75 steps/s -Global step: 30. Task: mrqa, step 13/135 (epoch 0), loss: 6.031, speed: 0.75 steps/s -Global step: 40. Task: match4mrqa, step 13/25 (epoch 0), loss: 0.758, speed: 2.52 steps/s -Global step: 50. Task: mlm4mrqa, step 14/30 (epoch 0), loss: 7.322, speed: 3.24 steps/s -... -Global step: 547. Task: match4mrqa, step 13/25 (epoch 5), loss: 0.400, speed: 2.23 steps/s -Global step: 548. 
Task: match4mrqa, step 14/25 (epoch 5), loss: 0.121, speed: 3.03 steps/s -Global step: 549. Task: mrqa, step 134/135 (epoch 1), loss: 0.824, speed: 0.75 steps/s -Global step: 550. Task: mlm4mrqa, step 22/30 (epoch 4), loss: 6.903, speed: 3.59 steps/s -Global step: 551. Task: mrqa, step 135/135 (epoch 1), loss: 3.408, speed: 0.75 steps/s - -mrqa: train finished! -mrqa: inference model saved at output_model/secondrun/mrqa/infer_model -``` - -**4.预测** - -在得到目标任务的预测模型(inference_model)后,我们可以加载预测模型对该任务的测试集进行预测。在多任务训练阶段,在全局配置文件的`save_path`指定的路径下会为每个目标任务创建同名子目录,子目录中都有预测模型文件夹`infermodel`。我们可以将该路径传给框架的`controller`来完成对该目标任务的预测。 - -例如,我们在上一节得到了mrqa任务的预测模型。首先创建一个新的*Controller*,**并且创建时要将`for_train`标志位置为*False***。而后调用*pred*接口,将要预测的任务实例名字和预测模型的路径传入,即可完成相关预测。预测的结果默认保存在任务实例配置文件的`pred_output_path`指定的路径中。代码段如下: - -```python - controller = palm.Controller(config='config_demo2.yaml', task_dir='demo2_tasks', for_train=False) - controller.pred('mrqa', inference_model_dir='output_model/secondrun/mrqa/infermodel') -``` - -我们可以在刚刚yaml文件中设置的`mrqa_output/`文件夹下的`predictions.json`文件中看到类似如下的预测结果 - -```json -{ - "3f02f171c82e49828580007a71eefc31": "Ethan Allen", - "98d0b8ce19d1434abdb42aa01e83db61": "McDonald's", - "f0bc45a4dd7a4d8abf91a5e4fb25fe57": "Jesse James", - ... -} -``` - -其中的每一行是测试集中的一个question对应的预测答案(其中的key为question的id,详情见mrc reader的说明文档)。 - -### DEMO3:多目标任务联合训练与任务层参数复用 - -本节我们考虑一个更加复杂的大规模多任务学习场景。假如手头有若干任务,其中每个任务都可能将来被用于预测(即均为目标任务),且鉴于这若干个任务之间存在一些相关性,我们希望将其中一部分任务的任务层参数也进行复用。分类数据集位于`data/cls4mrqa`内。 - -具体来说,例如我们有6个分类任务(CLS1 ~ CLS6),均为目标任务(每个任务的模型都希望未来拿来做预测和部署),且我们希望任务1,2,5的任务输出层共享同一份参数,任务3、4共享同一份参数,任务6自己一份参数,即希望对6个任务实现如图所示的参数复用关系。 - -![image2](https://tva1.sinaimg.cn/large/006y8mN6ly1g8issdoli5j31ow08ogxv.jpg) - -如图,在同一个方框内的任务共享相同的任务层参数。 - -用户可通过运行如下脚本一键开始学习本节任务目标: - -```shell -bash run_demo3.sh -``` - -**1. 配置任务实例** - -为了演示方便,我们使用同一份数据集来创建6个分类的任务实例,分别命名为`cls1.yaml`, `cls2.yaml`, `cls3.yaml`, `cls4.yaml`, `cls5.yaml`, `cls6.yaml`。每个实例的配置文件中填入如下必要字段 - -```yaml -train_file: "data/cls4mrqa/train.tsv" -reader: cls -paradigm: cls - -n_classes: 4 -``` - -**2.配置全局参数** - -在paddlepalm中可以轻松完成上述的复杂复用关系的定义,我们使用`task_reuse_tag`来描述任务层的参数复用关系,与`target_tag`一样,`task_reuse_tag`中的元素与`task_instance`一一对应,元素取值相同的任务会自动共享任务层参数,取值不同的任务不复用任务层参数。因此可以在全局配置文件中如下描述 - -```yaml -task_instance: "cls1, cls2, cls3, cls4, cls5, cls6" -task_reuse_tag: 0, 0, 1, 1, 0, 2 -``` - -同时,这6个任务均为目标任务,因此我们不需要手动设置`target_tag`了(任务默认即为目标任务)。不过,**设置多个目标的情况下,依然可以添加辅助任务陪同这些目标任务进行训练**,这时候就需要引入`target_tag`来区分目标任务和辅助任务了。而后,我们在全局配置文件中写入其他必要的参数(backbone、优化器等)。 - -```yaml -save_path: "output_model/secondrun" - -backbone: "ernie" -backbone_config_path: "pretrain_model/ernie/ernie_config.json" - -vocab_path: "pretrain_model/ernie/vocab.txt" -do_lower_case: True -max_seq_len: 512 # 写入全局配置文件的参数会被自动广播到各个任务实例 - -batch_size: 4 -num_epochs: 2 -optimizer: "adam" -learning_rate: 3e-5 -warmup_proportion: 0.1 -weight_decay: 0.1 -``` -**3.开始多目标任务训练** - -最后,我们像DEMO1和DEMO2一样创建`Controller`,实例化各个任务实例、载入预训练模型并启动多任务训练: - -```yaml -import paddlepalm as palm - -if __name__ == '__main__': - controller = palm.Controller('config_demo3.yaml', task_dir='demo3_tasks') - controller.load_pretrain('pretrain_model/ernie/params') - controller.train() - ``` - -可以看到如下日志输出。 - -``` -Global step: 1. Task: cls4, step 1/15 (epoch 0), loss: 1.344, speed: 0.50 steps/s -Global step: 10. Task: cls4, step 5/15 (epoch 0), loss: 1.398, speed: 2.19 steps/s -Global step: 20. Task: cls2, step 5/15 (epoch 0), loss: 1.260, speed: 2.64 steps/s -cls4: train finished! 
-cls4: inference model saved at output_model/thirdrun/infer_model -cls5: train finished! -cls5: inference model saved at output_model/thirdrun/infer_model -Global step: 30. Task: cls2, step 7/15 (epoch 0), loss: 0.961, speed: 0.04 steps/s -cls2: train finished! -cls2: inference model saved at output_model/thirdrun/infer_model -Global step: 40. Task: cls6, step 4/15 (epoch 0), loss: 1.412, speed: 2.74 steps/s -Global step: 50. Task: cls2, step 12/15 (epoch 0), loss: 1.011, speed: 2.19 steps/s -cls6: train finished! -cls6: inference model saved at output_model/thirdrun/infer_model -cls1: train finished! -cls1: inference model saved at output_model/thirdrun/infer_model -Global step: 60. Task: cls3, step 7/15 (epoch 0), loss: 1.363, speed: 2.72 steps/s -cls3: train finished! -cls3: inference model saved at output_model/thirdrun/infer_model -``` - -对本DEMO更深入的理解可以参考[多目标任务下的训练终止条件与预期训练步数](#多目标任务下的训练终止条件与预期训练步数)。 - -## 进阶篇 -本章节更深入的对paddlepalm的使用方法展开介绍,并提供一些提高使用效率的小技巧。 - -### 配置广播机制 - -![PALM原理图](https://tva1.sinaimg.cn/large/006y8mN6ly1g8j1isf3fcj31ne0tyqbd.jpg) - -要完成多任务学习,我们需要对主干网络、各个任务以及训练方式进行必要的配置,为此,框架实现了一套高效的配置广播机制。如上图,通过yaml语言可以描述主干网络和各个任务实例的相关配置,并存储于文件中。由于任务实例可能有多个,且部分超参数会同时被主干网络和任务实例用到,因此对于这些需要“重复配置”却取值相同的超参数,可以写入全局配置文件中,框架在解析全局配置文件时会自动将其“广播”给主干网络和各个任务实例。 - -此外,全局配置文件的优先级要高于主干网络和任务实例的配置文件,因此当某个超参数在全局配置文件的取值与其在其余位置的取值冲突时,框架以全局配置文件中的取值为准。 - -同时,为了方便进行大规模实验和超参数调优,凡是在**全局配置文件**中出现的超参数,均可以通过命令行进行控制,例如,对于如下全局配置文件 - -```yaml -... -learning_rate: 1e-3 -batch_size: 32 -... -``` - -我们可能希望通过命令行临时调整学习率`learning_rate`和批大小`batch_size`,因此我们在运行训练脚本时可以通过如下方式对其进行改变。 - -```shell -python demo3.py --learning_rate 1e-4 --batch_size 64 -``` - -因此,各种配置方式的优先级如下 - -**命令行 > 全局配置文件 > 任务实例配置文件&主干网络配置文件** - -### reader、backbone与paradigm的选择 - -reader、backbone和paradigm是实现各类任务的三大基础组件,其中reader为数据集载入与处理工具,将一定格式的输入数据集自动转换成确定的输出元素字典(如单词id序列,位置id序列等);backbone为主干网络,将来自reader的一部分输出转换为高阶抽象的输出元素字典(如词向量、句向量、编码器输出的上下文相关词向量等);paradigm为任务范式,将来自reader的一部分输出和backbone输出的对原始输入的高阶抽象转换为训练所需要的loss以及预测所需要的输出等。 - -框架对这三部分组件的实现基于一种解耦合的设计,每个组件都会包括对输入对象的描述inputs_attr(s)和对输出对象的描述outputs_attr,每个输入或输出对象都会包含名字(描述含义)、形状(tensor shape)和数值类型(data type)。例如,主干网络BERT的输入输出对象的声明如下 - -```python - @property - def inputs_attr(self): - return {"token_ids": [[None, None], 'int64'], - "position_ids": [[None, None], 'int64'], - "segment_ids": [[None, None], 'int64'], - "input_mask": [[None, None], 'float32']} - - @property - def outputs_attr(self): - return {"word_embedding": [[None, None, self._emb_size], 'float32'], - "embedding_table": [[None, self._voc_size, self._emb_size], 'float32'], - "encoder_outputs": [[None, None, self._emb_size], 'float32'], - "sentence_embedding": [[None, self._emb_size], 'float32'], - "sentence_pair_embedding": [[None, self._emb_size], 'float32']} -``` - -其中`inputs_attr`描述了BERT的输入对象,包含`token_ids`, `position_ids`, `segment_ids`和`input_mask`,并且附带了它们的形状(None表示Tensor在该维度的大小可变)和数据类型。`outputs_attr`则描述了BERT模块能提供的输出对象,包含`word_embedding`, `embedding_table`, `encoder_outputs`等。 - -当用户创建任务实例时,只需要保证每个组件的输入对象是包含在上游组件的输出内的,那么这些组件就可以搭配在一起使用。其中,backbone的上游组件是reader,paradigm的上游组件同时包含reader和backbone。 - - -### 多目标任务下的训练终止条件与预期训练步数 - -#### 多个目标任务 -框架支持设定多个目标任务,当全局配置文件的`task_instance`字段指定超过一个任务实例时,**这多个任务实例默认均为目标任务(即`target_tag`字段被自动填充为全1)**。对于被设置成目标任务的任务实例,框架会为其计算预期的训练步数并在达到预期训练步数后为其保存预测模型。 - -当框架存在多个目标任务时,全局配置文件中的`num_epochs`(训练集遍历次数)仅会作用于第一个出现的目标任务,称为主任务(main task)。框架会根据主任务的训练步数来推理其他目标任务的预期训练步数(可通过`mix_ratio`控制,详情见下一节)。**注意,除了用来标记`num_epochs`的作用对象外,主任务与其他目标任务没有任何不同。** - -*注意:在多目标任务训练时,依然可以使用辅助任务来提升所有目标任务的测试集表现,但是要注意使用target_tag为引入的辅助任务打上辅助标记「0」* - - 
-#### 训练终止条件 -在训练开始前,`Controller`会为所有每个目标任务计算出预期的训练步数。当某个目标任务的完成预期的训练步数后,`Controller`保存该任务的预测模型,而后继续按照设定的各任务的采样概率进行多任务训练。当所有目标任务均达到预期的训练步数后,多任务学习终止。需要注意的是,`Controller`不会为辅助任务计算预期训练步数,也不会为其保存预测模型,其仅仅起到“陪同目标任务训练”的作用,不会影响到多任务学习的终止与否。 - -#### 任务采样概率与预期训练步数 -此外,在默认情况下,每个训练step的各个任务被采样到的概率均等,若用户希望更改其中某些任务的采样概率(比如某些任务的训练集较小,希望减少对其采样的次数;或某些任务较难,希望被更多的训练),可以在全局配置文件中通过`mix_ratio`字段控制各个任务的采样概率。例如,我们有三个任务,其中mrqa任务为目标任务,其余为辅助任务,我们对其`mix_ratio`进行如下设定: - -```yaml -task_instance: mrqa, match4mrqa, mlm4mrqa -mix_ratio: 1.0, 0.5, 0.5 -``` - -上述设置表示`match4mrqa`和`mlm4mrqa`任务的期望被采样次数均为`mrqa`任务的一半。此时,在mrqa任务被设置为目标任务的情况下,若mrqa任务训练一个epoch要经历5000 steps,且全局配置文件中设置了num_epochs为2,则根据上述`mix_ratio`的设置,mrqa任务将被训练5000\*2\*1.0=10000个steps,而`match4mrqa`任务和`mlm4mrqa`任务都会被训练5000个steps**左右**。 - -> 注意:若match4mrqa, mlm4mrqa被设置为辅助任务,则实际训练步数可能略多或略少于5000个steps。对于目标任务,则是精确的5000 steps。 - -#### 多个目标任务时预期训练步数的计算 - -当存在多个目标任务时,`num_epochs`仅作用于**第一个设定的目标任务(称为“主任务(main task)”)**,而后根据`mix_ratio`的设定为其余目标任务和辅助任务计算出预期的训练步数。 - -### 模型保存与预测机制 - -`Controller`可以在训练过程中保存两类模型,一类称为检查点模型(checkpoint),一类为预测模型(inference model)。 - -检查点模型会描述当前训练时刻的网络全局状态,包括backbone、所有任务以及优化器的全局参数,局部参数,长期变量等,即完整的多任务学习计算图。检查点模型用于训练意外终止时的断点恢复,或分阶段的对相同的模型进行连续训练。对于检查点模型,`Controller`默认不进行保存,但是用户可以通过在全局配置文件中添加`save_every_n_steps`来控制检查点模型的保存频率,例如设置为5000,则表示每5000个全局训练steps就会保存一次检查点模型。检查点模型放置在全局配置文件中设置的`save_path`指定的路径下。 - -预测模型则描述的是某个任务的完整预测模型,该模型内不会包含其他任务的参数,也不会保存优化器、dropout层等推理阶段不需要的节点。在保存预测模型时,`Controller`会同时保存预测相关的必要配置,如预测模型的输入输出列表,在进行预测时,可以调用实例化后的`Controller`的预测接口`pred`直接对相关任务进行预测。关于预测的用法示例可以参加DEMO2。 - -### 分布式训练 - -框架将单机单卡训练与单机多卡训练进行了无缝集成。当环境内有多张可用的GPU显卡时,框架会自动将模型复制到多张卡上,并且对于每个step,每张卡都会计算`batch_size`个训练样本,框架会自动对多卡的梯度进行合并。例如,环境中存在8张显卡,且`batch_size`设置为32时,这时每个step的实际batch size为32\*8=256。 - -当用户在多卡环境下希望仅用一张卡进行训练时,可以通过改变环境变量[CUDA_VISIBLE_DEVICES](https://devblogs.nvidia.com/cuda-pro-tip-control-gpu-visibility-cuda_visible_devices/)来进行控制。 - - - -## 附录A:内置数据集载入与处理工具(reader) - -所有的内置reader均同时支持中英文输入数据,**默认读取的数据为英文数据**,希望读入中文数据时,需在配置文件中设置 - -```yaml -for_cn: True -``` - -所有的内置reader,均支持以下字段 - -```yaml -vocab_path(REQUIRED): str类型。字典文件路径。 -max_seq_len(REQUIRED): int类型。切词后的序列最大长度(即token ids的最大长度)。注意经过分词后,token ids的数量往往多于原始的单词数(e.g., 使用wordpiece tokenizer时)。 -batch_size(REQUIRED): int类型。训练或预测时的批大小(每个step喂入神经网络的样本数)。 -train_file(REQUIRED): str类型。训练集文件所在路径。仅进行预测时,该字段可不设置。 -pred_file(REQUIRED): str类型。测试集文件所在路径。仅进行训练时,该字段可不设置。 - -do_lower_case(OPTIONAL): bool类型,默认为False。是否将大写英文字母转换成小写。 -shuffle(OPTIONAL): bool类型,默认为True。训练阶段打乱数据集样本的标志位,当置为True时,对数据集的样本进行全局打乱。注意,该标志位的设置不会影响预测阶段(预测阶段不会shuffle数据集)。 -seed(OPTIONAL): int类型,默认为。 -pred_batch_size(OPTIONAL): int类型。预测阶段的批大小,当该参数未设置时,预测阶段的批大小取决于`batch_size`字段的值。 -print_first_n(OPTIONAL): int类型。打印数据集的前n条样本和对应的reader输出,默认为0。 -``` - -#### 文本分类数据集reader工具:cls - -该reader完成文本分类数据集的载入与处理,reader接受[tsv格式](https://en.wikipedia.org/wiki/Tab-separated_values)的数据集输入,数据集应该包含两列,一列为样本标签`label`,一列为原始文本`text_a`。数据集范例可参考`data/cls4mrqa`中的数据集文件,格式形如 - -``` -label text_a -1 when was the last time the san antonio spurs missed the playoffshave only missed the playoffs four times since entering the NBA -0 the creation of the federal reserve system was an attempt toReserve System ( also known as the Federal Reserve or simply the Fed ) is the central banking system of the United States of America . -2 group f / 64 was a major backlash against the earlier photographic movement off / 64 was formed , Edward Weston went to a meeting of the John Reed Club , which was founded to support Marxist artists and writers . 
-0 Bessarabia eventually became under the control of which country? -``` -***注意:数据集的第一列必须为header,即标注每一列的列名*** - -该reader额外包含以下配置字段 - -```yaml -n_classes(REQUIRED): int类型。分类任务的类别数。 -``` - -reader的输出(生成器每次yield出的数据)包含以下字段 - -```yaml -token_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本,其中的每个元素为文本中的每个token对应的单词id。 -position_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本,其中的每个元素为文本中的每个token对应的位置id。 -segment_ids: 一个shape为[batch_size, seq_len]的全0矩阵,用于支持BERT、ERNIE等模型的输入。 -input_mask: 一个shape为[batch_size, seq_len]的矩阵,其中的每个元素为0或1,表示该位置是否是padding词(为1时代表是真实词,为0时代表是填充词)。 -label_ids: 一个shape为[batch_size]的矩阵,其中的每个元素为该样本的类别标签。 -task_ids: 一个shape为[batch_size, seq_len]的全0矩阵,用于支持ERNIE模型的输入。 -``` - -当处于预测阶段时,reader所yield出的数据不会包含`label_ids`字段。 - - -#### 文本匹配数据集reader工具:match - -该reader完成文本匹配数据集的载入与处理,reader接受[tsv格式](https://en.wikipedia.org/wiki/Tab-separated_values)的数据集输入,数据集应该包含三列,一列为样本标签`label`,其余两列分别为待匹配的文本`text_a`和文本`text_b`。数据集范例可参考`data/match4mrqa`中的数据集文件,格式形如 - -```yaml -label text_a text_b -1 From what work of Durkheim's was interaction ritual theory derived? **[TAB]** Subsequent to these developments, Randall Collins (2004) formulated his interaction ritual theory by drawing on Durkheim's work on totemic rituals that was extended by Goffman (1964/2013; 1967) into everyday focused encounters. Based on interaction ritual theory, we experience different levels -0 where is port au prince located in haiti **[TAB]** Its population is difficult to ascertain due to the rapid growth of slums in the hillsides -0 What is the world’s first-ever pilsner type blond lager, the company also awarded the Master Homebrewer Competition held in San Francisco to an award-winning brewer who won the prestigious American Homebrewers Associations' Homebrewer of the Year award in 2013? **[TAB]** of the Year award in 2013, becoming the first woman in thirty years, and the first African American person ever to ever win the award. -1 What has Pakistan told phone companies? **[TAB]** Islamabad, Pakistan (CNN) -- Under heavy criticism for a telling cell phone carriers to ban certain words in text messages, the Pakistan Telecommunication Authority went into damage control mode Wednesday. -``` - -***注意:数据集的第一列必须为header,即标注每一列的列名*** - -reader的输出(生成器每次yield出的数据)包含以下字段: - -```yaml -token_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本(文本对),其中的每个元素为文本对中的每个token对应的单词id,文本对使用`[SEP]`所对应的id隔开。 -position_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本,其中的每个元素为文本中的每个token对应的位置id。 -segment_ids: 一个shape为[batch_size, seq_len]的矩阵,在文本1的token位置,元素取值为0;在文本2的token位置,元素取值为1。用于支持BERT、ERNIE等模型的输入。 -input_mask: 一个shape为[batch_size, seq_len]的矩阵,其中的每个元素为0或1,表示该位置是否是padding词(为1时代表是真实词,为0时代表是填充词)。 -label_ids: 一个shape为[batch_size]的矩阵,其中的每个元素为该样本的类别标签,为0时表示两段文本不匹配,为1时代表构成匹配。 -task_ids: 一个shape为[batch_size, seq_len]的全0矩阵,用于支持ERNIE模型的输入。 -``` - -当处于预测阶段时,reader所yield出的数据不会包含`label_ids`字段。 - - -#### 机器阅读理解数据集reader工具:mrc - -该reader支持基于滑动窗口的机器阅读理解数据集载入,可以自动将较长的context按照步长切分成若干子文档,每个子文档与question分别计算答案片段,并在最终阶段合并。该reader接受[json格式]()的数据集。数据集范例可参考`data/mrqa`中的数据集文件,格式如下。 - -```json -{ - "version": "1.0", - "data": [ - {"title": "...", - "paragraphs": [ - {"context": "...", - "qas": [ - {"question": "..." - "id": "..." - "answers": [ - {"text": "...", - "answer_start": ...} - {...} - ... - ] - } - {...} - ... - {...}, - ... - ] - } - {...} - ... 
- ] - } - ``` - -数据集的最外层数据结构为字典,包含数据集版本号`version`和数据集`data`。在`data`字段内为各个样本,每个样本包含文章标题`title`和若干段落`paragraphs`,在`paragraphs`中的每个元素为一个段落`context`,基于该段落的内容,可以包含若干个问题和对应的答案`qas`,答案均位于该段落内。对于`qas`中的每个元素,包含一个问题`question`和一个全局唯一的标识`id`,以及(若干)答案`answers`。答案中的每个元素包含答案本身`text`及其在`context`中的起始位置`answer_start`。注意起始位置为字符级。此外,在测试集中,`qas`可以不包含`answers`字段。 - -该reader包含如下额外的可配置字段: - -```yaml -doc_stride (REQUIRED): int类型。对context应用滑动窗口时的滑动步长。 -max_query_len (REQUIRED): int类型。query的最大长度。 -max_answer_len (REQUIRED): int类型。预测阶段answer的最大长度,不训练时该字段可为空。 -n_best_size (OPTIONAL): int类型。预测阶段合并滑动窗口的样本时,每个样本所取的n_best列表大小。 -``` - - -reader的输出(生成器每次yield出的数据)包含以下字段: - -```yaml -token_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本(文本对),文本1为context,文本2为question,其中的每个元素为文本对中的每个token对应的单词id,文本对使用`[SEP]`所对应的id隔开。 -position_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本,其中的每个元素为文本中的每个token对应的位置id。 -segment_ids: 一个shape为[batch_size, seq_len]的矩阵,在文本1的token位置,元素取值为0;在文本2的token位置,元素取值为1。用于支持BERT、ERNIE等模型的输入。 -input_mask: 一个shape为[batch_size, seq_len]的矩阵,其中的每个元素为0或1,表示该位置是否是padding词(为1时代表是真实词,为0时代表是填充词)。 -task_ids: 一个shape为[batch_size, seq_len]的全0矩阵,用于支持ERNIE模型的输入。 -start_positions: 一个shape为[batch_size]的向量,每个元素代表当前样本的答案片段的起始位置。 -end_positions: 一个shape为[batch_size]的向量,每个元素代表当前样本的答案片段的结束位置。 -``` - -当处于预测阶段时,reader所yield出的数据不会包含`label_ids`字段,但会额外的包含`unique_ids`字段: - -```yaml -unique_ids: 一个shape为[batch_size, seq_len]的矩阵,代表每个样本的全局唯一的id,用于预测后对滑动窗口的结果进行合并。 -``` - - -#### 掩码语言模型数据集reader工具:mlm -该reader完成掩码语言模型数据集的载入与处理,reader接受[tsv格式](https://en.wikipedia.org/wiki/Tab-separated_values)的数据集输入,MLM任务为自监督任务,数据集仅包含一列`text_a`,reader会自动为每个样本生成随机的训练标签。格式如下 - -``` -text_a -Subsequent to these developments, Randall Collins (2004) formulated his interaction ritual theory by drawing on Durkheim's work on totemic rituals that was extended by Goffman (1964/2013; 1967) into everyday focused encounters. -Presidential spokesman Abigail Valte earlier Saturday urged residents of low-lying and mountainous areas that could be hit hard by the storm to evacuate, the state news agency said, citing an interview conducted on a government radio station. World Vision, the Christian humanitarian organization, said Saturday that it had to postpone some of its relief efforts due to Nalgae, with two of three emergency teams set to deploy once the storm passes. Another team is in Bulcan province, most of which is "still submerged" because of Nesat. The group is focusing its post-Nesat efforts on two communities in Manila and three in the northern Isabela and Zambales provinces. -of the Year award in 2013, becoming the first woman in thirty years, and the first African American person ever to ever win the award. After an extensive career with the California State Legislature she began working for PicoBrew, a product development company in Seattle, WA that specializes in automated brewing equipment. -the gakkel ridge is a boundary between which two tectonic plates Mid-Atlantic Ridge ( MAR ) is a mid-ocean ridge , a divergent tectonic plate or constructive plate boundary located along the floor of the Atlantic Ocean , and part of the longest mountain range in the world . The ridge extends from a junction with the Gakkel Ridge ( Mid-Arctic Ridge ) northeast of Greenland southward to the Bouvet Triple Junction in the South Atlantic . 
-``` - -***注意:数据集的第一列必须为header,即标注每一列的列名*** - -reader的输出(生成器每次yield出的数据)包含以下对象: - -```yaml -token_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本,其中的每个元素为文本中的每个token对应的单词id。 -position_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本,其中的每个元素为文本中的每个token对应的位置id。 -segment_ids: 一个shape为[batch_size, seq_len]的全0矩阵,用于支持BERT、ERNIE等模型的输入。 -input_mask: 一个shape为[batch_size, seq_len]的矩阵,其中的每个元素为0或1,表示该位置是否是padding词(为1时代表是真实词,为0时代表是填充词)。 -mask_label: 一个shape为[None]的向量,其中的每个元素为被mask掉的单词的真实单词id。 -mask_pos: 一个shape为[None]的向量,长度与`mask_pos`一致且元素一一对应。每个元素表示被mask掉的单词的位置。 -task_ids: 一个shape为[batch_size, seq_len]的全0矩阵,用于支持ERNIE模型的输入。 -``` - -## 附录B:内置主干网络(backbone) - -框架中内置了BERT和ERNIE作为主干网络,未来框架会引入更多的骨干网络如XLNet等。 - -#### BERT - -BERT包含了如下输入对象 - -```yaml -token_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本,其中的每个元素为文本中的每个token对应的单词id。 -position_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本,其中的每个元素为文本中的每个token对应的位置id。 -segment_ids: 一个shape为[batch_size, seq_len]的0/1矩阵,用于支持BERT、ERNIE等模型的输入,当元素为0时,代表当前token属于分类任务或匹配任务的text1,为1时代表当前token属于匹配任务的text2. -input_mask: 一个shape为[batch_size, seq_len]的矩阵,其中的每个元素为0或1,表示该位置是否是padding词(为1时代表是真实词,为0时代表是填充词)。 -``` - -提供了如下输出对象供下游组件使用。 - -```yaml -word_embedding: 一个shape为[batch_size, seq_len, emb_size]的张量(Tensor),float32类型。表示当前batch中各个样本的(上下文无关)词向量序列。 -embedding_table: 一个shape为[vocab_size, emb_size]的矩阵,float32类型。表示BERT当前维护的词向量查找表矩阵。 -encoder_outputs: 一个shape为[batch_size, seq_len, hidden_size]的Tensor, float32类型。表示BERT encoder对当前batch中各个样本的encoding结果。 -sentence_embedding: 一个shape为[batch_size, hidden_size]的matrix, float32类型。每一行代表BERT encoder对当前batch中相应样本的句子向量(sentence embedding) -sentence_pair_embedding: 一个shape为[batch_size, hidden_size]的matrix, float32类型。每一行代表BERT encoder对当前batch中相应样本的句子向量(sentence embedding) -``` - -#### ERNIE - -ERNIE包含了如下输入对象 - -```yaml -token_ids: 。一个shape为[batch_size, seq_len]的矩阵,每行是一条样本,其中的每个元素为文本中的每个token对应的单词id。 -position_ids: 一个shape为[batch_size, seq_len]的矩阵,每行是一条样本,其中的每个元素为文本中的每个token对应的位置id。 -segment_ids: 一个shape为[batch_size, seq_len]的0/1矩阵,用于支持BERT、ERNIE等模型的输入,当元素为0时,代表当前token属于分类任务或匹配任务的text1,为1时代表当前token属于匹配任务的text2. 
-input_mask: 一个shape为[batch_size, seq_len]的矩阵,其中的每个元素为0或1,表示该位置是否是padding词(为1时代表是真实词,为0时代表是填充词)。 -segment_ids: 一个shape为[batch_size, seq_len]的全0矩阵,用于支持BERT、ERNIE等模型的输入。 -task_ids: 一个shape为[batch_size, seq_len]的全0矩阵,用于支持ERNIE finetuning。 -``` - -提供了如下输出对象供下游组件使用。 - -```yaml -word_embedding: 一个shape为[batch_size, seq_len, emb_size]的张量(Tensor),float32类型。表示当前batch中各个样本的(上下文无关)词向量序列。 -embedding_table: 一个shape为[vocab_size, emb_size]的矩阵,float32类型。表示BERT当前维护的词向量查找表矩阵。 -encoder_outputs: 一个shape为[batch_size, seq_len, hidden_size]的Tensor, float32类型。表示BERT encoder对当前batch中各个样本的encoding结果。 -sentence_embedding: 一个shape为[batch_size, hidden_size]的matrix, float32类型。每一行代表BERT encoder对当前batch中相应样本的句子向量(sentence embedding) -sentence_pair_embedding: 一个shape为[batch_size, hidden_size]的matrix, float32类型。每一行代表BERT encoder对当前batch中相应样本的句子向量(sentence embedding) -``` - - -## 附录C:内置任务范式(paradigm) - -#### 分类范式:cls - -分类范式额外包含以下配置字段: - -```yaml -n_classes(REQUIRED): int类型。分类任务的类别数。 -pred_output_path (OPTIONAL) : str类型。预测输出结果的保存路径,当该参数未空时,保存至全局配置文件中的`save_path`字段指定路径下的任务目录。 -``` - -分类范式包含如下的输入对象: - -训练阶段: -```yaml -sentence_embedding: 一个shape为[batch_size, hidden_size]的matrix, float32类型。每一行代表BERT encoder对当前batch中相应样本的句子向量(sentence embedding) -label_ids: 一个shape为[batch_size]的矩阵,其中的每个元素为该样本的类别标签。 -``` - -预测阶段: -```yaml -sentence_embedding: 一个shape为[batch_size, hidden_size]的matrix, float32类型。每一行代表BERT encoder对当前batch中相应样本的句子向量(sentence embedding) -``` - -在训练阶段,输出loss;预测阶段输出各个类别的预测概率。 - -#### 匹配范式:match - - -匹配范式额外包含以下配置字段: - -```yaml -pred_output_path (OPTIONAL) : str类型。预测输出结果的保存路径,当该参数未空时,保存至全局配置文件中的`save_path`字段指定路径下的任务目录。 -``` - -匹配范式包含如下的输入对象: - -训练阶段: -```yaml -sentence_pair_embedding: 一个shape为[batch_size, hidden_size]的matrix, float32类型。每一行代表BERT encoder对当前batch中相应样本的句子向量(sentence embedding) -label_ids: 一个shape为[batch_size]的矩阵,其中的每个元素为该样本的类别标签,为0时表示两段文本不匹配,为1时代表构成匹配 -``` - -预测阶段: -```yaml -sentence_pair_embedding: 一个shape为[batch_size, hidden_size]的matrix, float32类型。每一行代表BERT encoder对当前batch中相应样本的句子向量(sentence embedding) -``` - -在训练阶段,输出loss;预测阶段输出匹配与否的概率分布。 - -#### 机器阅读理解范式:mrc - - -分类范式额外包含以下配置字段: - -```yaml -max_answer_len(REQUIRED): int类型。预测的最大答案长度 -n_best_size (OPTIONAL) : int类型,默认为20。预测时保存的nbest回答文件中每条样本的n_best数量 -pred_output_path (OPTIONAL) : str类型。预测输出结果的保存路径,当该参数未空时,保存至全局配置文件中的`save_path`字段指定路径下的任务目录 -``` - -机器阅读理解范式包含如下的输入对象: - -训练阶段: -```yaml -encoder_outputs: 一个shape为[batch_size, seq_len, hidden_size]的Tensor, float32类型。表示BERT encoder对当前batch中各个样本的encoding结果。 -start_positions: 一个shape为[batch_size]的向量,每个元素代表当前样本的答案片段的起始位置。 -end_positions: 一个shape为[batch_size]的向量,每个元素代表当前样本的答案片段的结束位置。 -``` - -预测阶段: -```yaml -encoder_outputs: 一个shape为[batch_size, seq_len, hidden_size]的Tensor, float32类型。表示BERT encoder对当前batch中各个样本的encoding结果。 -unique_ids: 一个shape为[batch_size, seq_len]的矩阵,代表每个样本的全局唯一的id,用于预测后对滑动窗口的结果进行合并。 -``` - - -#### 掩码语言模型范式:mlm - -该任务范式为无监督任务范式,不支持预测,仅用于(辅助)训练。包含如下的输入对象: - -```yaml -mask_label: 一个shape为[None]的向量,其中的每个元素为被mask掉的单词的真实单词id。 -mask_pos": 一个shape为[None]的向量,长度与`mask_pos`一致且元素一一对应。每个元素表示被mask掉的单词的位置。 -embedding_table: 一个shape为[vocab_size, emb_size]的矩阵,float32类型。表示BERT当前维护的词向量查找表矩阵。 -encoder_outputs: 一个shape为[batch_size, seq_len, hidden_size]的Tensor, float32类型。表示BERT encoder对当前batch中各个样本的encoding结果。 -``` - -## 附录D:可配置的全局参数列表 - -```yaml - -task_instance(REQUIRED): str类型。需要进行训练或预测的任务实例名。在多任务模式下,多个任务之间使用逗号`,`隔开。名称选取自任务实例配置文件的文件名(不包含后缀.yaml)。 -mix_ratio (OPTIONAL): str类型。每个任务的训练阶段的采样概率,各个值通过逗号`,`隔开,且与task_instance中的元素一一对应。默认每个任务的采样概率均为1.0,即所有任务等概率采样(代表与主任务采样次数的期望相同)。详情见 《进阶篇-训练终止条件与预期训练步数》。 -target_tag 
(OPTIONAL): str类型。目标/辅助任务标志位,各个值通过逗号`,`隔开,且与task_instance中的元素一一对应。标记为1的任务代表目标任务,标记为0的任务代表辅助任务。默认每个值均为1(即默认每个任务为目标任务)。相关使用示例见DEMO2。 -task_reuse_tag (OPTIONAL): str类型。任务层复用标志位,各个值通过逗号`,`隔开,且与task_instance中的元素一一对应。元素取值相同的任务会自动共享任务层参数,取值不同的任务不复用任务层参数。相关使用示例见DEMO3。 - -backbone(REQUIRED): str类型。主干网络名。 -backbone_config_path (OPTIONAL): str类型。主干网络配置文件路径。 - -save_path(REQUIRED): str类型。checkpoint文件和各个目标任务的预测模型保存路径。 -vocab_path(REQUIRED): str类型。字典文件,纯文本格式存储,其中每行为一个单词,供reader、backbone和各个任务使用。 -do_lower_case (OPTIONAL): bool类型。大小写标志位。默认为False,即区分大小写。 -for_cn: bool类型。中文模式标志位。默认为False,即默认输入为英文,设置为True后,分词器、后处理等按照中文语言进行处理。 - -print_every_n_steps (OPTIONAL): int类型。默认为5。训练阶段打印日志的频率(step为单位)。 -save_every_n_steps (OPTIONAL): int类型。默认为-1。训练过程中保存checkpoint模型的频率,默认不保存。 - -optimizer(REQUIRED): str类型。优化器名称,目前框架只支持adam,未来会支持更多优化器。 -learning_rate(REQUIRED): str类型。训练阶段的学习率。 -batch_size(REQUIRED): int类型。批大小,即每个训练或推理step所使用样本数。 -epoch(REQUIRED): int类型。主任务的训练epoch数。 - -use_gpu (OPTIONAL): bool类型。默认为True。框架默认使用GPU进行单机单卡或分布式训练,若希望使用cpu训练或推理,可将该标志位置为False。 - -warmup_proportion (OPTIONAL): float类型。默认为0。对预训练模型finetuning时的warmup的训练step占预估的全部训练步数的比例。 -use_ema (OPTIONAL): bool类型。默认为False。是否开启[ema](https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) 进行训练和推理。 -ema_decay (OPTIONAL): float类型。默认为0。开启ema时的权重衰减指数。 -random_seed (OPTIONAL): int类型。随机种子,默认1。 -``` - ## License This tutorial is contributed by [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) and licensed under the [Apache-2.0 license](https://github.com/PaddlePaddle/models/blob/develop/LICENSE). diff --git a/backbone/README.md b/backbone/README.md deleted file mode 100644 index e69de29..0000000 diff --git a/backbone/__init__.py b/backbone/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/backbone/bert.py b/backbone/bert.py deleted file mode 100644 index 74f772c..0000000 --- a/backbone/bert.py +++ /dev/null @@ -1,155 +0,0 @@ -# -*- coding: UTF-8 -*- -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""v1.1 -BERT model.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from paddle import fluid -from paddle.fluid import layers - -from paddlepalm.backbone.utils.transformer import pre_process_layer, encoder -from paddlepalm.interface import backbone - - -class Model(backbone): - - def __init__(self, config, phase): - - # self._is_training = phase == 'train' # backbone一般不用关心运行阶段,因为outputs在任何阶段基本不会变 - self._emb_size = config["hidden_size"] - self._n_layer = config["num_hidden_layers"] - self._n_head = config["num_attention_heads"] - self._voc_size = config["vocab_size"] - self._max_position_seq_len = config["max_position_embeddings"] - self._sent_types = config["type_vocab_size"] - self._hidden_act = config["hidden_act"] - self._prepostprocess_dropout = config["hidden_dropout_prob"] - self._attention_dropout = config["attention_probs_dropout_prob"] - - self._word_emb_name = "word_embedding" - self._pos_emb_name = "pos_embedding" - self._sent_emb_name = "sent_embedding" - - # Initialize all weigths by truncated normal initializer, and all biases - # will be initialized by constant zero by default. - self._param_initializer = fluid.initializer.TruncatedNormal( - scale=config["initializer_range"]) - - @property - def inputs_attr(self): - return {"token_ids": [[-1, -1, 1], 'int64'], - "position_ids": [[-1, -1, 1], 'int64'], - "segment_ids": [[-1, -1, 1], 'int64'], - "input_mask": [[-1, -1, 1], 'float32']} - - @property - def outputs_attr(self): - return {"word_embedding": [[-1, -1, self._emb_size], 'float32'], - "embedding_table": [[-1, self._voc_size, self._emb_size], 'float32'], - "encoder_outputs": [[-1, -1, self._emb_size], 'float32'], - "sentence_embedding": [[-1, self._emb_size], 'float32'], - "sentence_pair_embedding": [[-1, self._emb_size], 'float32']} - - def build(self, inputs, scope_name=""): - src_ids = inputs['token_ids'] - pos_ids = inputs['position_ids'] - sent_ids = inputs['segment_ids'] - input_mask = inputs['input_mask'] - - self._emb_dtype = 'float32' - # padding id in vocabulary must be set to 0 - emb_out = fluid.layers.embedding( - input=src_ids, - size=[self._voc_size, self._emb_size], - dtype=self._emb_dtype, - param_attr=fluid.ParamAttr( - name=scope_name+self._word_emb_name, initializer=self._param_initializer), - is_sparse=False) - - # fluid.global_scope().find_var('backbone-word_embedding').get_tensor() - embedding_table = fluid.default_main_program().global_block().var(scope_name+self._word_emb_name) - - position_emb_out = fluid.layers.embedding( - input=pos_ids, - size=[self._max_position_seq_len, self._emb_size], - dtype=self._emb_dtype, - param_attr=fluid.ParamAttr( - name=scope_name+self._pos_emb_name, initializer=self._param_initializer)) - - sent_emb_out = fluid.layers.embedding( - sent_ids, - size=[self._sent_types, self._emb_size], - dtype=self._emb_dtype, - param_attr=fluid.ParamAttr( - name=scope_name+self._sent_emb_name, initializer=self._param_initializer)) - - emb_out = emb_out + position_emb_out - emb_out = emb_out + sent_emb_out - - emb_out = pre_process_layer( - emb_out, 'nd', self._prepostprocess_dropout, name=scope_name+'pre_encoder') - - self_attn_mask = fluid.layers.matmul( - x=input_mask, y=input_mask, transpose_y=True) - - self_attn_mask = fluid.layers.scale( - x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False) - n_head_self_attn_mask = fluid.layers.stack( - x=[self_attn_mask] * self._n_head, axis=1) - n_head_self_attn_mask.stop_gradient = True - - enc_out = 
encoder( - enc_input=emb_out, - attn_bias=n_head_self_attn_mask, - n_layer=self._n_layer, - n_head=self._n_head, - d_key=self._emb_size // self._n_head, - d_value=self._emb_size // self._n_head, - d_model=self._emb_size, - d_inner_hid=self._emb_size * 4, - prepostprocess_dropout=self._prepostprocess_dropout, - attention_dropout=self._attention_dropout, - relu_dropout=0, - hidden_act=self._hidden_act, - preprocess_cmd="", - postprocess_cmd="dan", - param_initializer=self._param_initializer, - name=scope_name+'encoder') - - - next_sent_feat = fluid.layers.slice( - input=enc_out, axes=[1], starts=[0], ends=[1]) - next_sent_feat = fluid.layers.reshape(next_sent_feat, [-1, next_sent_feat.shape[-1]]) - next_sent_feat = fluid.layers.fc( - input=next_sent_feat, - size=self._emb_size, - act="tanh", - param_attr=fluid.ParamAttr( - name=scope_name+"pooled_fc.w_0", initializer=self._param_initializer), - bias_attr=scope_name+"pooled_fc.b_0") - - return {'embedding_table': embedding_table, - 'word_embedding': emb_out, - 'encoder_outputs': enc_out, - 'sentence_embedding': next_sent_feat, - 'sentence_pair_embedding': next_sent_feat} - - def postprocess(self, rt_outputs): - pass - - diff --git a/backbone/ernie.py b/backbone/ernie.py deleted file mode 100644 index 1e47153..0000000 --- a/backbone/ernie.py +++ /dev/null @@ -1,175 +0,0 @@ -# -*- coding: UTF-8 -*- -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Ernie model.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals -from __future__ import absolute_import - -from paddle import fluid -from paddle.fluid import layers - -from paddlepalm.backbone.utils.transformer import pre_process_layer, encoder -from paddlepalm.interface import backbone - - -class Model(backbone): - - def __init__(self, - config, - phase): - - # self._is_training = phase == 'train' # backbone一般不用关心运行阶段,因为outputs在任何阶段基本不会变 - - self._emb_size = config['hidden_size'] - self._n_layer = config['num_hidden_layers'] - self._n_head = config['num_attention_heads'] - self._voc_size = config['vocab_size'] - self._max_position_seq_len = config['max_position_embeddings'] - if config['sent_type_vocab_size']: - self._sent_types = config['sent_type_vocab_size'] - else: - self._sent_types = config['type_vocab_size'] - - self._task_types = config['task_type_vocab_size'] - - self._hidden_act = config['hidden_act'] - self._prepostprocess_dropout = config['hidden_dropout_prob'] - self._attention_dropout = config['attention_probs_dropout_prob'] - - self._word_emb_name = "word_embedding" - self._pos_emb_name = "pos_embedding" - self._sent_emb_name = "sent_embedding" - self._task_emb_name = "task_embedding" - self._emb_dtype = "float32" - - self._param_initializer = fluid.initializer.TruncatedNormal( - scale=config['initializer_range']) - - @property - def inputs_attr(self): - return {"token_ids": [[-1, -1, 1], 'int64'], - "position_ids": [[-1, -1, 1], 'int64'], - "segment_ids": [[-1, -1, 1], 'int64'], - "input_mask": [[-1, -1, 1], 'float32'], - "task_ids": [[-1,-1, 1], 'int64']} - - @property - def outputs_attr(self): - return {"word_embedding": [[-1, -1, self._emb_size], 'float32'], - "embedding_table": [[-1, self._voc_size, self._emb_size], 'float32'], - "encoder_outputs": [[-1, -1, self._emb_size], 'float32'], - "sentence_embedding": [[-1, self._emb_size], 'float32'], - "sentence_pair_embedding": [[-1, self._emb_size], 'float32']} - - def build(self, inputs, scope_name=""): - - src_ids = inputs['token_ids'] - pos_ids = inputs['position_ids'] - sent_ids = inputs['segment_ids'] - input_mask = inputs['input_mask'] - task_ids = inputs['task_ids'] - - # padding id in vocabulary must be set to 0 - emb_out = fluid.layers.embedding( - input=src_ids, - size=[self._voc_size, self._emb_size], - dtype=self._emb_dtype, - param_attr=fluid.ParamAttr( - name=scope_name+self._word_emb_name, initializer=self._param_initializer), - is_sparse=False) - - # fluid.global_scope().find_var('backbone-word_embedding').get_tensor() - embedding_table = fluid.default_main_program().global_block().var(scope_name+self._word_emb_name) - - position_emb_out = fluid.layers.embedding( - input=pos_ids, - size=[self._max_position_seq_len, self._emb_size], - dtype=self._emb_dtype, - param_attr=fluid.ParamAttr( - name=scope_name+self._pos_emb_name, initializer=self._param_initializer)) - - sent_emb_out = fluid.layers.embedding( - sent_ids, - size=[self._sent_types, self._emb_size], - dtype=self._emb_dtype, - param_attr=fluid.ParamAttr( - name=scope_name+self._sent_emb_name, initializer=self._param_initializer)) - - emb_out = emb_out + position_emb_out - emb_out = emb_out + sent_emb_out - - task_emb_out = fluid.layers.embedding( - task_ids, - size=[self._task_types, self._emb_size], - dtype=self._emb_dtype, - param_attr=fluid.ParamAttr( - name=scope_name+self._task_emb_name, - initializer=self._param_initializer)) - - 
emb_out = emb_out + task_emb_out - - emb_out = pre_process_layer( - emb_out, 'nd', self._prepostprocess_dropout, name=scope_name+'pre_encoder') - - self_attn_mask = fluid.layers.matmul( - x=input_mask, y=input_mask, transpose_y=True) - - self_attn_mask = fluid.layers.scale( - x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False) - n_head_self_attn_mask = fluid.layers.stack( - x=[self_attn_mask] * self._n_head, axis=1) - n_head_self_attn_mask.stop_gradient = True - - enc_out = encoder( - enc_input=emb_out, - attn_bias=n_head_self_attn_mask, - n_layer=self._n_layer, - n_head=self._n_head, - d_key=self._emb_size // self._n_head, - d_value=self._emb_size // self._n_head, - d_model=self._emb_size, - d_inner_hid=self._emb_size * 4, - prepostprocess_dropout=self._prepostprocess_dropout, - attention_dropout=self._attention_dropout, - relu_dropout=0, - hidden_act=self._hidden_act, - preprocess_cmd="", - postprocess_cmd="dan", - param_initializer=self._param_initializer, - name=scope_name+'encoder') - - - next_sent_feat = fluid.layers.slice( - input=enc_out, axes=[1], starts=[0], ends=[1]) - next_sent_feat = fluid.layers.reshape(next_sent_feat, [-1, next_sent_feat.shape[-1]]) - next_sent_feat = fluid.layers.fc( - input=next_sent_feat, - size=self._emb_size, - act="tanh", - param_attr=fluid.ParamAttr( - name=scope_name+"pooled_fc.w_0", initializer=self._param_initializer), - bias_attr=scope_name+"pooled_fc.b_0") - - return {'embedding_table': embedding_table, - 'word_embedding': emb_out, - 'encoder_outputs': enc_out, - 'sentence_embedding': next_sent_feat, - 'sentence_pair_embedding': next_sent_feat} - - def postprocess(self, rt_outputs): - pass diff --git a/backbone/utils/__init__.py b/backbone/utils/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/backbone/utils/transformer.py b/backbone/utils/transformer.py deleted file mode 100644 index b6a540b..0000000 --- a/backbone/utils/transformer.py +++ /dev/null @@ -1,371 +0,0 @@ -# -*- coding: UTF-8 -*- -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Transformer encoder.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from functools import partial - -import paddle.fluid as fluid -import paddle.fluid.layers as layers - -from paddle.fluid.layer_helper import LayerHelper as LayerHelper -from functools import reduce # py3 -def layer_norm(x, begin_norm_axis=1, epsilon=1e-6, param_attr=None, bias_attr=None): - helper = LayerHelper('layer_norm', **locals()) - mean = layers.reduce_mean(x, dim=begin_norm_axis, keep_dim=True) - shift_x = layers.elementwise_sub(x=x, y=mean, axis=0) - variance = layers.reduce_mean(layers.square(shift_x), dim=begin_norm_axis, keep_dim=True) - r_stdev = layers.rsqrt(variance + epsilon) - norm_x = layers.elementwise_mul(x=shift_x, y=r_stdev, axis=0) - - param_shape = [reduce(lambda x, y: x * y, norm_x.shape[begin_norm_axis:])] - param_dtype = norm_x.dtype - scale = helper.create_parameter( - attr=param_attr, - shape=param_shape, - dtype=param_dtype, - default_initializer=fluid.initializer.Constant(1.)) - bias = helper.create_parameter( - attr=bias_attr, - shape=param_shape, - dtype=param_dtype, - is_bias=True, - default_initializer=fluid.initializer.Constant(0.)) - - out = layers.elementwise_mul(x=norm_x, y=scale, axis=-1) - out = layers.elementwise_add(x=out, y=bias, axis=-1) - - return out - - -def multi_head_attention(queries, - keys, - values, - attn_bias, - d_key, - d_value, - d_model, - n_head=1, - dropout_rate=0., - cache=None, - param_initializer=None, - name='multi_head_att'): - """ - Multi-Head Attention. Note that attn_bias is added to the logit before - computing softmax activiation to mask certain selected positions so that - they will not considered in attention weights. - """ - keys = queries if keys is None else keys - values = keys if values is None else values - - if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3): - raise ValueError( - "Inputs: quries, keys and values should all be 3-D tensors.") - - def __compute_qkv(queries, keys, values, n_head, d_key, d_value): - """ - Add linear projection to queries, keys, and values. - """ - q = layers.fc(input=queries, - size=d_key * n_head, - num_flatten_dims=2, - param_attr=fluid.ParamAttr( - name=name + '_query_fc.w_0', - initializer=param_initializer), - bias_attr=name + '_query_fc.b_0') - k = layers.fc(input=keys, - size=d_key * n_head, - num_flatten_dims=2, - param_attr=fluid.ParamAttr( - name=name + '_key_fc.w_0', - initializer=param_initializer), - bias_attr=name + '_key_fc.b_0') - v = layers.fc(input=values, - size=d_value * n_head, - num_flatten_dims=2, - param_attr=fluid.ParamAttr( - name=name + '_value_fc.w_0', - initializer=param_initializer), - bias_attr=name + '_value_fc.b_0') - return q, k, v - - def __split_heads(x, n_head): - """ - Reshape the last dimension of inpunt tensor x so that it becomes two - dimensions and then transpose. Specifically, input a tensor with shape - [bs, max_sequence_length, n_head * hidden_dim] then output a tensor - with shape [bs, n_head, max_sequence_length, hidden_dim]. - """ - hidden_size = x.shape[-1] - # The value 0 in shape attr means copying the corresponding dimension - # size of the input as the output dimension size. 
- reshaped = layers.reshape( - x=x, shape=[0, 0, n_head, hidden_size // n_head], inplace=True) - - # permuate the dimensions into: - # [batch_size, n_head, max_sequence_len, hidden_size_per_head] - return layers.transpose(x=reshaped, perm=[0, 2, 1, 3]) - - def __combine_heads(x): - """ - Transpose and then reshape the last two dimensions of inpunt tensor x - so that it becomes one dimension, which is reverse to __split_heads. - """ - if len(x.shape) == 3: return x - if len(x.shape) != 4: - raise ValueError("Input(x) should be a 4-D Tensor.") - - trans_x = layers.transpose(x, perm=[0, 2, 1, 3]) - # The value 0 in shape attr means copying the corresponding dimension - # size of the input as the output dimension size. - return layers.reshape( - x=trans_x, - shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]], - inplace=True) - - def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate): - """ - Scaled Dot-Product Attention - """ - scaled_q = layers.scale(x=q, scale=d_key**-0.5) - product = layers.matmul(x=scaled_q, y=k, transpose_y=True) - if attn_bias: - product += attn_bias - weights = layers.softmax(product) - if dropout_rate: - weights = layers.dropout( - weights, - dropout_prob=dropout_rate, - dropout_implementation="upscale_in_train", - is_test=False) - out = layers.matmul(weights, v) - return out - - q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value) - - if cache is not None: # use cache and concat time steps - # Since the inplace reshape in __split_heads changes the shape of k and - # v, which is the cache input for next time step, reshape the cache - # input from the previous time step first. - k = cache["k"] = layers.concat( - [layers.reshape( - cache["k"], shape=[0, 0, d_model]), k], axis=1) - v = cache["v"] = layers.concat( - [layers.reshape( - cache["v"], shape=[0, 0, d_model]), v], axis=1) - - q = __split_heads(q, n_head) - k = __split_heads(k, n_head) - v = __split_heads(v, n_head) - - ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_key, - dropout_rate) - - out = __combine_heads(ctx_multiheads) - - # Project back to the model size. - proj_out = layers.fc(input=out, - size=d_model, - num_flatten_dims=2, - param_attr=fluid.ParamAttr( - name=name + '_output_fc.w_0', - initializer=param_initializer), - bias_attr=name + '_output_fc.b_0') - return proj_out - - -def positionwise_feed_forward(x, - d_inner_hid, - d_hid, - dropout_rate, - hidden_act, - param_initializer=None, - name='ffn'): - """ - Position-wise Feed-Forward Networks. - This module consists of two linear transformations with a ReLU activation - in between, which is applied to each position separately and identically. - """ - hidden = layers.fc(input=x, - size=d_inner_hid, - num_flatten_dims=2, - act=hidden_act, - param_attr=fluid.ParamAttr( - name=name + '_fc_0.w_0', - initializer=param_initializer), - bias_attr=name + '_fc_0.b_0') - if dropout_rate: - hidden = layers.dropout( - hidden, - dropout_prob=dropout_rate, - dropout_implementation="upscale_in_train", - is_test=False) - out = layers.fc(input=hidden, - size=d_hid, - num_flatten_dims=2, - param_attr=fluid.ParamAttr( - name=name + '_fc_1.w_0', initializer=param_initializer), - bias_attr=name + '_fc_1.b_0') - return out - - -def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0., - name=''): - """ - Add residual connection, layer normalization and droput to the out tensor - optionally according to the value of process_cmd. 
- This will be used before or after multi-head attention and position-wise - feed-forward networks. - """ - for cmd in process_cmd: - if cmd == "a": # add residual connection - out = out + prev_out if prev_out else out - elif cmd == "n": # add layer normalization - out_dtype = out.dtype - if out_dtype == fluid.core.VarDesc.VarType.FP16: - out = layers.cast(x=out, dtype="float32") - out = layer_norm( - out, - begin_norm_axis=len(out.shape) - 1, - param_attr=fluid.ParamAttr( - name=name + '_layer_norm_scale', - initializer=fluid.initializer.Constant(1.)), - bias_attr=fluid.ParamAttr( - name=name + '_layer_norm_bias', - initializer=fluid.initializer.Constant(0.))) - if out_dtype == fluid.core.VarDesc.VarType.FP16: - out = layers.cast(x=out, dtype="float16") - elif cmd == "d": # add dropout - if dropout_rate: - out = layers.dropout( - out, - dropout_prob=dropout_rate, - dropout_implementation="upscale_in_train", - is_test=False) - return out - - -pre_process_layer = partial(pre_post_process_layer, None) -post_process_layer = pre_post_process_layer - - -def encoder_layer(enc_input, - attn_bias, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - hidden_act, - preprocess_cmd="n", - postprocess_cmd="da", - param_initializer=None, - name=''): - """The encoder layers that can be stacked to form a deep encoder. - This module consits of a multi-head (self) attention followed by - position-wise feed-forward networks and both the two components companied - with the post_process_layer to add residual connection, layer normalization - and droput. - """ - attn_output = multi_head_attention( - pre_process_layer( - enc_input, - preprocess_cmd, - prepostprocess_dropout, - name=name + '_pre_att'), - None, - None, - attn_bias, - d_key, - d_value, - d_model, - n_head, - attention_dropout, - param_initializer=param_initializer, - name=name + '_multi_head_att') - attn_output = post_process_layer( - enc_input, - attn_output, - postprocess_cmd, - prepostprocess_dropout, - name=name + '_post_att') - ffd_output = positionwise_feed_forward( - pre_process_layer( - attn_output, - preprocess_cmd, - prepostprocess_dropout, - name=name + '_pre_ffn'), - d_inner_hid, - d_model, - relu_dropout, - hidden_act, - param_initializer=param_initializer, - name=name + '_ffn') - return post_process_layer( - attn_output, - ffd_output, - postprocess_cmd, - prepostprocess_dropout, - name=name + '_post_ffn') - - -def encoder(enc_input, - attn_bias, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - hidden_act, - preprocess_cmd="n", - postprocess_cmd="da", - param_initializer=None, - name=''): - """ - The encoder is composed of a stack of identical layers returned by calling - encoder_layer. 
- """ - for i in range(n_layer): - enc_output = encoder_layer( - enc_input, - attn_bias, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - hidden_act, - preprocess_cmd, - postprocess_cmd, - param_initializer=param_initializer, - name=name + '_layer_' + str(i)) - enc_input = enc_output - enc_output = pre_process_layer( - enc_output, preprocess_cmd, prepostprocess_dropout, name="post_encoder") - - return enc_output diff --git a/demo/demo2/run.py b/demo/demo2/run.py index 826ae33..bc0ef9a 100644 --- a/demo/demo2/run.py +++ b/demo/demo2/run.py @@ -25,14 +25,11 @@ if __name__ == '__main__': # 创建该分类任务的reader,由诸多参数控制数据集读入格式、文件数量、预处理规则等 cls_reader = palm.reader.ClassifyReader(vocab_path, max_seqlen) cls_reader2 = palm.reader.ClassifyReader(vocab_path, max_seqlen) - predict_cls_reader = palm.reader.ClassifyReader(vocab_path, max_seqlen, phase='predict') print(cls_reader.outputs_attr) - print(predict_cls_reader.outputs_attr) # 不同的backbone会对任务reader有不同的特征要求,例如对于分类任务,基本的输入feature为token_ids和label_ids,但是对于BERT,还要求从输入中额外提取position、segment、input_mask等特征,因此经过register后,reader会自动补充backbone所要求的字段 cls_reader.register_with(ernie) cls_reader2.register_with(ernie) print(cls_reader.outputs_attr) - print(predict_cls_reader.outputs_attr) print("preparing data...") print(cls_reader.num_examples) @@ -66,8 +63,8 @@ if __name__ == '__main__': adam = palm.optimizer.Adam(loss_var, lr, sched) mh_trainer.build_backward(optimizer=adam, weight_decay=0.001) - - mh_trainer.random_init_params() + + # mh_trainer.random_init_params() mh_trainer.load_pretrain('pretrain/ernie/params') # trainer.train(iterator_fn, print_steps=1, save_steps=5, save_path='outputs', save_type='ckpt,predict') diff --git a/paddlepalm/backbone/bert.py b/paddlepalm/backbone/bert.py index 5c5ece0..06080e1 100644 --- a/paddlepalm/backbone/bert.py +++ b/paddlepalm/backbone/bert.py @@ -23,10 +23,10 @@ from paddle import fluid from paddle.fluid import layers from paddlepalm.backbone.utils.transformer import pre_process_layer, encoder -from paddlepalm.backbone.base_backbone import BaseBackbone +from paddlepalm.backbone.base_backbone import Backbone -class BERT(BaseBackbone): +class BERT(Backbone): def __init__(hidden_size, num_hidden_layers, num_attention_heads, vocab_size, \ diff --git a/paddlepalm/backbone/ernie.py b/paddlepalm/backbone/ernie.py index 3300b11..615766a 100644 --- a/paddlepalm/backbone/ernie.py +++ b/paddlepalm/backbone/ernie.py @@ -24,10 +24,10 @@ from paddle import fluid from paddle.fluid import layers from paddlepalm.backbone.utils.transformer import pre_process_layer, encoder -from paddlepalm.backbone.base_backbone import BaseBackbone +from paddlepalm.backbone.base_backbone import Backbone -class ERNIE(BaseBackbone): +class ERNIE(Backbone): def __init__(self, hidden_size, num_hidden_layers, num_attention_heads, vocab_size, \ max_position_embeddings, sent_type_vocab_size, task_type_vocab_size, \ diff --git a/paddlepalm/head/base_head.py b/paddlepalm/head/base_head.py index 7d24798..2446885 100644 --- a/paddlepalm/head/base_head.py +++ b/paddlepalm/head/base_head.py @@ -16,7 +16,7 @@ import os import json -class BaseHead(object): +class Head(object): def __init__(self, phase='train'): """ diff --git a/paddlepalm/head/cls.py b/paddlepalm/head/cls.py index 133e013..6a1aef4 100644 --- a/paddlepalm/head/cls.py +++ b/paddlepalm/head/cls.py @@ -15,12 +15,12 @@ import paddle.fluid as fluid from paddle.fluid import layers -from paddlepalm.head.base_head import BaseHead +from 
diff --git a/paddlepalm/backbone/bert.py b/paddlepalm/backbone/bert.py
index 5c5ece0..06080e1 100644
--- a/paddlepalm/backbone/bert.py
+++ b/paddlepalm/backbone/bert.py
@@ -23,10 +23,10 @@ from paddle import fluid
 from paddle.fluid import layers
 
 from paddlepalm.backbone.utils.transformer import pre_process_layer, encoder
-from paddlepalm.backbone.base_backbone import BaseBackbone
+from paddlepalm.backbone.base_backbone import Backbone
 
 
-class BERT(BaseBackbone):
+class BERT(Backbone):
 
     def __init__(self, hidden_size, num_hidden_layers, num_attention_heads, vocab_size, \
 
diff --git a/paddlepalm/backbone/ernie.py b/paddlepalm/backbone/ernie.py
index 3300b11..615766a 100644
--- a/paddlepalm/backbone/ernie.py
+++ b/paddlepalm/backbone/ernie.py
@@ -24,10 +24,10 @@ from paddle import fluid
 from paddle.fluid import layers
 
 from paddlepalm.backbone.utils.transformer import pre_process_layer, encoder
-from paddlepalm.backbone.base_backbone import BaseBackbone
+from paddlepalm.backbone.base_backbone import Backbone
 
 
-class ERNIE(BaseBackbone):
+class ERNIE(Backbone):
 
     def __init__(self, hidden_size, num_hidden_layers, num_attention_heads, vocab_size, \
                  max_position_embeddings, sent_type_vocab_size, task_type_vocab_size, \
diff --git a/paddlepalm/head/base_head.py b/paddlepalm/head/base_head.py
index 7d24798..2446885 100644
--- a/paddlepalm/head/base_head.py
+++ b/paddlepalm/head/base_head.py
@@ -16,7 +16,7 @@
 import os
 import json
 
-class BaseHead(object):
+class Head(object):
 
     def __init__(self, phase='train'):
         """
diff --git a/paddlepalm/head/cls.py b/paddlepalm/head/cls.py
index 133e013..6a1aef4 100644
--- a/paddlepalm/head/cls.py
+++ b/paddlepalm/head/cls.py
@@ -15,12 +15,12 @@
 
 import paddle.fluid as fluid
 from paddle.fluid import layers
-from paddlepalm.head.base_head import BaseHead
+from paddlepalm.head.base_head import Head
 import numpy as np
 import os
 
 
-class Classify(BaseHead):
+class Classify(Head):
     """
     classification
     """
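These renames (`BaseBackbone` → `Backbone`, `BaseHead` → `Head`) keep the module paths unchanged, so downstream custom components only need to update the class identifier they inherit from. A minimal sketch of a custom head under the new names (the constructor signature follows `paddlepalm/head/base_head.py`; the class and its argument are hypothetical, shown only to illustrate the migration):

```python
from paddlepalm.head.base_head import Head

class MyHead(Head):
    """A hypothetical task head, shown only to illustrate the new base class."""

    def __init__(self, num_classes, phase='train'):
        # Head.__init__ takes the running phase, as defined in base_head.py.
        Head.__init__(self, phase=phase)
        self._num_classes = num_classes
```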
diff --git a/paddlepalm/multihead_trainer.py b/paddlepalm/multihead_trainer.py
index 6708b71..4c0cfaa 100644
--- a/paddlepalm/multihead_trainer.py
+++ b/paddlepalm/multihead_trainer.py
@@ -12,10 +12,20 @@ VERBOSE=False
 
 class MultiHeadTrainer(Trainer):
+    """
+    The core unit to start a multi-task training/predicting session. A MultiHeadTrainer is built on top of several Trainers. Beyond what it inherits from Trainer, it additionally
+    provides model backbone reuse across tasks, trainer sampling for multi-task learning, and multi-head inference for efficient evaluation and prediction.
+    """
 
-    def __init__(self, trainers, reuse_flags=None):
-        if reuse_flags is not None:
-            assert len(reuse_flags) == len(trainers)
+    def __init__(self, trainers):
+        """Create a new multi_head_trainer.
+
+        Args:
+            trainers: a list of Trainer objects.
+
+        """
+        # if reuse_flags is not None:
+        #     assert len(reuse_flags) == len(trainers)
+
         Trainer.__init__(self, '')
 
         self._trainers = trainers
@@ -46,6 +56,16 @@ class MultiHeadTrainer(Trainer):
             t._set_multitask()
 
     def build_forward(self, backbone, heads):
+        """
+        Build the forward computation graph for training, which is usually built from the input layer through to the loss node.
+
+        Args:
+            backbone: a Backbone object with phase == 'train', which is used to extract multi-level text features, e.g., contextual word embeddings and sentence embeddings.
+            heads: a list of Head objects. The phase of each head should be set as 'train'; the heads are used to build task specific output layers.
+
+        Return:
+            - loss_var: a Variable object. The computational graph variable (node) of the loss.
+        """
 
         if isinstance(heads, list):
             head_dict = {k.name: v for k,v in zip(self._trainers, heads)}
@@ -103,12 +123,21 @@ class MultiHeadTrainer(Trainer):
         #     print(var)
         # exit()
         #     print(var)
+        if not self._multi_task:
+            self._init_exe_prog(for_train=True)
+
         return loss_var
 
     def fit_readers(self, reader_dict):
        raise NotImplementedError()
 
     def fit_readers_with_mixratio(self, readers, sampling_reference, num_epochs, phase='train'):
+        """
+        Bind readers and their loaded train/predict data to the trainers.
+
+        Args:
+            readers: a dict or list of Reader objects. For the dict case, each key is a trainer's name, and the mapped value is the reader to bind to that trainer. For the list case, each reader is bound to the trainer at the same position.
+        """
+        self._check_phase(phase)
 
         if isinstance(readers, list):
             reader_dict = {k.name: v for k,v in zip(self._trainers, readers)}
@@ -118,12 +147,13 @@ class MultiHeadTrainer(Trainer):
             raise ValueError()
 
         num_heads = len(self._trainers)
-        assert len(reader_dict) == num_heads
+        assert len(reader_dict) == num_heads, "the number of readers is not consistent with the number of trainers."
 
         trainer_dict = {t.name: t for t in self._trainers}
         assert sampling_reference in trainer_dict
 
-        trainer_dict[sampling_reference].fit_reader(reader_dict[sampling_reference], task_id=self._task_id_var)
+        trainer_dict[sampling_reference]._set_task_id(self._task_id_var)
+        trainer_dict[sampling_reference].fit_reader(reader_dict[sampling_reference])
         base_steps_pur_epoch = trainer_dict[sampling_reference]._steps_pur_epoch
 
         self._finish_steps = {}
@@ -152,7 +182,8 @@ class MultiHeadTrainer(Trainer):
             global_steps += max_train_steps
 
             if t.name != sampling_reference:
-                t.fit_reader(reader_dict[t.name], task_id=self._task_id_var)
+                t._set_task_id(self._task_id_var)
+                t.fit_reader(reader_dict[t.name])
             net_inputs.append(t._net_inputs)
             prefixes.append(t.name)
             mrs.append(t.mix_ratio)
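`fit_readers_with_mixratio` accepts the readers in either of the two forms described in its docstring, and `sampling_reference` names the trainer whose dataset defines the length of one epoch. A sketch of both call styles (trainers `t1`/`t2` and readers `r1`/`r2` are assumed to be built and registered beforehand):

```python
# List form: readers are matched to trainers by position.
mh_trainer.fit_readers_with_mixratio([r1, r2],
                                     sampling_reference=t1.name,
                                     num_epochs=2)

# Dict form: readers are matched to trainers by name.
mh_trainer.fit_readers_with_mixratio({t1.name: r1, t2.name: r2},
                                     sampling_reference=t1.name,
                                     num_epochs=2)
```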
@@ -180,7 +211,7 @@ class MultiHeadTrainer(Trainer):
         self._predict_reader = distribute_feeder_fn()
         self._pred_feed_batch_process_fn = feed_batch_process_fn
 
-    def check_finish(self, task_name, silent=False):
+    def _check_finish(self, task_name, silent=False):
         trainers = {t.name:t for t in self._trainers}
         if trainers[task_name]._cur_train_step == self._finish_steps[task_name]:
             if not silent:
@@ -189,30 +220,19 @@ class MultiHeadTrainer(Trainer):
         flags = list(set(self._finish.values()))
         return len(flags) == 1 and flags[0] == True
 
-    def train(self, save_path=None, save_steps=None, save_type='ckpt', print_steps=5):
+    def train(self, print_steps=5):
+        """
+        Start training.
+
+        Args:
+            print_steps: int. The logging frequency of training messages, e.g., current step, loss and speed.
+        """
         iterator = self._train_reader
         self._distribute_train_prog = fluid.CompiledProgram(self._train_prog).with_data_parallel(loss_name=self._loss_var.name)
-
-        save_type = save_type.split(',')
-        if 'predict' in save_type:
-            assert self._pred_head is not None, "Predict head not found! You should build_predict_head first if you want to save predict model."
-            assert save_path is not None and save_steps is not None, 'save_path and save_steps is required to save model.'
-            save_predict = True
-            if not os.path.exists(save_path):
-                os.makedirs(save_path)
-        else:
-            save_predict = False
-
-        if 'ckpt' in save_type:
-            if save_path is not None and save_steps is not None:
-                save_ckpt = True
-                if not os.path.exists(save_path):
-                    os.makedirs(save_path)
-            else:
-                "WARNING: save_path or save_steps is not set, model will not be saved during training."
-                save_ckpt = False
-        else:
-            save_ckpt = False
+        for t in self._trainers:
+            t._set_exe(self._exe)
+            t._set_dist_train(self._distribute_train_prog)
+            t._set_fetch_list(self._fetch_list)
 
         time_begin = time.time()
         for feed in iterator:
@@ -237,7 +257,7 @@ class MultiHeadTrainer(Trainer):
                     time_begin = time.time()
                 self._check_save()
 
-            finish = self.check_finish(self._trainers[task_id].name)
+            finish = self._check_finish(self._trainers[task_id].name)
             if finish:
                 break
 
@@ -262,9 +282,11 @@ class MultiHeadTrainer(Trainer):
         assert isinstance(batch, dict)
         task_id = batch['__task_id'][0]
 
-        rt_outputs = self._trainers[task_id].train_one_step(batch, self._exe, self._distribute_train_prog, self._fetch_list)
+        # rt_outputs = self._trainers[task_id].train_one_step(batch, self._exe, self._distribute_train_prog, self._fetch_list)
+        rt_outputs = self._trainers[task_id].train_one_step(batch)
 
         self._cur_train_step += 1
+        self._check_save()
 
         return rt_outputs, task_id
 
         # if dev_count > 1:
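Putting the pieces of this file together, a multi-task session with the updated API boils down to the following sequence (a sketch; construction of the trainers, backbone, heads and readers is as in demo/demo2/run.py):

```python
import paddlepalm as palm
from paddlepalm.multihead_trainer import MultiHeadTrainer

# trainer1/trainer2, ernie, head1/head2, reader1/reader2, lr and sched are
# assumed to be built beforehand, as in the demo.
mh_trainer = MultiHeadTrainer([trainer1, trainer2])
loss_var = mh_trainer.build_forward(ernie, [head1, head2])
adam = palm.optimizer.Adam(loss_var, lr, sched)
mh_trainer.build_backward(optimizer=adam, weight_decay=0.001)
mh_trainer.fit_readers_with_mixratio([reader1, reader2],
                                     sampling_reference=trainer1.name,
                                     num_epochs=2)
mh_trainer.load_pretrain('pretrain/ernie/params')
mh_trainer.train(print_steps=5)   # saving is configured via set_saver
```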
diff --git a/paddlepalm/trainer.py b/paddlepalm/trainer.py
index bbfd885..cf48527 100644
--- a/paddlepalm/trainer.py
+++ b/paddlepalm/trainer.py
@@ -28,12 +28,21 @@ DEBUG=False
 
 class Trainer(object):
+    """
+    The core unit to start a training/predicting session for a single task. A Trainer builds the computation graph, manages the training and evaluation process, and handles
+    model/checkpoint saving and pretrain_model/checkpoint loading.
+    """
 
-    def __init__(self, name, mix_ratio=1.0, reuse_head_with=None, \
-                 silent=False):
+    def __init__(self, name, mix_ratio=1.0, reuse_head_with=None):
+        """Create a new trainer.
+
+        Args:
+            name: string. The name of the trainer (training task).
+            mix_ratio: the sampling weight of this trainer in multi-task learning mode. Default is 1.0.
+            reuse_head_with: reuse the parameters of the task head with another trainer. Default is None, which means no reuse.
+
+        """
         self._name = name
-        self._verbose = not silent
         self._pred_reader = None
         self._task_head = None
         self._pred_head = None
@@ -63,6 +72,7 @@ class Trainer(object):
 
         self._multi_task = False
         self._as_auxilary = False
+        self._task_id = None
 
         # training process management
         self._mix_ratio = mix_ratio
@@ -92,59 +102,19 @@ class Trainer(object):
         self._lock = False
         self._build_forward = False
 
-    def build_predict_forward(self, pred_backbone, pred_head, pred_prog=None, pred_init_prog=None):
-        self._pred_head = pred_head
-        self._pred_backbone = pred_backbone
-        # self._pred_reader = self._reader.clone(phase='pred')
-        pred_task_attr_from_reader = helper.encode_inputs(self._pred_head.inputs_attrs['reader'], self.name)
-        # pred_task_attr_from_reader = self._pred_head.inputs_attrs['reader']
-
-        # _check_io(pred_backbone.inputs_attr, pred_reader.outputs_attr, in_name=bb_name+'_backbone', out_name='reader.pred')
+    def build_forward(self, backbone, task_head):
+        """
+        Build the forward computation graph for training, which is usually built from the input layer through to the loss node.
-        # _check_io(pred_backbone.inputs_attr, pred_reader.outputs_attr, in_name=bb_name+'_backbone', out_name='reader.pred')
-        # _check_io(pred_parad.inputs_attrs['reader'], pred_reader.outputs_attr, in_name='task_paradigm.pred.reader', out_name='reader.pred')
-        # _check_io(pred_parad.inputs_attrs['backbone'], pred_backbone.outputs_attr, in_name='task_paradigm.pred.backbone', out_name=bb_name+'_backbone')
-        pred_input_names, pred_shape_and_dtypes, pred_name_to_position = reader_helper.merge_input_attrs(pred_backbone.inputs_attr, pred_task_attr_from_reader, insert_taskid=False)
-        pred_input_attrs = [[i, j, k] for i, (j,k) in zip(pred_input_names, pred_shape_and_dtypes)]
-        self._pred_shape_and_dtypes = pred_shape_and_dtypes
-        self._pred_name_to_position = pred_name_to_position
+
+        Args:
+            backbone: a Backbone object with phase == 'train', which is used to extract multi-level text features, e.g., contextual word embeddings and sentence embeddings.
+            task_head: a Head object with phase == 'train', which is used to build task specific output layers.
-        if pred_prog is None:
-            pred_prog = fluid.Program()
-        self._pred_prog = pred_prog
-        if pred_init_prog is None:
-            pred_init_prog = fluid.Program()
-        self._pred_init_prog = pred_init_prog
-        with fluid.program_guard(pred_prog, pred_init_prog):
-            pred_net_inputs = reader_helper.create_net_inputs(pred_input_attrs)
-            # pred_bb_output_vars = pred_backbone.build(pred_net_inputs, scope_name='__paddlepalm_')
-            pred_bb_output_vars = pred_backbone.build(pred_net_inputs)
-        self._pred_net_inputs = pred_net_inputs
-
-        # prepare predict vars for saving inference model
-        with fluid.program_guard(pred_prog, pred_init_prog):
-            cur_inputs = helper.decode_inputs(pred_net_inputs, self.name)
-            # self.pred_input = cur_inputs
-            self._pred_input_name_list, self._pred_input_varname_list = \
-                zip(*[[k, v.name] for k,v in cur_inputs.items()])
-
-            pred_task_inputs = {'backbone': pred_bb_output_vars, 'reader': cur_inputs}
-            scope = self.name + '.'
-            with fluid.unique_name.guard(scope):
-                output_vars = self._build_head(pred_task_inputs, phase='pred', scope=scope)
-
-            if output_vars is not None:
-                self._pred_fetch_name_list, self._pred_fetch_list = zip(*output_vars.items())
-            else:
-                self._pred_fetch_name_list = []
-                self._pred_fetch_var_list = []
-
-        return output_vars
+
+        Return:
+            loss_var: a Variable object. The computational graph variable (node) of the loss.
+        """
 
-    def _set_multitask(self):
-        self._multi_task = True
-
-    def build_forward(self, backbone, task_head):
         # assert not self._multi_task, "you cannot build_forward in trainer when a trainer is wrapped by MultiHeadTrainer."
         self._task_head = task_head
         self._backbone = backbone
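For a single task the forward graph is built the same way, just with one head. A minimal sketch (assuming `ernie` is a Backbone and `cls_head` a Head, both constructed with phase == 'train'):

```python
from paddlepalm.trainer import Trainer

trainer = Trainer('cls_task')
loss_var = trainer.build_forward(ernie, cls_head)
```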
@@ -242,12 +212,87 @@ class Trainer(object):
         #     for var in block.vars:
         #         print("[debug] : %d, %s" % (_id, var))
         self._loss_var = loss_var
+
+        if not self._multi_task:
+            self._init_exe_prog(for_train=True)
+
         return loss_var
 
+    def build_predict_forward(self, pred_backbone, pred_head):
+        """
+        Build the computation graph for evaluation and prediction.
+
+        Args:
+            pred_backbone: a Backbone object with phase == 'predict'. To evaluate the model during training, the predict backbone should keep the same with the train backbone.
+            pred_head: a Head object with phase == 'predict'. To evaluate the model during training, the predict head should keep the same with the train head.
+
+        Return:
+            output_vars: dict type. Each value is a computational graph variable (node) as described by pred_head's outputs_attr.
+        """
+        self._pred_head = pred_head
+        self._pred_backbone = pred_backbone
+        # self._pred_reader = self._reader.clone(phase='pred')
+        pred_task_attr_from_reader = helper.encode_inputs(self._pred_head.inputs_attrs['reader'], self.name)
+        # pred_task_attr_from_reader = self._pred_head.inputs_attrs['reader']
+
+        # _check_io(pred_backbone.inputs_attr, pred_reader.outputs_attr, in_name=bb_name+'_backbone', out_name='reader.pred')
+
+        # _check_io(pred_backbone.inputs_attr, pred_reader.outputs_attr, in_name=bb_name+'_backbone', out_name='reader.pred')
+        # _check_io(pred_parad.inputs_attrs['reader'], pred_reader.outputs_attr, in_name='task_paradigm.pred.reader', out_name='reader.pred')
+        # _check_io(pred_parad.inputs_attrs['backbone'], pred_backbone.outputs_attr, in_name='task_paradigm.pred.backbone', out_name=bb_name+'_backbone')
+        pred_input_names, pred_shape_and_dtypes, pred_name_to_position = reader_helper.merge_input_attrs(pred_backbone.inputs_attr, pred_task_attr_from_reader, insert_taskid=False)
+        pred_input_attrs = [[i, j, k] for i, (j,k) in zip(pred_input_names, pred_shape_and_dtypes)]
+        self._pred_shape_and_dtypes = pred_shape_and_dtypes
+        self._pred_name_to_position = pred_name_to_position
+
+        pred_prog = fluid.Program()
+        self._pred_prog = pred_prog
+        pred_init_prog = fluid.Program()
+        self._pred_init_prog = pred_init_prog
+        with fluid.program_guard(pred_prog, pred_init_prog):
+            pred_net_inputs = reader_helper.create_net_inputs(pred_input_attrs)
+            # pred_bb_output_vars = pred_backbone.build(pred_net_inputs, scope_name='__paddlepalm_')
+            pred_bb_output_vars = pred_backbone.build(pred_net_inputs)
+        self._pred_net_inputs = pred_net_inputs
+
+        # prepare predict vars for saving inference model
+        with fluid.program_guard(pred_prog, pred_init_prog):
+            cur_inputs = helper.decode_inputs(pred_net_inputs, self.name)
+            # self.pred_input = cur_inputs
+            self._pred_input_name_list, self._pred_input_varname_list = \
+                zip(*[[k, v.name] for k,v in cur_inputs.items()])
+
+            pred_task_inputs = {'backbone': pred_bb_output_vars, 'reader': cur_inputs}
+            scope = self.name + '.'
+            with fluid.unique_name.guard(scope):
+                output_vars = self._build_head(pred_task_inputs, phase='predict', scope=scope)
+
+            if output_vars is not None:
+                self._pred_fetch_name_list, self._pred_fetch_list = zip(*output_vars.items())
+            else:
+                self._pred_fetch_name_list = []
+                self._pred_fetch_var_list = []
+
+        if not self._multi_task:
+            self._init_exe_prog(for_train=False)
+            self._exe.run(self._pred_init_prog)
+
+        return output_vars
+
     def build_backward(self, optimizer, weight_decay=None, use_ema=False, ema_decay=None):
+        """
+        Build the backward computation graph and training strategy.
+
+        Args:
+            optimizer: an Optimizer object used to optimize the loss variable, e.g., paddlepalm.optimizer.Adam.
+            weight_decay: optional, default is None (weight decay disabled).
+            use_ema: optional, default is False. The flag to control whether to apply the Exponential Moving Average strategy on parameter updates.
+            ema_decay: optional, default is None. Only works with use_ema == True. Controls the decay rate of the EMA strategy.
+
+        """
         # assert not self._multi_task, "you cannot build_backward in trainer when a trainer is wrapped by MultiHeadTrainer."
         # build optimizer
-        assert self._train_init_prog is not None, "train graph not foung! You should build_forward first."
+        assert self._loss_var is not None and self._train_init_prog is not None, "train graph not found! You should build_forward first."
         optimizer._set_prog(self._train_prog, self._train_init_prog)
         with fluid.program_guard(self._train_prog, self._train_init_prog):
             param_grads = optimizer.build()
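`build_backward` consumes the loss variable returned by `build_forward` and, after this patch, also runs the train init program itself (see the `self._exe.run(self._train_init_prog)` line added below), which is why the demo no longer calls `random_init_params`. A sketch of typical usage, mirroring demo/demo2/run.py (`lr` and `sched` built beforehand):

```python
import paddlepalm as palm

adam = palm.optimizer.Adam(loss_var, lr, sched)
trainer.build_backward(optimizer=adam, weight_decay=0.001)
```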
@@ -290,21 +335,25 @@ class Trainer(object):
         #     print("%d : %s" % (bid, var))
         # print(self._train_prog)
 
+        self._exe.run(self._train_init_prog)
+
     def set_as_aux(self):
-        """Set the task in this trainer as auxilary task. \nCAUSIOUS: This API only works on multi-task learning mode. Each task is set as target task by default.
+        """Set the task in this trainer as an auxiliary task. \nCAUTION: This API only works in multi-task learning mode. Each task is set as a target task by default.
         """
         self._as_auxilary = True
 
-    def fit_reader(self, reader, phase='train', task_id=None):
+    def fit_reader(self, reader, phase='train'):
         """
-        Bind a reader and train/predict data to this trainer.
+        Bind a reader and its loaded train/predict data to the trainer.
 
-        Arguments:
-            - reader:
+        Args:
+            reader: a Reader object. The running phase of the reader should be consistent with the `phase` argument of this method.
+            phase: the running phase. Currently supports: train, predict.
+
         """
         # assert not self._multi_task, "you cannot fit_reader in trainer when a trainer is wrapped by MultiHeadTrainer."
         # load data
+        self._check_phase(phase)
 
         assert self._shape_and_dtypes is not None or self._pred_shape_and_dtypes is not None, "You need to build_forward or build_predict_head first to prepare input features."
 
        # unsure whether to round up here; to be confirmed
@@ -316,8 +365,8 @@ class Trainer(object):
             self._steps_pur_epoch = reader.num_examples // batch_size
         shape_and_dtypes = self._shape_and_dtypes
         name_to_position = self._name_to_position
-        if task_id is not None:
-            self._net_inputs['__task_id'] = task_id
+        if self._task_id is not None:
+            self._net_inputs['__task_id'] = self._task_id
         net_inputs = self._net_inputs
         self._train_batch_size = batch_size
         self._num_examples = reader.num_examples
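A reader is bound after the corresponding graph has been built and its data loaded, and the reader's phase must match the `phase` argument. A sketch for the training phase (reader construction as in demo/demo2/run.py; the data-loading step is omitted and may differ across versions):

```python
import paddlepalm as palm

cls_reader = palm.reader.ClassifyReader(vocab_path, max_seqlen)
cls_reader.register_with(ernie)   # appends the fields the backbone requires
trainer.fit_reader(cls_reader, phase='train')
```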
@@ -366,60 +415,56 @@ class Trainer(object):
         self._pred_feed_batch_process_fn = feed_batch_process_fn
         # return distribute_feeder_fn()
 
-    def _init_exe_prog(self, for_train=True):
-        if not self._train_init and not self._predict_init:
-            on_gpu = gpu_dev_count > 0
-            self._exe = helper.build_executor(on_gpu)
-
-        if for_train:
-            assert self._train_prog is not None, "train graph not foung! You should build_forward first before you random init parameters."
-            self._train_init = True
-        else:
-            assert self._pred_prog is not None, "predict graph not foung! You should build_predict_head first before you random init parameters."
-            self._predict_init = True
-
-    def random_init_params(self):
-
-        if not self._train_init:
-            self._init_exe_prog()
-
-        print('random init params...')
-        self._exe.run(self._train_init_prog)
+    def load_ckpt(self, model_path):
+        """
+        Load a training checkpoint for further training or predicting.
 
-    def load_ckpt(self, model_path, phase='train'):
+        Args:
+            model_path: the path of the saved checkpoint/parameters.
+        """
         # load pretrain model (or ckpt)
         # assert self._exe is not None, "You need to random_init_params before load checkpoints."
-        if phase == 'train' and not self._train_init:
-            self._init_exe_prog(for_train=True)
-            self._exe.run(self._train_init_prog)
-        if phase == 'predict' and not self._predict_init:
-            self._init_exe_prog(for_train=False)
-            self._exe.run(self._pred_init_prog)
-
-        if phase == 'train':
-            assert self._train_init_prog is not None, "train graph not found! You should build_forward first before load checkpoint."
+        # if phase == 'train' and not self._train_init:
+        #     self._init_exe_prog(for_train=True)
+        #     self._exe.run(self._train_init_prog)
+        # if phase == 'predict' and not self._predict_init:
+        #     self._init_exe_prog(for_train=False)
+        #     self._exe.run(self._pred_init_prog)
+
+        assert self._train_init_prog is not None or self._pred_init_prog is not None, "model graph not built. You should at least build_forward or build_predict_forward to load its checkpoint."
+
+        # if phase == 'train':
+        #     assert self._train_init_prog is not None, "train graph not found! You should build_forward first before load checkpoint."
+        if self._train_init_prog is not None:
             saver.init_pretraining_params(
                 self._exe,
                 model_path,
                 main_program=self._train_init_prog,
                 strict=True)
-        elif phase == 'predict':
-            assert self._pred_init_prog is not None, "predict graph not found! You should build_predict_head first before load checkpoint."
+        # elif phase == 'predict':
+        elif self._pred_init_prog is not None:
+            # assert self._pred_init_prog is not None, "predict graph not found! You should build_predict_head first before load checkpoint."
            saver.init_pretraining_params(
                self._exe,
                model_path,
                main_program=self._pred_init_prog,
                strict=True)
         else:
-            raise NotImplementedError()
+            raise Exception("model not found. You should at least build_forward or build_predict_forward to load its checkpoint.")
 
-    def load_predict_model(self, model_path):
         raise NotImplementedError()
 
     def load_pretrain(self, model_path, convert=False):
+        """
+        Load pretrained model (backbone) parameters for training.
+
+        Args:
+            model_path: the path of the saved pretrained parameters.
+        """
         # load pretrain model (or ckpt)
-        assert self._exe is not None, "You need to random_init_params before load pretrain models."
+        # assert self._exe is not None, "You need to random_init_params before load pretrain models."
+        assert self._train_init_prog is not None, "training graph not found. You should at least build_forward to load its pretrained parameters."
 
         saver.init_pretraining_params(
             self._exe,
@@ -428,6 +473,15 @@ class Trainer(object):
             main_program=self._train_init_prog)
 
     def set_saver(self, save_path, save_steps, save_type='ckpt'):
+        """
+        Create a built-in saver for the trainer. The saver will automatically save a checkpoint or predict model every `save_steps` training steps.
+
+        Args:
+            save_path: a string. The path to save checkpoints or predict models.
+            save_steps: an integer. The frequency at which to save models.
+            save_type: a string. The type of model to save. Currently supports checkpoint (ckpt) and predict model (predict); default is ckpt. If both types are needed, set it to "ckpt,predict".
+
+        """
 
         save_type = save_type.split(',')
         if 'predict' in save_type:
@@ -453,7 +507,7 @@ class Trainer(object):
         def temp_func():
             if (self._save_predict or self._save_ckpt) and self._cur_train_step % save_steps == 0:
                 if self._save_predict:
-                    self.save(save_path, suffix='pred.step'+str(self._cur_train_step))
+                    self._save(save_path, suffix='pred.step'+str(self._cur_train_step))
                     print('predict model has been saved at '+os.path.join(save_path, 'pred.step'+str(self._cur_train_step)))
                 if self._save_ckpt:
                     fluid.io.save_persistables(self._exe, os.path.join(save_path, 'ckpt.step'+str(self._cur_train_step)), self._train_prog)
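Saving is now configured once through `set_saver` instead of via `train()` arguments. For example, to write both a checkpoint and a predict model every 5 steps (the path is illustrative):

```python
trainer.set_saver(save_path='outputs', save_steps=5, save_type='ckpt,predict')
```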
@@ -464,10 +518,12 @@ class Trainer(object):
 
         self._check_save = temp_func
 
-    def train(self, save_path=None, save_steps=None, save_type='ckpt', print_steps=5):
+    def train(self, print_steps=5):
         """
-        Argument:
-            save_type: ckpt, predict, pretrain
+        Start training.
+
+        Args:
+            print_steps: int. The logging frequency of training messages, e.g., current step, loss and speed.
         """
         iterator = self._train_reader
         self._distribute_train_prog = fluid.CompiledProgram(self._train_prog).with_data_parallel(loss_name=self._loss_var.name)
@@ -530,19 +586,14 @@ class Trainer(object):
         #         print('checkpoint has been saved at '+save_path)
 
         # print("ALL tasks train finished, exiting...")
-
-    def get_one_batch(self, phase='train'):
-        if phase == 'train':
-            return next(self._train_reader)
-        elif phase == 'predict':
-            return next(self._predict_reader)
-        else:
-            raise NotImplementedError()
 
     def predict(self, output_dir=None, print_steps=1000):
         """
-        Argument:
-            save_type: ckpt, predict, pretrain
+        Start predicting.
+
+        Args:
+            output_dir: str. The path to save prediction results; default is None. If set as None, the results are printed to the screen directly.
+            print_steps: int. The logging frequency of predicting messages, e.g., current progress and speed.
         """
         iterator = self._predict_reader
         self._distribute_pred_prog = fluid.CompiledProgram(self._pred_prog).with_data_parallel()
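With saving handled by `set_saver`, the training and prediction entry points now take only logging-related arguments. A sketch (values are illustrative):

```python
trainer.train(print_steps=5)
trainer.predict(output_dir='outputs/predict', print_steps=1000)
```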
+ # """ + # + # if not self._train_init: + # self._init_exe_prog() + # + # print('random init params...') + # self._exe.run(self._train_init_prog) + + def get_one_batch(self, phase='train'): + self._check_phase(phase) + if phase == 'train': + return next(self._train_reader) + elif phase == 'predict': + return next(self._predict_reader) + else: + raise NotImplementedError() + + def _set_exe(self, exe): + self._exe = exe + + def _set_dist_train(self, prog): + self._distribute_train_prog = prog + + def _set_fetch_list(self, fetch_list): + self._fetch_list = fetch_list + + # def train_one_step(self, batch, executor=None, distribute_train_prog=None, fetch_list=None): + def train_one_step(self, batch): + # exe = self._exe if executor is None else executor + # distribute_train_prog = self._distribute_train_prog if distribute_train_prog is None else distribute_train_prog + # fetch_list = self._fetch_list if fetch_list is None else fetch_list + + exe = self._exe + distribute_train_prog = self._distribute_train_prog + fetch_list = self._fetch_list if gpu_dev_count > 1: feed, mask = batch @@ -599,22 +705,6 @@ class Trainer(object): self._check_save() return rt_outputs - @property - def num_epochs(self): - return self._num_epochs - - @property - def cur_train_steps(self): - return self._cur_train_step - - @property - def cur_train_epoch(self): - return self._cur_train_epoch - - @property - def steps_pur_epoch(self): - return self._steps_pur_epoch - def predict_one_batch(self, batch): if gpu_dev_count > 1: feed, mask = batch @@ -630,14 +720,47 @@ class Trainer(object): rt_outputs = {k:v for k,v in zip(self._pred_fetch_name_list, rt_outputs)} return rt_outputs + @property + def name(self): + return self._name + + @property + def num_examples(self): + return self._num_examples + + @property + def mix_ratio(self): + return self._mix_ratio + + @mix_ratio.setter + def mix_ratio(self, value): + self._mix_ratio = value + + @property + def num_epochs(self): + return self._num_epochs + + @property + def cur_train_step(self): + return self._cur_train_step + + @property + def cur_train_epoch(self): + return self._cur_train_epoch + + @property + def steps_pur_epoch(self): + return self._steps_pur_epoch + def _build_head(self, net_inputs, phase, scope=""): + self._check_phase(phase) if phase == 'train': output_vars = self._task_head.build(net_inputs, scope_name=scope) - if phase == 'pred': + if phase == 'predict': output_vars = self._pred_head.build(net_inputs, scope_name=scope) return output_vars - def save(self, save_path, suffix=None): + def _save(self, save_path, suffix=None): # dirpath = save_path.rstrip('/').rstrip('\\') + suffix if suffix is not None: dirpath = os.path.join(save_path, suffix) @@ -670,22 +793,3 @@ class Trainer(object): print(self._name+': inference model loaded from ' + infer_model_path) return pred_prog - @property - def name(self): - return self._name - - @property - def num_examples(self): - return self._num_examples - - @property - def mix_ratio(self): - if self._mix_ratio is not None: - return self._mix_ratio - else: - raise ValueError("{}: mix_ratio is None".format(self._name)) - - @mix_ratio.setter - def mix_ratio(self, value): - self._lock = True - -- GitLab