From a95d49323ed504e5a9164586f171f408954fd43a Mon Sep 17 00:00:00 2001 From: sserdoubleh Date: Thu, 14 Nov 2019 18:01:56 +0800 Subject: [PATCH] Update Dialog-PLATO: Support paddlepaddle1.6. Release PLATO w/o latent. (#3931) * Upload mode: Dialogue-BLATO. * Update README.md. * Update Dialog-PLATO: Support APIs in paddlepaddle 1.6 and more features. Release PLATO w/o latent. --- PaddleNLP/Research/Dialogue-PLATO/README.md | 61 ++- .../Dialogue-PLATO/models/generator.py | 210 --------- .../Dialogue-PLATO/{ => plato}/args.py | 2 +- .../data/data_loader.py} | 17 +- .../{ => plato/data}/dataset.py | 0 .../Dialogue-PLATO/{ => plato/data}/field.py | 56 ++- .../{ => plato/data}/sampler.py | 33 ++ .../{ => plato/data}/tokenizer.py | 268 ++++++++++- .../{ => plato}/metrics/metrics.py | 0 .../{ => plato}/metrics/metrics_tracker.py | 7 + .../Dialogue-PLATO/plato/models/__init__.py | 18 + .../Dialogue-PLATO/plato/models/generator.py | 445 ++++++++++++++++++ .../{ => plato}/models/model_base.py | 25 + .../{ => plato}/models/unified_transformer.py | 263 +++++++---- .../{ => plato}/modules/embedder.py | 6 +- .../{ => plato}/modules/feedforward.py | 6 +- .../{ => plato}/modules/functions.py | 6 +- .../plato/modules/layer_norm.py | 91 ++++ .../modules/multihead_attention.py | 6 +- .../{ => plato}/modules/parallel.py | 5 +- .../{ => plato}/modules/transformer_block.py | 12 +- .../Dialogue-PLATO/{ => plato}/trainer.py | 141 ++++-- .../Research/Dialogue-PLATO/preprocess.py | 16 +- PaddleNLP/Research/Dialogue-PLATO/run.py | 51 +- .../scripts/DSTC7_AVSD/infer.sh | 16 +- .../scripts/DSTC7_AVSD/train.sh | 4 +- .../scripts/DailyDialog/baseline_infer.sh | 35 ++ .../scripts/DailyDialog/baseline_train.sh | 49 ++ .../scripts/DailyDialog/infer.sh | 17 +- .../scripts/DailyDialog/multi_gpu_train.sh | 55 +++ .../scripts/DailyDialog/topk_infer.sh | 39 ++ .../scripts/DailyDialog/train.sh | 16 +- .../scripts/PersonaChat/infer.sh | 14 +- .../scripts/PersonaChat/train.sh | 4 +- 34 files changed, 1485 insertions(+), 509 deletions(-) delete mode 100644 PaddleNLP/Research/Dialogue-PLATO/models/generator.py rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/args.py (98%) rename PaddleNLP/Research/Dialogue-PLATO/{dataloader.py => plato/data/data_loader.py} (76%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato/data}/dataset.py (100%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato/data}/field.py (87%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato/data}/sampler.py (60%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato/data}/tokenizer.py (56%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/metrics/metrics.py (100%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/metrics/metrics_tracker.py (88%) create mode 100644 PaddleNLP/Research/Dialogue-PLATO/plato/models/__init__.py create mode 100644 PaddleNLP/Research/Dialogue-PLATO/plato/models/generator.py rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/models/model_base.py (81%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/models/unified_transformer.py (75%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/modules/embedder.py (98%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/modules/feedforward.py (98%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/modules/functions.py (90%) create mode 100644 PaddleNLP/Research/Dialogue-PLATO/plato/modules/layer_norm.py rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/modules/multihead_attention.py (99%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/modules/parallel.py (98%) rename 
PaddleNLP/Research/Dialogue-PLATO/{ => plato}/modules/transformer_block.py (94%) rename PaddleNLP/Research/Dialogue-PLATO/{ => plato}/trainer.py (66%) create mode 100644 PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/baseline_infer.sh create mode 100644 PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/baseline_train.sh create mode 100644 PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/multi_gpu_train.sh create mode 100644 PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/topk_infer.sh diff --git a/PaddleNLP/Research/Dialogue-PLATO/README.md b/PaddleNLP/Research/Dialogue-PLATO/README.md index c51a1c9f..ed9b9a85 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/README.md +++ b/PaddleNLP/Research/Dialogue-PLATO/README.md @@ -2,19 +2,25 @@ **PLATO: Pre-trained Dialogue Generation Model with Discrete Latent Variable** [paper link](http://arxiv.org/abs/1910.07931) +**\*\*\*\*\* Update \*\*\*\*\*** + +Nov. 14: Support new APIs in paddlepaddle 1.6.0 (model files in the link have been updated accordingly), multi-GPU training and decoding strategy of top-k sampling. Release our baseline model `PLATO w/o latent`. + ## Requirements ``` - python >= 3.6 -- paddlepaddle >= 1.5.2 +- paddlepaddle >= 1.6.0 - numpy - nltk - tqdm - visualdl >= 1.3.0 (optional) +- regex ``` ## Pre-trained dialogue generation model -A novel pre-training model for dialogue generation is introduced in this work, incorporated with latent discrete variables for one-to-many relationship modeling. Our model is flexible enough to support various kinds of conversations, including chit-chat, knowledge grounded dialogues, and conversational question answering. The pre-training is carried out with Reddit and Twitter corpora. You can download the uncased pre-trained model from: +A novel pre-training model for dialogue generation is introduced in this work, incorporated with latent discrete variables for one-to-many relationship modeling. Our model is flexible enough to support various kinds of conversations, including chit-chat, knowledge grounded dialogues, and conversational question answering. The pre-training is carried out with Reddit and Twitter corpora. You can download the uncased pre-trained model from: * PLATO, uncased [model](https://baidu-nlp.bj.bcebos.com/PLATO/model.tar.gz): 12-layers, 768-hidden, 12-heads, 132M parameters +* PLATO w/o latent, uncased [model](https://baidu-nlp.bj.bcebos.com/PLATO/model-baseline.tar.gz): 12-layers 768-hidden, 12-heads, 109M parameters ```bash mv /path/to/model.tar.gz . @@ -26,19 +32,19 @@ We also provide instructions to fine-tune PLATO on different conversation datase ### Data preparation Download data from the [link](https://baidu-nlp.bj.bcebos.com/PLATO/data.tar.gz). -The tar file contains three processed datasets: DailyDialog, PersonaChat and DSTC7_AVSD. +The tar file contains three processed datasets: `DailyDialog`, `PersonaChat` and `DSTC7_AVSD`. ```bash mv /path/to/data.tar.gz . tar xzf data.tar.gz ``` ### Data format -Our model supports two kinds of data formats for dialogue context: "multi" and "multi_knowledge". -* multi: multi-turn dialogue context. +Our model supports two kinds of data formats for dialogue context: `multi` and `multi_knowledge`. +* `multi`: multi-turn dialogue context. ```txt u_1 __eou__ u_2 __eou__ ... u_n \t r ``` -* multi_knowledge: multi-turn dialogue context with background knowledge. +* `multi_knowledge`: multi-turn dialogue context with background knowledges. ```txt k_1 __eou__ k_2 __eou__ ... k_m \t u_1 __eou__ u_2 __eou__ ... 
u_n \t r ``` @@ -46,7 +52,7 @@ k_1 __eou__ k_2 __eou__ ... k_m \t u_1 __eou__ u_2 __eou__ ... u_n \t r If you want to use this model on other datasets, you can process your data accordingly. ### Train -Fine-tuning the pre-trained model on different ${DATASET}. +Fine-tuning the pre-trained model on different `${DATASET}`. ```bash # DailyDialog / PersonaChat / DSTC7_AVSD DATASET=DailyDialog @@ -54,11 +60,24 @@ sh scripts/${DATASET}/train.sh ``` After training, you can find the output folder `outputs/${DATASET}` (by default). It contatins `best.model` (best results on validation dataset), `hparams.json` (hyper-parameters of training script) and `trainer.log` (training log). + +Fine-tuning the pre-trained model on multiple GPUs. + +Note: You need to install NCCL library and set up the environment variable `LD_LIBRARY` properly. +```bash +sh scripts/DailyDialog/multi_gpu_train.sh +``` + +You can fine-tune PLATO w/o latent on different `${DATASET}`. We provide an example script on DailyDialog dataset. +```bash +sh scripts/DailyDialog/baseline_train.sh +``` + #### Recommended settings For the fine-tuning of our pre-trained model, it usually requires about 10 epochs to reach convergence with learning rate = 1e-5 and about 2-3 epochs to reach convergence with learning rate = 5e-5. -GPU_MEM | batch_size | max_len +GPU Memory | batch size | max len ------|------|------ 16G | 6 | 256 32G | 12 | 256 @@ -69,9 +88,17 @@ Running inference on test dataset. # DailyDialog / PersonaChat / DSTC7_AVSD DATASET=DailyDialog sh scripts/${DATASET}/infer.sh + +# Running inference of PLATO w/o latent +sh scripts/DailyDialog/baseline_infer.sh ``` After inference, you can find the output foler `outputs/${DATASET}.infer` (by default). It contains `infer_0.result.json` (the inference result), `hparams.json` (hyper-parameters of inference scipt) and `trainer.log` (inference log). 
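For reference, the `multi` format described above (`u_1 __eou__ u_2 __eou__ ... u_n \t r`) is plain text and easy to produce for a new dataset. A minimal Python sketch (the helper name and file path are illustrative, not part of this repo):

```python
# Minimal sketch: dump examples in the "multi" data format,
# i.e. context turns joined by " __eou__ ", a tab, then the response.
def write_multi_format(samples, path):
    # samples: iterable of (context_turns, response),
    # e.g. (["hi .", "hello , how are you ?"], "fine , thanks .")
    with open(path, "w", encoding="utf-8") as fp:
        for turns, response in samples:
            fp.write(" __eou__ ".join(turns) + "\t" + response + "\n")

# The "multi_knowledge" format additionally puts "k_1 __eou__ ... k_m" and a tab
# in front of the context turns.
```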
+If you want to use top-k sampling (beam search by default), you can follow the example script: +```bash +sh scripts/DailyDialog/topk_infer.sh +``` + ## Result ### DailyDialog @@ -79,37 +106,41 @@ Model | BLEU-1/2 | Distinct-1/2 | Fluency | Coherence | Informativeness | Overal ------|------|------|------|------|------|------- Seq2Seq | 0.336/0.268 | 0.030/0.128 | 1.85 | 0.37 | 0.44 | 0.33 iVAE_MI | 0.309/0.249 | 0.029/0.250 | 1.53 | 0.34 | 0.59 | 0.30 -Our w/o Latent | 0.405/0.322 | 0.046/0.246 | 1.91 | 1.58 | 1.03 | 1.44 -Our Method | 0.352/0.275 | 0.045/0.253 | 1.97 | 1.57 | 1.23 | 1.48 +Our w/o Latent | **0.405/0.322** | 0.046/0.246 | 1.91 | **1.58** | 1.03 | 1.44 +Our Method | 0.397/0.311 | **0.053/0.291** | **1.97** | 1.57 | **1.23** | **1.48** ### PersonaChat Model | BLEU-1/2 | Distinct-1/2 | Knowledge R/P/F1 | Fluency | Coherence | Informativeness | Overall ------|------|------|------|------|------|-------|------- Seq2Seq | 0.448/0.353 | 0.004/0.016 | 0.004/0.016/0.006 | 1.82 | 0.37 | 0.85 | 0.34 LIC | 0.405/0.320 | 0.019/0.113 | 0.042/0.154/0.064 | 1.95 | 1.34 | 1.09 | 1.29 -Our w/o Latent | 0.458/0.357 | 0.012/0.064 | 0.085/0.263/0.125 | 1.98 | 1.36 | 1.04 | 1.30 -Our Method | 0.418/0.324 | 0.014/0.081 | 0.162/0.542/0.242 | 1.99 | 1.51 | 1.70 | 1.50 +Our w/o Latent | **0.458/0.357** | 0.012/0.064 | 0.085/0.263/0.125 | 1.98 | 1.36 | 1.04 | 1.30 +Our Method | 0.406/0.315 | **0.021/0.121** | **0.142/0.461/0.211** | **1.99** | **1.51** | **1.70** | **1.50** ### DSTC7_AVSD Model | BELU-1 | BELU-2 | BLEU-3 | BLEU-4 | METEOR | ROUGH-L | CIDEr ------|------|------|------|------|------|-------|------- Baseline | 0.629 | 0.485 | 0.383 | 0.309 | 0.215 | 0.487 | 0.746 CMU | 0.718 | 0.584 | 0.478 | 0.394 | 0.267 | 0.563 | 1.094 -Our Method | 0.784 | 0.637 | 0.525 | 0.435 | 0.286 | 0.596 | 1.209 +Our Method | **0.784** | **0.637** | **0.525** | **0.435** | **0.286** | **0.596** | **1.209** Our Method Upper Bound | 0.925 | 0.843 | 0.767 | 0.689 | 0.361 | 0.731 | 1.716 -Note: In the experiments on DSTC_AVSD, the response selection of our method is strengthened with an extra ranking step, which ranks the candidates according to the automatic scores and selects the top one as the final answer. +Note: In the experiments on `DSTC7_AVSD`, the response selection of our method is strengthened with an extra ranking step, which ranks the candidates according to the automatic scores and selects the top one as the final answer. ## Citation If you find PLATO useful in your work, please cite the following Arxiv paper: ``` @article{bao2019plato, title={PLATO: Pre-trained Dialogue Generation Model with Discrete Latent Variable}, - author={Bao, Siqi and He, Huang, Wang, Fan and Wu, Hua}, + author={Bao, Siqi and He, Huang and Wang, Fan and Wu, Hua and Wang, Haifeng}, journal={arXiv preprint arXiv:1910.07931}, year={2019} } ``` + +## Disclaimer +This project aims to facilitate further research progress in dialogue generation. Baidu is not responsible for the 3rd party's generation with the pre-trained system. + ## Contact information For help or issues using PLATO, please submit a GitHub issue. diff --git a/PaddleNLP/Research/Dialogue-PLATO/models/generator.py b/PaddleNLP/Research/Dialogue-PLATO/models/generator.py deleted file mode 100644 index 89c4b518..00000000 --- a/PaddleNLP/Research/Dialogue-PLATO/models/generator.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Generator class. -""" - -import numpy as np -import paddle.fluid as fluid -import paddle.fluid.layers as layers -from paddle.fluid.framework import Variable - -from args import str2bool -import modules.functions as F - - -def repeat(var, times): - if isinstance(var, list): - return [repeat(x, times) for x in var] - elif isinstance(var, dict): - return {k: repeat(v, times) for k, v in var.items()} - elif isinstance(var, Variable): - var = F.unsqueeze(var, [1]) - expand_times = [1] * len(var.shape) - expand_times[1] = times - dtype = var.dtype - var = layers.cast(var, "float32") - var = layers.expand(var, expand_times) - shape = [var.shape[0] * var.shape[1]] + var.shape[2:] - var = layers.reshape(var, shape) - var = layers.cast(var, dtype) - return var - else: - return var - - -def gather(var, idx): - if isinstance(var, list): - return [gather(x, idx) for x in var] - elif isinstance(var, dict): - return {k: gather(v, idx) for k, v in var.items()} - elif isinstance(var, Variable): - out = layers.gather(var, idx) - return out - else: - return var - - -class BeamSearch(object): - - @classmethod - def add_cmdline_argument(cls, parser): - group = parser.add_argument_group("Generator") - group.add_argument("--beam_size", type=int, default=5, - help="The beam size in beam search.") - group.add_argument("--min_gen_len", type=int, default=1, - help="The minimum length of generated response.") - group.add_argument("--max_gen_len", type=int, default=30, - help="The maximum length of generated response.") - group.add_argument("--length_average", type=str2bool, default=False, - help="Whether to use length average.") - group.add_argument("--ignore_unk", type=str2bool, default=True, - help="Whether to ignore unkown token in generation.") - return group - - def __init__(self, bpe, hparams): - self.vocab_size = bpe.vocab_size - self.bos_id = bpe.bos_id - self.eos_id = bpe.eos_id - self.unk_id = bpe.unk_id - self.pad_id = bpe.pad_id - self.beam_size = hparams.beam_size - self.min_gen_len = hparams.min_gen_len - assert self.min_gen_len >= 1 - self.max_gen_len = hparams.max_gen_len - self.length_average = hparams.length_average - self.ignore_unk = hparams.ignore_unk - return - - def __call__(self, step_fn, state): - """ - Running beam search. 
- - @param : step_fn : decoding one step - @type : function - - @param : state : initial state - @type : dict - """ - batch_size = state["batch_size"] - beam_size = self.beam_size - - # shape: [batch_size, 1] - pos_index = layers.range(0, batch_size, 1, dtype="int64") - pos_index = layers.scale(pos_index, beam_size) - pos_index = F.unsqueeze(pos_index, [1]) - - # shape: [batch_size, beam_size, 1] - predictions = layers.fill_constant(shape=[batch_size, beam_size, 1], - dtype="int64", - value=self.bos_id) - - # initial input - state["pred_token"] = predictions[:, :1] - # shape: [batch_size, vocab_size] - scores, state = step_fn(state) - - unk_penalty = np.zeros(self.vocab_size, dtype="float32") - unk_penalty[self.unk_id] = -1e10 - unk_penalty = layers.assign(unk_penalty) - - eos_penalty = np.zeros(self.vocab_size, dtype="float32") - eos_penalty[self.eos_id] = -1e10 - eos_penalty = layers.assign(eos_penalty) - - scores_after_end = np.full(self.vocab_size, -1e10, dtype="float32") - scores_after_end[self.pad_id] = 0 - scores_after_end = layers.assign(scores_after_end) - - if self.ignore_unk: - scores = scores + unk_penalty - scores = scores + eos_penalty - - # shape: [batch_size, beam_size] - sequence_scores, preds = layers.topk(scores, self.beam_size) - - predictions = layers.concat([predictions, F.unsqueeze(preds, [2])], axis=2) - state = repeat(state, beam_size) - - parent_idx_list = [] - pred_list = [] - - for step in range(2, self.max_gen_len + 1): - pre_ids = predictions[:, :, -1:] - state["pred_token"] = layers.reshape(pre_ids, shape=[batch_size * beam_size, 1, 1]) - state["pred_mask"] = 1 - F.equal(state["pred_token"], self.pad_id) - state["pred_pos"] = state["pred_pos"] + 1 - scores, state = step_fn(state) - - # Generate next - # scores shape: [batch_size, beam_size, vocab_size] - if self.ignore_unk: - scores = scores + unk_penalty - - if step <= self.min_gen_len: - scores = scores + eos_penalty - - scores = layers.reshape(scores, shape=[batch_size, beam_size, self.vocab_size]) - - # previous token is [PAD] or [EOS] - pre_eos_mask = F.equal(pre_ids, self.eos_id) + F.equal(pre_ids, self.pad_id) - scores = scores * (1 - pre_eos_mask) + \ - layers.expand(pre_eos_mask, [1, 1, self.vocab_size]) * scores_after_end - - node_scores, node_preds = layers.topk(scores, beam_size) - - if self.length_average: - sequence_scores = layers.scale(sequence_scores, (step - 1.0) / step) - scores = layers.scale(scores, 1.0 / step) - scores = layers.elementwise_add(scores, sequence_scores, axis=0) - else: - scores = layers.elementwise_add(scores, sequence_scores, axis=0) - - scores = layers.reshape(scores, shape=[batch_size, beam_size * self.vocab_size]) - - topk_scores, topk_indices = layers.topk(scores, self.beam_size) - vocab_size = layers.fill_constant(shape=[1], dtype="int64", value=self.vocab_size) - parent_idx = layers.elementwise_floordiv(topk_indices, vocab_size) - preds = layers.elementwise_mod(topk_indices, vocab_size) - - # Gather state / sequence_scores - parent_idx = layers.elementwise_add(parent_idx, pos_index, axis=0) - parent_idx = layers.reshape(parent_idx, [batch_size * beam_size]) - state = gather(state, parent_idx) - sequence_scores = topk_scores - - predictions = layers.reshape(predictions, shape=[batch_size * beam_size, step]) - predictions = gather(predictions, parent_idx) - predictions = layers.reshape(predictions, shape=[batch_size, beam_size, step]) - predictions = layers.concat([predictions, F.unsqueeze(preds, [2])], axis=2) - - pre_ids = predictions[:, :, -1] - pre_eos_mask = 
F.equal(pre_ids, self.eos_id) + F.equal(pre_ids, self.pad_id) - sequence_scores = sequence_scores * pre_eos_mask + layers.scale(1 - pre_eos_mask, -1e10) - - _, indices = layers.argsort(sequence_scores, axis=1) - indices = indices + pos_index - indices = layers.reshape(indices, [-1]) - sequence_scores = layers.reshape(sequence_scores, [batch_size * beam_size]) - predictions = layers.reshape(predictions, [batch_size * beam_size, -1]) - sequence_scores = gather(sequence_scores, indices) - predictions = layers.gather(predictions, indices) - sequence_scores = layers.reshape(sequence_scores, [batch_size, beam_size]) - predictions = layers.reshape(predictions, [batch_size, beam_size, -1]) - - results = { - "preds": predictions[:, -1], - "scores": sequence_scores[:, -1] - } - return results diff --git a/PaddleNLP/Research/Dialogue-PLATO/args.py b/PaddleNLP/Research/Dialogue-PLATO/plato/args.py similarity index 98% rename from PaddleNLP/Research/Dialogue-PLATO/args.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/args.py index d73dba18..f5fc9e58 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/args.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/args.py @@ -56,7 +56,7 @@ class HParams(dict): params_dict = json.load(fp) for k, v in params_dict.items(): if isinstance(v, dict): - self[k] = HParams(v) + self[k].update(HParams(v)) else: self[k] = v diff --git a/PaddleNLP/Research/Dialogue-PLATO/dataloader.py b/PaddleNLP/Research/Dialogue-PLATO/plato/data/data_loader.py similarity index 76% rename from PaddleNLP/Research/Dialogue-PLATO/dataloader.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/data/data_loader.py index 4ff39079..8cd9e20a 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/dataloader.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/data/data_loader.py @@ -20,9 +20,11 @@ import math import paddle.fluid as fluid import paddle.batch -from args import str2bool -from sampler import RandomSampler -from sampler import SequentialSampler +from plato.args import str2bool +from plato.data.sampler import RandomSampler +from plato.data.sampler import SequentialSampler +from plato.data.sampler import SortedSampler +import plato.modules.parallel as parallel class DataLoader(object): @@ -31,11 +33,13 @@ class DataLoader(object): @classmethod def add_cmdline_argument(cls, group): group.add_argument("--shuffle", type=str2bool, default=True) + group.add_argument("--sort_pool_size", type=int, default=0) return group - def __init__(self, dataset, hparams, collate_fn=None, sampler=None, is_test=False): + def __init__(self, dataset, hparams, collate_fn=None, sampler=None, is_test=False, is_train=False): self.dataset = dataset self.collate_fn = collate_fn + self.sort_pool_size = hparams.sort_pool_size if sampler is None: if hparams.shuffle and not is_test: @@ -43,6 +47,9 @@ class DataLoader(object): else: sampler = SequentialSampler(dataset) + if self.sort_pool_size > 0 and not is_test: + sampler = SortedSampler(sampler, self.sort_pool_size) + def reader(): for idx in sampler: yield idx @@ -50,7 +57,7 @@ class DataLoader(object): self.reader = paddle.batch(reader, batch_size=hparams.batch_size, drop_last=False) self.num_batches = math.ceil(len(dataset) / hparams.batch_size) - if hparams.use_data_distributed: + if hparams.use_data_distributed and parallel.Env().nranks > 1 and is_train: self.reader = fluid.contrib.reader.distributed_batch_reader(self.reader) self.num_batches = self.num_batches // fluid.dygraph.parallel.Env().nranks diff --git a/PaddleNLP/Research/Dialogue-PLATO/dataset.py 
b/PaddleNLP/Research/Dialogue-PLATO/plato/data/dataset.py similarity index 100% rename from PaddleNLP/Research/Dialogue-PLATO/dataset.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/data/dataset.py diff --git a/PaddleNLP/Research/Dialogue-PLATO/field.py b/PaddleNLP/Research/Dialogue-PLATO/plato/data/field.py similarity index 87% rename from PaddleNLP/Research/Dialogue-PLATO/field.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/data/field.py index e0355991..a5ca7312 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/field.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/data/field.py @@ -22,8 +22,8 @@ import pickle import time from tqdm import tqdm -from tokenizer import Tokenizer -from args import str2bool +from plato.args import str2bool +from plato.data.tokenizer import Tokenizer def max_lens(X): @@ -77,21 +77,26 @@ class BPETextField(object): group.add_argument("--max_knowledge_num", type=int, default=16, help="The maximum number of knowledges.") group.add_argument("--max_knowledge_len", type=int, default=16, - help="The maximum length of each knowledges") + help="The maximum length of each knowledges.") + group.add_argument("--tokenizer_type", type=str, default="Bert", + choices=["Bert", "GPT2"], + help="The type of tokenizer.") return group - def __init__(self, hparam): + def __init__(self, hparams): special_tokens = [self.pad_token, self.bos_token, self.eos_token, self.unk_token] - self.tokenizer = Tokenizer(vocab_path=hparam.vocab_path, special_tokens=special_tokens) - - self.filtered = hparam.filtered - self.max_len = hparam.max_len - self.min_utt_len = hparam.min_utt_len - self.max_utt_len = hparam.max_utt_len - self.min_ctx_turn = hparam.min_ctx_turn - self.max_ctx_turn = hparam.max_ctx_turn - 1 # subtract reply turn - self.max_knowledge_num = hparam.max_knowledge_num - self.max_knowledge_len = hparam.max_knowledge_len + self.tokenizer = Tokenizer(vocab_path=hparams.vocab_path, + special_tokens=special_tokens, + tokenizer_type=hparams.tokenizer_type) + + self.filtered = hparams.filtered + self.max_len = hparams.max_len + self.min_utt_len = hparams.min_utt_len + self.max_utt_len = hparams.max_utt_len + self.min_ctx_turn = hparams.min_ctx_turn + self.max_ctx_turn = hparams.max_ctx_turn - 1 # subtract reply turn + self.max_knowledge_num = hparams.max_knowledge_num + self.max_knowledge_len = hparams.max_knowledge_len return @property @@ -187,6 +192,27 @@ class BPETextField(object): return self.min_ctx_turn <= len(utts) \ and (not self.filtered or len(utts) <= self.max_ctx_turn) + def build_example_multi_turn(self, req): + examples = [] + src = [self.tokenizer.tokenize(s) for s in req["context"]] + src = [s[-self.max_utt_len:] for s in src[-self.max_ctx_turn:]] + src = [self.numericalize(s) + [self.eos_id] for s in src] + ex = {"src": src} + examples.append(ex) + return examples + + def build_example_multi_turn_with_knowledge(self, req): + examples = [] + src = [self.tokenizer.tokenize(s) for s in req["context"]] + src = [s[-self.max_utt_len:] for s in src[-self.max_ctx_turn:]] + src = [self.numericalize(s) + [self.eos_id] for s in src] + knowledge = [self.tokenizer.tokenize(k) for k in req["knowledge"]] + knowledge = [k[:self.max_knowledge_len] for k in knowledge] + knowledge = [self.numericalize(k) + [self.eos_id] for k in knowledge] + ex = {"src": src, "knowledge": knowledge} + examples.append(ex) + return examples + def build_examples_multi_turn(self, data_file, data_type="train"): print(f"Reading examples from '{data_file}' ...") examples = [] @@ -212,7 +238,7 @@ class 
BPETextField(object): print(f"Built {len(examples)} {data_type.upper()} examples ({ignored} filtered)") return examples - def build_examples_multi_turn_with_knoledge(self, data_file, data_type="train"): + def build_examples_multi_turn_with_knowledge(self, data_file, data_type="train"): print(f"Reading examples from '{data_file}' ...") examples = [] ignored = 0 diff --git a/PaddleNLP/Research/Dialogue-PLATO/sampler.py b/PaddleNLP/Research/Dialogue-PLATO/plato/data/sampler.py similarity index 60% rename from PaddleNLP/Research/Dialogue-PLATO/sampler.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/data/sampler.py index c5dd6969..f807ed10 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/sampler.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/data/sampler.py @@ -47,10 +47,43 @@ class RandomSampler(Sampler): def __init__(self, dataset): self.dataset = dataset + self.epoch = 0 return def __len__(self): return len(self.dataset) def __iter__(self): + np.random.seed(self.epoch) + self.epoch += 1 return iter(np.random.permutation(len(self))) + + +class SortedSampler(Sampler): + """ Sorted Sampler. + + Sort each block of examples by key. + """ + + def __init__(self, sampler, sort_pool_size, key="src"): + self.sampler = sampler + self.sort_pool_size = sort_pool_size + self.key = lambda idx: len(self.sampler.dataset[idx][key]) + return + + def __len__(self): + return len(self.sampler) + + def __iter__(self): + pool = [] + for idx in self.sampler: + pool.append(idx) + if len(pool) == self.sort_pool_size: + pool = sorted(pool, key=self.key) + for i in pool: + yield i + pool = [] + if len(pool) > 0: + pool = sorted(pool, key=self.key) + for i in pool: + yield i diff --git a/PaddleNLP/Research/Dialogue-PLATO/tokenizer.py b/PaddleNLP/Research/Dialogue-PLATO/plato/data/tokenizer.py similarity index 56% rename from PaddleNLP/Research/Dialogue-PLATO/tokenizer.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/data/tokenizer.py index 3cca4261..7c523eb1 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/tokenizer.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/data/tokenizer.py @@ -18,8 +18,11 @@ Tokenizer class. 
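To make the behaviour of the `SortedSampler` added above concrete: it buffers `sort_pool_size` indices from the wrapped sampler, sorts each pool by the length of the example's `src` field, and yields the pool in that order, so examples of similar length land in the same batch and padding is reduced. A rough usage sketch (toy dataset; assumes the `plato` package is importable):

```python
from plato.data.sampler import RandomSampler, SortedSampler

# Toy dataset: only the "src" field matters, since its length drives the sort key.
dataset = [{"src": list(range(n))} for n in (5, 30, 8, 25, 3, 40, 12, 7)]

sampler = SortedSampler(RandomSampler(dataset), sort_pool_size=4)
# Each shuffled pool of 4 indices comes out ordered by len(dataset[idx]["src"]).
print(list(sampler))
```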
from __future__ import absolute_import, division, print_function, unicode_literals import collections +import json import logging import os +import regex as re +import sys import unicodedata @@ -41,40 +44,71 @@ def clean_string(string): class Tokenizer(object): - def __init__(self, vocab_path, special_tokens=[]): - self.spec_convert_dict = {"[BOS]": "[unused0]", "[EOS]": "[unused1]"} - self.spec_revert_dict = {v: k for k, - v in self.spec_convert_dict.items()} - special_tokens = [self.spec_convert_dict.get(tok, tok) - for tok in special_tokens] - self.special_tokens = ("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]") - self.special_tokens += tuple(x for x in special_tokens if x not in self.special_tokens) - - self._tokenizer = BertTokenizer(vocab_path, never_split=self.special_tokens) - for tok in self.special_tokens: - assert tok in self._tokenizer.vocab, f"special token '{tok}' is not in the vocabulary" - - self.vocab_size = len(self._tokenizer.vocab) + def __init__(self, vocab_path, special_tokens=[], tokenizer_type="Bert"): + self.tokenizer_type = tokenizer_type + if tokenizer_type == "Bert": + self.spec_convert_dict = {"[BOS]": "[unused0]", "[EOS]": "[unused1]"} + self.spec_revert_dict = {v: k for k, + v in self.spec_convert_dict.items()} + special_tokens = [self.spec_convert_dict.get(tok, tok) + for tok in special_tokens] + self.special_tokens = ("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]") + self.special_tokens += tuple(x for x in special_tokens if x not in self.special_tokens) + + self._tokenizer = BertTokenizer(vocab_path, never_split=self.special_tokens) + for tok in self.special_tokens: + assert tok in self._tokenizer.vocab, f"special token '{tok}' is not in the vocabulary" + self.vocab_size = len(self._tokenizer.vocab) + elif tokenizer_type == "GPT2": + self.spec_convert_dict = {"[UNK]": ""} + self.spec_revert_dict = {v: k for k, + v in self.spec_convert_dict.items()} + special_tokens = [tok for tok in special_tokens + if tok not in self.spec_convert_dict] + vocab_file = os.path.join(vocab_path, "vocab.json") + merges_file = os.path.join(vocab_path, "merges.txt") + self._tokenizer = GPT2Tokenizer(vocab_file, merges_file, special_tokens=special_tokens) + self.num_specials = len(special_tokens) + self.vocab_size = len(self._tokenizer) + else: + raise ValueError def tokenize(self, text): return self._tokenizer.tokenize(text) def convert_tokens_to_ids(self, tokens): - tokens = [self.spec_convert_dict.get(tok, tok) for tok in tokens] - ids = self._tokenizer.convert_tokens_to_ids(tokens) - return ids + if self.tokenizer_type == "Bert": + tokens = [self.spec_convert_dict.get(tok, tok) for tok in tokens] + ids = self._tokenizer.convert_tokens_to_ids(tokens) + return ids + else: + tokens = [self.spec_convert_dict.get(tok, tok) for tok in tokens] + ids = self._tokenizer.convert_tokens_to_ids(tokens) + ids = [(i + self.num_specials) % self.vocab_size for i in ids] + return ids def convert_ids_to_tokens(self, ids): - tokens = self._tokenizer.convert_ids_to_tokens(ids) - tokens = [self.spec_revert_dict.get(tok, tok) for tok in tokens] - return tokens + if self.tokenizer_type == "Bert": + tokens = self._tokenizer.convert_ids_to_tokens(ids) + tokens = [self.spec_revert_dict.get(tok, tok) for tok in tokens] + return tokens + else: + ids = [(i - self.num_specials) % self.vocab_size for i in ids] + tokens = self._tokenizer.convert_ids_to_tokens(ids) + tokens = [self.spec_revert_dict.get(tok, tok) for tok in tokens] + return tokens def decode(self, ids, ignore_tokens=[]): tokens = 
self.convert_ids_to_tokens(ids) if len(ignore_tokens) > 0: ignore_tokens = set(ignore_tokens) tokens = [tok for tok in tokens if tok not in ignore_tokens] - string = " ".join(tokens).replace(" ##", "") + if self.tokenizer_type == "Bert": + string = " ".join(tokens).replace(" ##", "") + else: + string = "".join(tokens) + string = bytearray([self._tokenizer.byte_decoder[c] + for c in string]).decode("utf-8") string = clean_string(string) return string @@ -400,3 +434,195 @@ def _is_punctuation(char): if cat.startswith("P"): return True return False + +# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for OpenAI GPT.""" + + +try: + from functools import lru_cache +except ImportError: + # Just a dummy decorator to get the checks to run on python2 + # because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now. + def lru_cache(): + return lambda func: func + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + _chr = unichr if sys.version_info[0] == 2 else chr + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [_chr(n) for n in cs] + return dict(zip(bs, cs)) + +def get_pairs(word): + """Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + +class GPT2Tokenizer(object): + """ + GPT-2 BPE tokenizer. 
Peculiarities: + - Byte-level BPE + """ + + def __init__(self, vocab_file, merges_file, errors='replace', special_tokens=None, max_len=None): + self.max_len = max_len if max_len is not None else int(1e12) + self.encoder = json.load(open(vocab_file)) + self.decoder = {v:k for k,v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v:k for k, v in self.byte_encoder.items()} + bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_data] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + + # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions + self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") + + self.special_tokens = {} + self.special_tokens_decoder = {} + self.set_special_tokens(special_tokens) + + def __len__(self): + return len(self.encoder) + len(self.special_tokens) + + def set_special_tokens(self, special_tokens): + """ Add a list of additional tokens to the encoder. + The additional tokens are indexed starting from the last index of the + current vocabulary in the order of the `special_tokens` list. + """ + if not special_tokens: + self.special_tokens = {} + self.special_tokens_decoder = {} + return + self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens)) + self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()} + logger.info("Special tokens {}".format(self.special_tokens)) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def tokenize(self, text): + """ Tokenize a string. """ + bpe_tokens = [] + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[ord(b)] for b in token if ord(b) in self.byte_encoder) + if token == '': + continue + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def convert_tokens_to_ids(self, tokens): + """ Converts a sequence of tokens into ids using the vocab. """ + ids = [] + if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)): + if tokens in self.special_tokens: + return self.special_tokens[tokens] + else: + return self.encoder.get(tokens, 0) + for token in tokens: + if token in self.special_tokens: + ids.append(self.special_tokens[token]) + else: + ids.append(self.encoder.get(token, 0)) + if len(ids) > self.max_len: + logger.warning( + "Token indices sequence length is longer than the specified maximum " + " sequence length for this OpenAI GPT model ({} > {}). 
Running this" + " sequence through the model will result in indexing errors".format(len(ids), self.max_len) + ) + return ids + + def convert_ids_to_tokens(self, ids, skip_special_tokens=False): + """Converts a sequence of ids in BPE tokens using the vocab.""" + tokens = [] + for i in ids: + if i in self.special_tokens_decoder: + if not skip_special_tokens: + tokens.append(self.special_tokens_decoder[i]) + else: + tokens.append(self.decoder[i]) + return tokens + + def encode(self, text): + return self.convert_tokens_to_ids(self.tokenize(text)) + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) + return text diff --git a/PaddleNLP/Research/Dialogue-PLATO/metrics/metrics.py b/PaddleNLP/Research/Dialogue-PLATO/plato/metrics/metrics.py similarity index 100% rename from PaddleNLP/Research/Dialogue-PLATO/metrics/metrics.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/metrics/metrics.py diff --git a/PaddleNLP/Research/Dialogue-PLATO/metrics/metrics_tracker.py b/PaddleNLP/Research/Dialogue-PLATO/plato/metrics/metrics_tracker.py similarity index 88% rename from PaddleNLP/Research/Dialogue-PLATO/metrics/metrics_tracker.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/metrics/metrics_tracker.py index 2c47249e..eb621a46 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/metrics/metrics_tracker.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/metrics/metrics_tracker.py @@ -16,6 +16,7 @@ MetricsTracker class """ from collections import defaultdict +import math class MetricsTracker(object): @@ -66,6 +67,9 @@ class MetricsTracker(object): for key, val in self.metrics_val.items(): metric_str = f"{key.upper()}-{val:.3f}" metric_strs.append(metric_str) + if "token_nll" in self.metrics_val: + metric_str = f"TOKEN_PPL-{math.exp(self.metrics_val['token_nll']):.3f}" + metric_strs.append(metric_str) metric_strs = " ".join(metric_strs) return metric_strs @@ -74,5 +78,8 @@ class MetricsTracker(object): for key, val in self.metrics_avg.items(): metric_str = f"{key.upper()}-{val:.3f}" metric_strs.append(metric_str) + if "token_nll" in self.metrics_avg: + metric_str = f"TOKEN_PPL-{math.exp(self.metrics_avg['token_nll']):.3f}" + metric_strs.append(metric_str) metric_strs = " ".join(metric_strs) return metric_strs diff --git a/PaddleNLP/Research/Dialogue-PLATO/plato/models/__init__.py b/PaddleNLP/Research/Dialogue-PLATO/plato/models/__init__.py new file mode 100644 index 00000000..a3e7df80 --- /dev/null +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/models/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Loading models. 
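The `MetricsTracker` change above also reports token-level perplexity, which is simply the exponential of the tracked average token negative log-likelihood:

```python
import math

token_nll = 3.21                   # averaged token NLL (illustrative value)
token_ppl = math.exp(token_nll)    # what the new TOKEN_PPL entry prints (~24.8)
```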
+""" + +import plato.models.unified_transformer diff --git a/PaddleNLP/Research/Dialogue-PLATO/plato/models/generator.py b/PaddleNLP/Research/Dialogue-PLATO/plato/models/generator.py new file mode 100644 index 00000000..f28df424 --- /dev/null +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/models/generator.py @@ -0,0 +1,445 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Generator class. +""" + +import bisect +import math +import sys + +import numpy as np +import paddle.fluid as fluid +import paddle.fluid.layers as layers +from paddle.fluid.framework import Variable + +from plato.args import str2bool +import plato.modules.functions as F + + +def repeat(var, times): + if isinstance(var, list): + return [repeat(x, times) for x in var] + elif isinstance(var, dict): + return {k: repeat(v, times) for k, v in var.items()} + elif isinstance(var, Variable): + var = F.unsqueeze(var, [1]) + expand_times = [1] * len(var.shape) + expand_times[1] = times + dtype = var.dtype + var = layers.cast(var, "float32") + var = layers.expand(var, expand_times) + shape = [var.shape[0] * var.shape[1]] + var.shape[2:] + var = layers.reshape(var, shape) + var = layers.cast(var, dtype) + return var + else: + return var + + +def gather(var, idx): + if isinstance(var, list): + return [gather(x, idx) for x in var] + elif isinstance(var, dict): + return {k: gather(v, idx) for k, v in var.items()} + elif isinstance(var, Variable): + out = layers.gather(var, idx) + return out + else: + return var + + +class Generator(object): + """ Genrator class. """ + + _registry = dict() + + @classmethod + def register(cls, name): + Generator._registry[name] = cls + return + + @staticmethod + def by_name(name): + return Generator._registry[name] + + @staticmethod + def create(hparams, *args, **kwargs): + """ Create generator. """ + generator_cls = Generator.by_name(hparams.generator) + return generator_cls(hparams, *args, **kwargs) + + @classmethod + def add_cmdline_argument(cls, parser): + group = parser.add_argument_group("Generator") + group.add_argument("--generator", type=str, default="BeamSearch", + choices=["TopKSampling", "TopPSampling", "GreedySampling", + "BeamSearch"]) + group.add_argument("--min_gen_len", type=int, default=1, + help="The minimum length of generated response.") + group.add_argument("--max_gen_len", type=int, default=30, + help="The maximum length of generated response.") + args, _ = parser.parse_known_args() + generator_cls = cls.by_name(args.generator) + generator_cls.add_cmdline_argument(group) + return group + + def __init__(self, hparams, bpe): + self.vocab_size = bpe.vocab_size + self.bos_id = bpe.bos_id + self.eos_id = bpe.eos_id + self.unk_id = bpe.unk_id + self.pad_id = bpe.pad_id + self.min_gen_len = hparams.min_gen_len + self.max_gen_len = hparams.max_gen_len + assert 1 <= self.min_gen_len <= self.max_gen_len + return + + def __call__(self, step_fn, state): + """ + Running generation. 
+ + @param : step_fn : decoding one step + @type : function + + @param : state : initial state + @type : dict + """ + raise NotImplementedError + + +class Sampling(Generator): + """ Sampling Generator. """ + + @classmethod + def add_cmdline_argument(cls, group): + group.add_argument("--ignore_unk", type=str2bool, default=True, + help="Whether to ignore unkown token in generation.") + group.add_argument("--sampling_temperature", type=float, default=1.0) + return group + + def __init__(self, hparams, bpe): + super().__init__(hparams, bpe) + self.ignore_unk = hparams.ignore_unk + self.temperature = hparams.sampling_temperature + return + + def _sampling(self, scores): + """ Sampling function. """ + raise NotImplementedError + + def __call__(self, step_fn, state): + """ + Running generation. + + @param : step_fn : decoding one step + @type : function + + @param : state : initial state + @type : dict + """ + batch_size = state["batch_size"] + vocab_size = self.vocab_size + + pos_index = layers.range(0, batch_size, 1, dtype="int64") + pos_index = layers.scale(pos_index, vocab_size) + + # shape: [batch_size, beam_size, 1] + predictions = layers.fill_constant(shape=[batch_size, 1], + dtype="int64", + value=self.bos_id) + sequence_scores = layers.fill_constant(shape=[batch_size], + dtype="float32", + value=0.0) + + unk_penalty = np.zeros(vocab_size, dtype="float32") + unk_penalty[self.unk_id] = -1e10 + unk_penalty = layers.assign(unk_penalty) + + eos_penalty = np.zeros(vocab_size, dtype="float32") + eos_penalty[self.eos_id] = -1e10 + eos_penalty = layers.assign(eos_penalty) + + scores_after_end = np.full(vocab_size, -1e10, dtype="float32") + scores_after_end[self.pad_id] = 0 + scores_after_end = layers.assign(scores_after_end) + + # initial input + for step in range(1, self.max_gen_len + 1): + pre_ids = predictions[:, -1:] + state["pred_token"] = F.unsqueeze(pre_ids, [2]) + if step > 1: + state["pred_mask"] = 1 - F.equal(state["pred_token"], self.pad_id) + state["pred_pos"] = state["pred_pos"] + 1 + scores, state = step_fn(state) + + # Generate next + # scores shape: [batch_size, vocab_size] + if self.ignore_unk: + scores = scores + unk_penalty + + if step <= self.min_gen_len: + scores = scores + eos_penalty + + # previous token is [PAD] or [EOS] + # shape: [batch_size, 1] + pre_eos_mask = F.equal(pre_ids, self.eos_id) + F.equal(pre_ids, self.pad_id) + scores = scores * (1 - pre_eos_mask) + \ + layers.expand(pre_eos_mask, [1, vocab_size]) * scores_after_end + + scores = scores / self.temperature + preds = self._sampling(scores) + + predictions = layers.concat([predictions, F.unsqueeze(preds, [1])], axis=1) + + scores = layers.reshape(scores, [batch_size * vocab_size]) + preds = preds + pos_index + scores = gather(scores, preds) + sequence_scores = sequence_scores + scores + + results = { + "preds": predictions, + "scores": sequence_scores + } + return results + + +class GreedySampling(Sampling): + """ Greedy sampling. """ + + @classmethod + def add_cmdline_argument(cls, group): + return Sampling.add_cmdline_argument(group) + + def _sampling(self, logits): + """ Implement greedy sampling. """ + preds = layers.argmax(logits, axis=1) + return preds + + +class TopKSampling(Sampling): + """ Top-k sampling. 
""" + + @classmethod + def add_cmdline_argument(cls, group): + Sampling.add_cmdline_argument(group) + group.add_argument("--top_k_ratio", type=float, default=None) + group.add_argument("--top_k_num", type=int, default=None) + return group + + def __init__(self, hparams, bpe): + super().__init__(hparams, bpe) + assert hparams.top_k_ratio is not None or hparams.top_k_num is not None + if hparams.top_k_num is not None: + self.top_k_num = hparams.top_k_num + else: + self.top_k_num = math.floor(hparams.top_k_ratio * self.vocab_size) + assert self.top_k_num >= 1 + return + + def _sampling(self, logits): + """ Implement top-k sampling. """ + probs = layers.softmax(logits, axis=1) + probs, indices = layers.topk(probs, self.top_k_num) + probs = probs / layers.reduce_sum(probs, dim=1, keep_dim=True) + preds = [] + for p, ids in zip(probs.numpy(), indices.numpy()): + o = np.random.choice(ids, p=p) + preds.append(o) + preds = np.array(preds, dtype="int64") + return fluid.dygraph.to_variable(preds) + + +class TopPSampling(Sampling): + """ Top-p sampling. """ + + @classmethod + def add_cmdline_argument(cls, group): + Sampling.add_cmdline_argument(group) + group.add_argument("--top_p_ratio", type=float, default=1.0) + return group + + def __init__(self, hparams, bpe): + super().__init__(hparams, bpe) + self.top_p_ratio = hparams.top_p_ratio + return + + def _sampling(self, logits): + """ Implement top-k sampling. """ + probs = layers.softmax(logits, axis=1) + preds = [] + for p in probs.numpy(): + ids = np.argsort(-p) + p = p[ids] + c_p = np.cumsum(p) + i = bisect.bisect_right(c_p, self.top_p_ratio) + 1 + o = np.random.choice(ids[:i], p=p[:i]/np.sum(p[:i])) + preds.append(o) + preds = np.array(preds, dtype="int64") + return fluid.dygraph.to_variable(preds) + + +class BeamSearch(Generator): + """ BeamSearch generator. """ + + @classmethod + def add_cmdline_argument(cls, group): + group.add_argument("--beam_size", type=int, default=5, + help="The beam size in beam search.") + group.add_argument("--length_average", type=str2bool, default=False, + help="Whether to use length average.") + group.add_argument("--length_penalty", type=float, default=-1.0, + help="The parameter(alpha) of length penalty.") + group.add_argument("--ignore_unk", type=str2bool, default=True, + help="Whether to ignore unkown token in generation.") + return group + + def __init__(self, hparams, bpe): + super().__init__(hparams, bpe) + self.beam_size = hparams.beam_size + self.length_average = hparams.length_average + self.length_penalty = hparams.length_penalty + self.ignore_unk = hparams.ignore_unk + return + + def __call__(self, step_fn, state): + """ + Running beam search. 
+ + @param : step_fn : decoding one step + @type : function + + @param : state : initial state + @type : dict + """ + batch_size = state["batch_size"] + beam_size = self.beam_size + + # shape: [batch_size, 1] + pos_index = layers.range(0, batch_size, 1, dtype="int64") + pos_index = layers.scale(pos_index, beam_size) + pos_index = F.unsqueeze(pos_index, [1]) + + # shape: [batch_size, beam_size, 1] + predictions = layers.fill_constant(shape=[batch_size, beam_size, 1], + dtype="int64", + value=self.bos_id) + + # initial input + state["pred_token"] = predictions[:, :1] + # shape: [batch_size, vocab_size] + scores, state = step_fn(state) + + unk_penalty = np.zeros(self.vocab_size, dtype="float32") + unk_penalty[self.unk_id] = -1e10 + unk_penalty = layers.assign(unk_penalty) + + eos_penalty = np.zeros(self.vocab_size, dtype="float32") + eos_penalty[self.eos_id] = -1e10 + eos_penalty = layers.assign(eos_penalty) + + scores_after_end = np.full(self.vocab_size, -1e10, dtype="float32") + scores_after_end[self.pad_id] = 0 + scores_after_end = layers.assign(scores_after_end) + + if self.ignore_unk: + scores = scores + unk_penalty + scores = scores + eos_penalty + + # shape: [batch_size, beam_size] + sequence_scores, preds = layers.topk(scores, self.beam_size) + + predictions = layers.concat([predictions, F.unsqueeze(preds, [2])], axis=2) + state = repeat(state, beam_size) + + parent_idx_list = [] + pred_list = [] + + for step in range(2, self.max_gen_len + 1): + pre_ids = predictions[:, :, -1:] + state["pred_token"] = layers.reshape(pre_ids, shape=[batch_size * beam_size, 1, 1]) + state["pred_mask"] = 1 - F.equal(state["pred_token"], self.pad_id) + state["pred_pos"] = state["pred_pos"] + 1 + scores, state = step_fn(state) + + # Generate next + # scores shape: [batch_size, beam_size, vocab_size] + if self.ignore_unk: + scores = scores + unk_penalty + + if step <= self.min_gen_len: + scores = scores + eos_penalty + + scores = layers.reshape(scores, shape=[batch_size, beam_size, self.vocab_size]) + + # previous token is [PAD] or [EOS] + pre_eos_mask = F.equal(pre_ids, self.eos_id) + F.equal(pre_ids, self.pad_id) + + scores = scores * (1 - pre_eos_mask) + \ + layers.expand(pre_eos_mask, [1, 1, self.vocab_size]) * scores_after_end + if self.length_average: + scaled_value = pre_eos_mask + (1 - pre_eos_mask) * (1 - 1 / step) + sequence_scores = F.unsqueeze(sequence_scores, [2]) * scaled_value + scaled_value = pre_eos_mask + (1 - pre_eos_mask) * (1 / step) + scores = scores * scaled_value + elif self.length_penalty >= 0.0: + scaled_value = pre_eos_mask + (1 - pre_eos_mask) * \ + (math.pow((4 + step) / (5 + step), self.length_penalty)) + sequence_scores = layers.elementwise_mul(scaled_value, sequence_scores, axis=0) + scaled_value = pre_eos_mask + (1 - pre_eos_mask) * \ + (math.pow(1 / (5 + step), self.length_penalty)) + scores = scores * scaled_value + scores = layers.elementwise_add(scores, sequence_scores, axis=0) + scores = layers.reshape(scores, shape=[batch_size, beam_size * self.vocab_size]) + + topk_scores, topk_indices = layers.topk(scores, beam_size) + vocab_size = layers.fill_constant(shape=[1], dtype="int64", value=self.vocab_size) + parent_idx = layers.elementwise_floordiv(topk_indices, vocab_size) + preds = layers.elementwise_mod(topk_indices, vocab_size) + + # Gather state / sequence_scores + parent_idx = layers.elementwise_add(parent_idx, pos_index, axis=0) + parent_idx = layers.reshape(parent_idx, [batch_size * beam_size]) + state = gather(state, parent_idx) + sequence_scores = topk_scores + + 
predictions = layers.reshape(predictions, shape=[batch_size * beam_size, step]) + predictions = gather(predictions, parent_idx) + predictions = layers.reshape(predictions, shape=[batch_size, beam_size, step]) + predictions = layers.concat([predictions, F.unsqueeze(preds, [2])], axis=2) + + pre_ids = predictions[:, :, -1] + pre_eos_mask = F.equal(pre_ids, self.eos_id) + F.equal(pre_ids, self.pad_id) + sequence_scores = sequence_scores * pre_eos_mask + layers.scale(1 - pre_eos_mask, -1e10) + + _, indices = layers.argsort(sequence_scores, axis=1) + indices = indices + pos_index + indices = layers.reshape(indices, [-1]) + sequence_scores = layers.reshape(sequence_scores, [batch_size * beam_size]) + predictions = layers.reshape(predictions, [batch_size * beam_size, -1]) + sequence_scores = gather(sequence_scores, indices) + predictions = layers.gather(predictions, indices) + sequence_scores = layers.reshape(sequence_scores, [batch_size, beam_size]) + predictions = layers.reshape(predictions, [batch_size, beam_size, -1]) + + results = { + "preds": predictions[:, -1], + "scores": sequence_scores[:, -1] + } + return results + +BeamSearch.register("BeamSearch") +GreedySampling.register("GreedySampling") +TopKSampling.register("TopKSampling") +TopPSampling.register("TopPSampling") diff --git a/PaddleNLP/Research/Dialogue-PLATO/models/model_base.py b/PaddleNLP/Research/Dialogue-PLATO/plato/models/model_base.py similarity index 81% rename from PaddleNLP/Research/Dialogue-PLATO/models/model_base.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/models/model_base.py index 8c7187fe..9d801e92 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/models/model_base.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/models/model_base.py @@ -23,14 +23,39 @@ class ModelBase(fluid.dygraph.Layer): """ Basic model wrapper for static graph and dygrpah. """ + _registry = dict() + + @classmethod + def register(cls, name): + ModelBase._registry[name] = cls + return + + @staticmethod + def by_name(name): + return ModelBase._registry[name] + + @staticmethod + def create(name_scope, hparams, *args, **kwargs): + model_cls = ModelBase.by_name(hparams.model) + return model_cls(name_scope, hparams, *args, **kwargs) @classmethod def add_cmdline_argument(cls, parser): """ Add cmdline argument. """ group = parser.add_argument_group("Model") group.add_argument("--init_checkpoint", type=str, default=None) + group.add_argument("--model", type=str, default="UnifiedTransformer", + choices=["UnifiedTransformer"]) + args, _ = parser.parse_known_args() + model_cls = ModelBase.by_name(args.model) + model_cls.add_cmdline_argument(group) return group + def __init__(self, name_scope, hparams): + super().__init__(name_scope) + self.init_checkpoint = hparams.init_checkpoint + return + def __call__(self, *args, **kwargs): """ Re-implement __call__ function in dygraph mode. 
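Both `Generator` and `ModelBase` above follow the same registry idiom: each concrete class registers itself under a string name, and the `--generator` / `--model` flags select the class at runtime through `by_name` / `create`. A rough sketch of the lookup (assumes the `plato` package is importable and that `UnifiedTransformer` registers itself when its module is imported):

```python
from plato.models.generator import Generator
from plato.models.model_base import ModelBase
import plato.models  # the package __init__ imports unified_transformer, triggering registration

print(Generator.by_name("TopKSampling"))        # class picked by --generator
print(ModelBase.by_name("UnifiedTransformer"))  # class picked by --model

# Instantiation then goes through the factories, roughly:
#   generator = Generator.create(hparams, bpe)
#   model = ModelBase.create(name_scope, hparams, generator)
```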
""" if not self._built: diff --git a/PaddleNLP/Research/Dialogue-PLATO/models/unified_transformer.py b/PaddleNLP/Research/Dialogue-PLATO/plato/models/unified_transformer.py similarity index 75% rename from PaddleNLP/Research/Dialogue-PLATO/models/unified_transformer.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/models/unified_transformer.py index 92a06ced..a95eb9df 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/models/unified_transformer.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/models/unified_transformer.py @@ -16,27 +16,27 @@ UnifiedTransformer """ import numpy as np +import paddle import paddle.fluid as fluid +from paddle.fluid.dygraph import FC import paddle.fluid.layers as layers -from args import str2bool -from modules.embedder import Embedder -import modules.functions as F -from modules.transformer_block import TransformerBlock -from models.model_base import ModelBase -from paddle.fluid.dygraph import LayerNorm -from paddle.fluid.dygraph import FC +from plato.args import str2bool +from plato.modules.embedder import Embedder +import plato.modules.functions as F +from plato.modules.layer_norm import LayerNorm +from plato.modules.transformer_block import TransformerBlock +from plato.models.model_base import ModelBase class UnifiedTransformer(ModelBase): """ - Implement of unified transformer. + Implement unified transformer. """ @classmethod - def add_cmdline_argument(cls, parser): + def add_cmdline_argument(cls, group): """ Add cmdline argument. """ - group = ModelBase.add_cmdline_argument(parser) group.add_argument("--num_token_embeddings", type=int, default=-1, help="The number of tokens in vocabulary. " "It will be automatically calculated after loading vocabulary.") @@ -80,6 +80,8 @@ class UnifiedTransformer(ModelBase): group.add_argument("--two_layer_predictor", type=str2bool, default=False, help="Use two layer predictor. 
" "Traditional BERT use two FC layers to predict masked token.") + group.add_argument("--bidirectional_context", type=str2bool, default=True, + help="Whether to use bidirectional self-attention in context tokens.") group.add_argument("--label_smooth", type=float, default=0.0, help="Use soft label to calculate NLL loss and BoW loss.") group.add_argument("--initializer_range", type=float, default=0.02, @@ -93,11 +95,9 @@ class UnifiedTransformer(ModelBase): help="The maximum norm of gradient.") return group - def __init__(self, name_scope, generator, hparams, dtype="float32"): - super().__init__(name_scope) + def __init__(self, name_scope, hparams, generator, dtype="float32"): + super().__init__(name_scope, hparams) self.generator = generator - self.init_checkpoint = hparams.init_checkpoint - self.batch_size = hparams.batch_size self.num_token_embeddings = hparams.num_token_embeddings self.num_pos_embeddings = hparams.num_pos_embeddings self.num_type_embeddings = hparams.num_type_embeddings @@ -117,12 +117,10 @@ class UnifiedTransformer(ModelBase): self.weight_sharing = hparams.weight_sharing self.pos_trainable = hparams.pos_trainable self.two_layer_predictor = hparams.two_layer_predictor + self.bidirectional_context = hparams.bidirectional_context self.label_smooth = hparams.label_smooth self.initializer_range = hparams.initializer_range - if self.use_discriminator and self.batch_size == 1: - print("Warmming: If you use discriminator loss in traning, the batch_size must be greater than 1.") - self.embedder = Embedder(self.full_name(), self.hidden_dim, self.num_token_embeddings, @@ -151,22 +149,23 @@ class UnifiedTransformer(ModelBase): self.layers.append(layer) self.add_sublayer(f"layer_{i}", layer) - self.post_network = FC(name_scope=self.full_name() + ".post_network", - size=self.num_latent, - bias_attr=False) + if self.num_latent > 0: + self.post_network = FC(name_scope=self.full_name() + ".post_network", + size=self.num_latent, + bias_attr=False) - if self.use_discriminator: - self.dis_ratio = hparams.dis_ratio - self.discriminator = FC(name_scope=self.full_name() + ".discriminator", - size=1, - act="sigmoid") + if self.use_discriminator: + self.dis_ratio = hparams.dis_ratio + self.discriminator = FC(name_scope=self.full_name() + ".discriminator", + size=1, + act="sigmoid") if self.two_layer_predictor: self.pre_predictor = FC(name_scope=self.full_name() + ".pre_predictor", size=self.hidden_dim, num_flatten_dims=2, act="gelu") - if self.with_bow: + if self.num_latent > 0 and self.with_bow: self.pre_bow_predictor = FC(name_scope=self.full_name() + ".pre_bow_predictor", size=self.hidden_dim, act="gelu") @@ -175,7 +174,7 @@ class UnifiedTransformer(ModelBase): size=self.num_token_embeddings, num_flatten_dims=2, bias_attr=False) - if self.with_bow: + if self.num_latent > 0 and self.with_bow: self.bow_predictor = FC(name_scope=self.full_name() + ".bow_predictor", size=self.num_token_embeddings, bias_attr=False) @@ -199,20 +198,21 @@ class UnifiedTransformer(ModelBase): def _create_parameters(self): """ Create model's paramters. 
""" - sequence_mask = np.tri(self.num_pos_embeddings, self.num_pos_embeddings, dtype=self._dtype) - self.mask_embed = self.create_parameter( - attr=fluid.ParamAttr( - name="mask_embed", - initializer=fluid.initializer.NormalInitializer(scale=self.initializer_range)), - shape=[1, 1, self.hidden_dim], - dtype=self._dtype) - self.latent_embeddings = self.create_parameter( - attr=fluid.ParamAttr( - name="latent_embeddings", - initializer=fluid.initializer.NormalInitializer(scale=self.initializer_range)), - shape=[self.num_latent, self.hidden_dim], - dtype=self._dtype) + if self.num_latent > 0: + self.mask_embed = self.create_parameter( + attr=fluid.ParamAttr( + name="mask_embed", + initializer=fluid.initializer.NormalInitializer(scale=self.initializer_range)), + shape=[1, 1, self.hidden_dim], + dtype=self._dtype) + self.latent_embeddings = self.create_parameter( + attr=fluid.ParamAttr( + name="latent_embeddings", + initializer=fluid.initializer.NormalInitializer(scale=self.initializer_range)), + shape=[self.num_latent, self.hidden_dim], + dtype=self._dtype) + sequence_mask = np.tri(self.num_pos_embeddings, self.num_pos_embeddings, dtype=self._dtype) self.sequence_mask = self.create_parameter( attr=fluid.ParamAttr( name="sequence_mask", @@ -226,24 +226,39 @@ class UnifiedTransformer(ModelBase): """ Load saved paramters. """ if self.init_checkpoint is not None: print(f"Loading parameters from {self.init_checkpoint}") - models, optimizers = fluid.dygraph.load_persistables(self.init_checkpoint) - parameters = self.parameters() - parameters = {param.name: param for param in parameters} + if hasattr(fluid, "load_dygraph"): + # >= 1.6.0 compatible + models, optimizers = fluid.load_dygraph(self.init_checkpoint) + else: + models, optimizers = fluid.dygraph.load_persistables(self.init_checkpoint) + parameters = {param.name: param for param in self.parameters()} for name, param in models.items(): if name in parameters: if param.shape != parameters[name].shape: print(f"part of parameter({name}) random normlize initialize") + if hasattr(param, "numpy"): + arr = param.numpy() + else: + value = param.value() + tensor = value.get_tensor() + arr = np.array(tensor) z = np.random.normal(scale=self.initializer_range, size=parameters[name].shape).astype("float32") - z[:param.shape[0]] = param.numpy() + if name == "Model/UnifiedTransformer_0/Embedder_0/Embedding_0.w_0": + z[-param.shape[0]:] = arr + else: + z[:param.shape[0]] = arr z = fluid.dygraph.to_variable(z) models[name] = z for name in parameters: - if name not in models and parameters[name].trainable: - print(f"parameter({name}) random normlize initialize") - z = np.random.normal(scale=self.initializer_range, - size=parameters[name].shape).astype("float32") - models[name] = fluid.dygraph.to_variable(z) + if name not in models: + if parameters[name].trainable: + print(f"parameter({name}) random normlize initialize") + z = np.random.normal(scale=self.initializer_range, + size=parameters[name].shape).astype("float32") + models[name] = fluid.dygraph.to_variable(z) + else: + models[name] = parameters[name] self.load_dict(models) print(f"Loaded parameters from {self.init_checkpoint}") @@ -308,7 +323,8 @@ class UnifiedTransformer(ModelBase): mask_embed = self.embed_layer_norm(mask_embed) post_embed = layers.concat([mask_embed, embed], axis=1) - mask = self._create_mask(input_mask, append_head=True) + mask = self._create_mask(input_mask, auto_regressive=not self.bidirectional_context, + append_head=True) for layer in self.layers: post_embed = layer(post_embed, 
mask, None) @@ -321,17 +337,29 @@ class UnifiedTransformer(ModelBase): def _discriminator_network(self, input_mask, embed, batch_size, src_len, tgt_len, pos_embed): """ Basic discriminator network implement. """ + # if batch_size <= 1: + # raise ValueError("Warmming: If you use discriminator loss in traning, the batch_size must be greater than 1.") + src_embed = embed[:, :src_len] tgt_embed = embed[:, src_len:] - neg_tgt_embed = layers.reverse(tgt_embed, axis=0) # concat([tgt_embed[1:], tgt_embed[:1]], axis=0) + if batch_size > 1: + neg_tgt_embed = layers.concat([tgt_embed[1:], tgt_embed[:1]], axis=0) + else: + # Cannot train discriminator if batch_size == 1 + neg_tgt_embed = tgt_embed neg_embed = layers.concat([src_embed, neg_tgt_embed], axis=1) # Create generation network mask src_mask = input_mask[:, :src_len] tgt_mask = input_mask[:, src_len:] - neg_tgt_mask = layers.reverse(tgt_mask, axis=0) # concat([tgt_mask[1:], tgt_mask[:1]], axis=0) + if batch_size > 1: + neg_tgt_mask = layers.concat([tgt_mask[1:], tgt_mask[:1]], axis=0) + else: + # Cannot train discriminator if batch_size == 1 + neg_tgt_mask = tgt_mask neg_mask = layers.concat([src_mask, neg_tgt_mask], axis=1) - mask = self._create_mask(neg_mask, append_head=True) + mask = self._create_mask(neg_mask, auto_regressive=not self.bidirectional_context, + append_head=True) mask_embed = self.mask_embed mask_embed = layers.expand(mask_embed, [batch_size, 1, 1]) @@ -350,21 +378,28 @@ class UnifiedTransformer(ModelBase): def _generation_network(self, input_mask, embed, batch_size, src_len, tgt_len, latent_embed): """ Basic generation network implement. """ - latent_embed = F.unsqueeze(latent_embed, [1]) - latent_embed = self.embed_layer_norm(latent_embed) - dec_embed = layers.concat([latent_embed, embed], axis=1) + if self.num_latent > 0: + latent_embed = F.unsqueeze(latent_embed, [1]) + latent_embed = self.embed_layer_norm(latent_embed) + dec_embed = layers.concat([latent_embed, embed], axis=1) + else: + dec_embed = embed # Create generation network mask src_mask = input_mask[:, :src_len] tgt_mask = input_mask[:, src_len:] - enc_mask = self._create_mask(src_mask, append_head=True) + enc_mask = self._create_mask(src_mask, auto_regressive=not self.bidirectional_context, + append_head=self.num_latent > 0) dec_mask = self._create_mask(tgt_mask, auto_regressive=True) mask = self._join_mask(enc_mask, dec_mask) for layer in self.layers: dec_embed = layer(dec_embed, mask, None) - latent_embed = dec_embed[:, 0] + if self.num_latent > 0: + latent_embed = dec_embed[:, 0] + else: + latent_embed = None dec_embed = dec_embed[:, -tgt_len:] if self.two_layer_predictor: dec_embed = self.pre_predictor(dec_embed) @@ -409,30 +444,33 @@ class UnifiedTransformer(ModelBase): src_len = src_token.shape[1] tgt_len = tgt_token.shape[1] - post_embed, post_probs, post_logits = self._posteriori_network( - input_mask, embed, batch_size, src_len, tgt_len) - outputs["post_logits"] = post_logits + if self.num_latent > 0: + post_embed, post_probs, post_logits = self._posteriori_network( + input_mask, embed, batch_size, src_len, tgt_len) + outputs["post_logits"] = post_logits - if self.use_discriminator: - pos_probs, neg_probs = self._discriminator_network( - input_mask, embed, batch_size, src_len, tgt_len, post_embed) - outputs["pos_probs"] = pos_probs - outputs["neg_probs"] = neg_probs + if self.use_discriminator: + pos_probs, neg_probs = self._discriminator_network( + input_mask, embed, batch_size, src_len, tgt_len, post_embed) + outputs["pos_probs"] = pos_probs + 
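For readers following `_discriminator_network` above: negatives are built by rotating the target embeddings one position along the batch axis, so every context is scored against another example's response (which is why a batch size of 1 cannot train the discriminator). A rough NumPy illustration of that rotation (toy shapes, not from the patch):

```python
import numpy as np

# toy "target embeddings": batch of 4 responses, hidden size 3
tgt_embed = np.arange(12, dtype="float32").reshape(4, 3)

# rotate by one along the batch axis: example i is paired with example (i + 1) % 4's response
neg_tgt_embed = np.concatenate([tgt_embed[1:], tgt_embed[:1]], axis=0)

assert (neg_tgt_embed[0] == tgt_embed[1]).all()
assert (neg_tgt_embed[-1] == tgt_embed[0]).all()
```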
outputs["neg_probs"] = neg_probs - if is_training: - z = F.gumbel_softmax(post_logits, self.tau) + if is_training: + z = F.gumbel_softmax(post_logits, self.tau) + else: + indices = layers.argmax(post_logits, axis=1) + z = layers.one_hot(F.unsqueeze(indices, [1]), self.num_latent) + latent_embeddings = self.latent_embeddings + latent_embed = layers.matmul(z, latent_embeddings) + outputs["latent_embed"] = latent_embed else: - indices = layers.argmax(post_logits, axis=1) - z = layers.one_hot(F.unsqueeze(indices, [1]), self.num_latent) - latent_embeddings = self.latent_embeddings - latent_embed = layers.matmul(z, latent_embeddings) - outputs["latent_embed"] = latent_embed + latent_embed = None latent_embed, dec_probs = self._generation_network( input_mask, embed, batch_size, src_len, tgt_len, latent_embed) outputs["dec_probs"] = dec_probs - if self.with_bow: + if self.num_latent > 0 and self.with_bow: if self.two_layer_predictor: latent_embed = self.pre_bow_predictor(latent_embed) bow_logits = self.bow_predictor(latent_embed) @@ -445,7 +483,7 @@ class UnifiedTransformer(ModelBase): """ Calculate loss function by using inputs and outputs. """ metrics = {} - tgt_len = layers.reduce_sum(inputs["tgt_mask"]) - 1 + tgt_len = layers.reduce_sum(layers.reduce_sum(inputs["tgt_mask"], dim=1) - 1) tgt_len.stop_gradient = True label = inputs["tgt_token"][:, 1:] @@ -462,10 +500,9 @@ class UnifiedTransformer(ModelBase): nll = layers.reduce_mean(nll) metrics["nll"] = nll metrics["token_nll"] = token_nll - metrics["token_ppl"] = layers.exp(token_nll) loss = nll - if self.with_bow: + if self.num_latent > 0 and self.with_bow: bow_probs = F.unsqueeze(outputs["bow_probs"], [1]) bow_probs = layers.expand(bow_probs, [1, label.shape[1], 1]) if self.label_smooth > 0: @@ -480,13 +517,14 @@ class UnifiedTransformer(ModelBase): metrics["token_bow"] = token_bow loss = loss + bow - if self.use_discriminator: + if self.num_latent > 0 and self.use_discriminator: dis = 0.0 - (layers.log(outputs["pos_probs"]) + layers.log(1.0 - outputs["neg_probs"])) dis = layers.reduce_mean(dis) metrics["dis"] = dis loss = loss + dis * self.dis_ratio metrics["loss"] = loss + metrics["token_num"] = tgt_len return metrics def _optimize(self, loss): @@ -518,37 +556,45 @@ class UnifiedTransformer(ModelBase): src_embed = self.embedder(src_token, src_pos, src_type, src_turn) src_embed = self.embed_layer_norm(src_embed) - src_embed = F.unsqueeze(src_embed, [1]) - src_embed = layers.expand(src_embed, [1, self.num_latent, 1, 1]) - src_embed = layers.reshape(src_embed, [-1, seq_len, self.hidden_dim]) + mask = self._create_mask(src_mask, append_head=self.num_latent > 0) + + if self.num_latent > 0: + src_embed = F.unsqueeze(src_embed, [1]) + src_embed = layers.expand(src_embed, [1, self.num_latent, 1, 1]) + src_embed = layers.reshape(src_embed, [-1, seq_len, self.hidden_dim]) - latent_embed = self.latent_embeddings - latent_embed = F.unsqueeze(latent_embed, [1]) - latent_embed = layers.expand(latent_embed, [batch_size, 1, 1]) - latent_embed = self.embed_layer_norm(latent_embed) + latent_embed = self.latent_embeddings + latent_embed = F.unsqueeze(latent_embed, [1]) + latent_embed = layers.expand(latent_embed, [batch_size, 1, 1]) + latent_embed = self.embed_layer_norm(latent_embed) - enc_out = layers.concat([latent_embed, src_embed], axis=1) - mask = self._create_mask(src_mask, append_head=True) - mask = F.unsqueeze(mask, [1]) - mask = layers.expand(mask, [1, self.num_latent, 1, 1]) - mask = layers.reshape(mask, [-1, seq_len + 1, seq_len + 1]) + enc_out = 
layers.concat([latent_embed, src_embed], axis=1) + + mask = F.unsqueeze(mask, [1]) + mask = layers.expand(mask, [1, self.num_latent, 1, 1]) + mask = layers.reshape(mask, [-1, seq_len + 1, seq_len + 1]) + else: + enc_out = src_embed cache = {} for l, layer in enumerate(self.layers): cache[f"layer_{l}"] = {} enc_out = layer(enc_out, mask, cache[f"layer_{l}"]) - # state[f"mask_embed_{l}"] = enc_out[:, 0] state["cache"] = cache state["mask"] = mask[:, :1] - shape = [batch_size * self.num_latent, 1, 1] + if self.num_latent > 0: + state["batch_size"] = batch_size * self.num_latent + shape = [batch_size * self.num_latent, 1, 1] + else: + state["batch_size"] = batch_size + shape = [batch_size, 1, 1] state["pred_mask"] = layers.ones(shape, self._dtype) state["pred_pos"] = layers.zeros(shape, "int64") state["pred_type"] = layers.zeros(shape, "int64") state["pred_turn"] = layers.zeros(shape, "int64") - state["batch_size"] = batch_size * self.num_latent - if "tgt_token" in inputs: + if "tgt_token" in inputs and self.num_latent > 0: tgt_token = inputs["tgt_token"][:, :-1] tgt_mask = inputs["tgt_mask"][:, :-1] tgt_pos = inputs["tgt_pos"][:, :-1] @@ -669,21 +715,32 @@ class UnifiedTransformer(ModelBase): def _infer(self, inputs): """ Real inference process of model. """ + results = {} + # Initial decode state. state = self._init_state(inputs) - batch_size = state["batch_size"] // self.num_latent - results = {} if "post_probs" in state: results["post_probs"] = state.pop("post_probs") + # Generation process. gen_results = self.generator(self._decode, state) results.update(gen_results) - results["scores"] = layers.reshape(results["scores"], [batch_size, self.num_latent]) - results["log_p"] = results["scores"] - results["src"] = layers.reshape(inputs["src_token"], [batch_size, -1]) - results["tgt"] = layers.reshape(inputs["tgt_token"], [batch_size, -1]) - results["preds"] = layers.reshape(results["preds"], [batch_size, self.num_latent, -1]) - if self.use_discriminator: - results["scores"] = self._ranking(inputs, results["preds"]) + if self.num_latent > 0: + batch_size = state["batch_size"] // self.num_latent + results["scores"] = layers.reshape(results["scores"], [batch_size, self.num_latent]) + results["log_p"] = results["scores"] + results["src"] = layers.reshape(inputs["src_token"], [batch_size, -1]) + if "tgt_token" in inputs: + results["tgt"] = layers.reshape(inputs["tgt_token"], [batch_size, -1]) + results["preds"] = layers.reshape(results["preds"], [batch_size, self.num_latent, -1]) + if self.use_discriminator: + results["scores"] = self._ranking(inputs, results["preds"]) + else: + batch_size = state["batch_size"] + if "tgt_token" in inputs: + results["tgt"] = layers.reshape(inputs["tgt_token"], [batch_size, -1]) return results + + +UnifiedTransformer.register("UnifiedTransformer") diff --git a/PaddleNLP/Research/Dialogue-PLATO/modules/embedder.py b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/embedder.py similarity index 98% rename from PaddleNLP/Research/Dialogue-PLATO/modules/embedder.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/modules/embedder.py index fe895f0e..d67c4a29 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/modules/embedder.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/embedder.py @@ -16,11 +16,11 @@ Embedder class. 
""" import paddle.fluid as fluid -import paddle.fluid.layers as layers - -import modules.functions as F from paddle.fluid.dygraph import Embedding from paddle.fluid.dygraph import Layer +import paddle.fluid.layers as layers + +import plato.modules.functions as F class Embedder(Layer): diff --git a/PaddleNLP/Research/Dialogue-PLATO/modules/feedforward.py b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/feedforward.py similarity index 98% rename from PaddleNLP/Research/Dialogue-PLATO/modules/feedforward.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/modules/feedforward.py index 57b13254..b083c006 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/modules/feedforward.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/feedforward.py @@ -16,11 +16,11 @@ FeedForward class. """ import paddle.fluid as fluid -import paddle.fluid.layers as layers - -import modules.functions as F from paddle.fluid.dygraph import FC from paddle.fluid.dygraph import Layer +import paddle.fluid.layers as layers + +import plato.modules.functions as F class FeedForward(Layer): diff --git a/PaddleNLP/Research/Dialogue-PLATO/modules/functions.py b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/functions.py similarity index 90% rename from PaddleNLP/Research/Dialogue-PLATO/modules/functions.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/modules/functions.py index 19fd14da..d6b418e3 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/modules/functions.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/functions.py @@ -22,6 +22,7 @@ import paddle.fluid.layers as layers def unsqueeze(input, axes): """ Implement unsqueeze in dygraph mode. """ + # return layers.unsqueeze(input, axes) # op:unsqueeze has bug in dygraph axes = [axis if axis >= 0 else axis + len(input.shape) + 1 for axis in axes] axes = sorted(axes, reverse=True) @@ -33,8 +34,9 @@ def unsqueeze(input, axes): def gumbel_softmax(input, tau=1, eps=1e-10): """ Basic implement of gumbel_softmax. """ - U = layers.uniform_random(input.shape, dtype=input.dtype, min=0.0, max=1.0) - U.stop_gradient = True + U = fluid.dygraph.to_variable(np.random.rand(*input.shape)) + # U = layers.uniform_random(input.shape, dtype=input.dtype, min=0.0, max=1.0) + # U.stop_gradient = True gumbel = 0.0 - layers.log(eps - layers.log(U + eps)) y = input + gumbel return layers.softmax(y / tau) diff --git a/PaddleNLP/Research/Dialogue-PLATO/plato/modules/layer_norm.py b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/layer_norm.py new file mode 100644 index 00000000..af439b12 --- /dev/null +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/layer_norm.py @@ -0,0 +1,91 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +LayerNorm layer. +""" + +# from paddle.fluid.dygraph import LayerNorm + +from six.moves import reduce + +import paddle.fluid as fluid +import paddle.fluid.layers as layers +from paddle.fluid.dygraph import Layer +import logging + +class LayerNorm(Layer): + """ Implement LayerNorm in dygraph mode. 
""" + + def __init__(self, + name_scope, + scale=True, + shift=True, + begin_norm_axis=1, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + act=None): + super().__init__(name_scope) + self._scale = scale + self._shift = shift + self._begin_norm_axis = begin_norm_axis + self._epsilon = epsilon + self._param_attr = param_attr + self._bias_attr = bias_attr + self._act = act + return + + def _build_once(self, input): + """ Create parameters. """ + self._dtype = self._helper.input_dtype(input) + input_shape = input.shape + param_shape = [ + reduce(lambda x, y: x * y, input_shape[self._begin_norm_axis:]) + ] + if self._scale: + self._scale_w = self.create_parameter( + attr=self._param_attr, + shape=param_shape, + dtype=self._dtype, + default_initializer=fluid.initializer.Constant(1.0)) + else: + if self._param_attr: + logging.warn("param_attr are only avaliable with scale is True") + + if self._shift: + assert self._bias_attr is not False + self._bias_w = self.create_parameter( + attr=self._bias_attr, + shape=param_shape, + dtype=self._dtype, + is_bias=True) + else: + if self._bias_attr: + logging.warn("bias_attr are only avaliable with shift is True") + return + + def forward(self, x): + """ Forward process of LayerNorm. """ + mean = layers.reduce_mean(x, + dim=list(range(self._begin_norm_axis, len(x.shape))), + keep_dim=True) + shift_x = layers.elementwise_sub(x=x, y=mean, axis=0) + variance = layers.reduce_mean(layers.square(shift_x), + dim=list(range(self._begin_norm_axis, len(x.shape))), + keep_dim=True) + r_stdev = layers.rsqrt(variance + self._epsilon) + norm_x = layers.elementwise_mul(x=shift_x, y=r_stdev, axis=0) + out = layers.elementwise_mul(x=norm_x, y=self._scale_w, axis=-1) + out = layers.elementwise_add(x=out, y=self._bias_w, axis=-1) + return out diff --git a/PaddleNLP/Research/Dialogue-PLATO/modules/multihead_attention.py b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/multihead_attention.py similarity index 99% rename from PaddleNLP/Research/Dialogue-PLATO/modules/multihead_attention.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/modules/multihead_attention.py index fdf3bbee..1fee956c 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/modules/multihead_attention.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/multihead_attention.py @@ -16,11 +16,11 @@ MultiheadAttention class. 
""" import paddle.fluid as fluid -import paddle.fluid.layers as layers - -import modules.functions as F from paddle.fluid.dygraph import Layer from paddle.fluid.dygraph import FC +import paddle.fluid.layers as layers + +import plato.modules.functions as F class MultiheadAttention(Layer): diff --git a/PaddleNLP/Research/Dialogue-PLATO/modules/parallel.py b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/parallel.py similarity index 98% rename from PaddleNLP/Research/Dialogue-PLATO/modules/parallel.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/modules/parallel.py index 98bca3ac..7fcde90a 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/modules/parallel.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/parallel.py @@ -24,7 +24,7 @@ from paddle.fluid.dygraph import layers from paddle.fluid.dygraph import parallel_helper import paddle.fluid.framework as framework from paddle.fluid.layers import collective -import paddle.fluid.dygraph.base as base +from paddle.fluid.dygraph.base import to_variable, no_grad ParallelStrategy = core.ParallelStrategy @@ -179,7 +179,7 @@ class DataParallel(layers.Layer): if not self._is_data_parallel_mode(): return loss - loss_scale = base.to_variable( + loss_scale = to_variable( np.array([self._strategy.nranks]).astype("float32")) loss_scale.stop_gradient = True loss = loss / loss_scale @@ -214,6 +214,7 @@ class DataParallel(layers.Layer): for g_var, g_shape in zip(origin_grad_vars, grad_shapes): nn.reshape(x=g_var, shape=g_shape, inplace=True) + @no_grad def apply_collective_grads(self): """ AllReduce the Parameters' gradient. diff --git a/PaddleNLP/Research/Dialogue-PLATO/modules/transformer_block.py b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/transformer_block.py similarity index 94% rename from PaddleNLP/Research/Dialogue-PLATO/modules/transformer_block.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/modules/transformer_block.py index 53b248bf..b105c75d 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/modules/transformer_block.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/modules/transformer_block.py @@ -16,14 +16,14 @@ TransformerBlock class. 
""" import paddle.fluid as fluid -import paddle.fluid.layers as layers - -from modules.feedforward import FeedForward -from modules.multihead_attention import MultiheadAttention -import modules.functions as F from paddle.fluid.dygraph import FC from paddle.fluid.dygraph import Layer -from paddle.fluid.dygraph import LayerNorm +import paddle.fluid.layers as layers + +from plato.modules.feedforward import FeedForward +from plato.modules.layer_norm import LayerNorm +from plato.modules.multihead_attention import MultiheadAttention +import plato.modules.functions as F class TransformerBlock(Layer): diff --git a/PaddleNLP/Research/Dialogue-PLATO/trainer.py b/PaddleNLP/Research/Dialogue-PLATO/plato/trainer.py similarity index 66% rename from PaddleNLP/Research/Dialogue-PLATO/trainer.py rename to PaddleNLP/Research/Dialogue-PLATO/plato/trainer.py index 323b831d..f464323f 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/trainer.py +++ b/PaddleNLP/Research/Dialogue-PLATO/plato/trainer.py @@ -22,16 +22,17 @@ import sys import time import numpy as np +import paddle import paddle.fluid as fluid import paddle.fluid.dygraph as dygraph from tqdm import tqdm -from args import str2bool -from dataloader import DataLoader -from metrics.metrics_tracker import MetricsTracker -from metrics.metrics import bleu -from metrics.metrics import distinct -import modules.parallel as parallel +from plato.args import str2bool +from plato.data.data_loader import DataLoader +from plato.metrics.metrics_tracker import MetricsTracker +from plato.metrics.metrics import bleu +from plato.metrics.metrics import distinct +import plato.modules.parallel as parallel def get_logger(log_path, name="default"): @@ -54,7 +55,10 @@ def get_logger(log_path, name="default"): def evaluate_generation_result(results): tgt = [result["tgt"].split(" ") for result in results] - pred = [result["preds"][np.argmax(result["scores"])] for result in results] + pred = [result["preds"][np.argmax(result["scores"])] + if isinstance(result["preds"], list) + else result["preds"] + for result in results] pred = [p.split(" ") for p in pred] metrics = {} metrics_tracker = MetricsTracker() @@ -78,7 +82,12 @@ def evaluate_generation_result(results): def save(model, model_path): if isinstance(model, parallel.DataParallel): model = model._layers - dygraph.save_persistables(model.state_dict(), model_path, optimizers=model.optimizer) + if hasattr(fluid, "save_dygraph"): + # >= 1.6.0 compatible + fluid.save_dygraph(model.state_dict(), model_path) + fluid.save_dygraph(model.optimizer.state_dict(), model_path) + else: + dygraph.save_persistables(model.state_dict(), model_path, optimizers=model.optimizer) return @@ -115,10 +124,11 @@ class Trainer(object): # Use data distributed if hparams.use_data_distributed: strategy = parallel.prepare_context() - parallel_model = parallel.DataParallel(model, strategy) - model.before_backward_fn = parallel_model.scale_loss - model.after_backward_fn = parallel_model.apply_collective_grads - model = parallel_model + if strategy is not None: + parallel_model = parallel.DataParallel(model, strategy) + model.before_backward_fn = parallel_model.scale_loss + model.after_backward_fn = parallel_model.apply_collective_grads + model = parallel_model self.model = model self.to_tensor = to_tensor @@ -143,7 +153,8 @@ class Trainer(object): self.train_summary = {} self.valid_summary = {} - self.metrics_tracker = MetricsTracker() + self.batch_metrics_tracker = MetricsTracker() + self.token_metrics_tracker = MetricsTracker() self.best_valid_metric = 
float("inf" if self.is_decreased_valid_metric else "-inf") self.epoch = 0 @@ -167,33 +178,44 @@ class Trainer(object): """ self.epoch += 1 num_batches = len(train_iter) - self.metrics_tracker.clear() + self.batch_metrics_tracker.clear() + self.token_metrics_tracker.clear() times = [] for batch_id, (batch, batch_size) in enumerate(train_iter, 1): batch = type(batch)(map(lambda kv: (kv[0], self.to_tensor(kv[1])), batch.items())) batch["epoch"] = self.epoch batch["num_steps"] = self.batch_num - # measure data loading time # Do a training iteration start_time = time.time() metrics = self.model(batch, is_training=True) + token_num = metrics.pop("token_num", None) elapsed = time.time() - start_time times.append(elapsed) - self.metrics_tracker.update(metrics, batch_size) + batch_metrics = {k: v for k, v in metrics.items() if "token" not in k} + token_metrics = {k: v for k, v in metrics.items() if "token" in k} + self.batch_metrics_tracker.update(batch_metrics, batch_size) + self.token_metrics_tracker.update(token_metrics, token_num) self.batch_num += 1 if self.log_steps and batch_id % self.log_steps == 0: - metrics_message = self.metrics_tracker.value() + batch_metrics_message = self.batch_metrics_tracker.value() + token_metrics_message = self.token_metrics_tracker.value() message_prefix = f"[Train][{self.epoch}][{batch_id}/{num_batches}]" avg_time = f"AVG_Time-{sum(times[-self.log_steps:]) / self.log_steps:.3f}" - message = " ".join([message_prefix, metrics_message, avg_time]) + message = " ".join([message_prefix, batch_metrics_message, token_metrics_message, + avg_time]) self.logger.info(message) if self.save_summary: with self.summary_logger.mode("train"): - for k, v in self.metrics_tracker.items(): + for k, v in self.batch_metrics_tracker.items(): + if k not in self.train_summary: + self.train_summary[k] = self.summary_logger.scalar(k) + scalar = self.train_summary[k] + scalar.add_record(self.batch_num, v) + for k, v in self.token_metrics_tracker.items(): if k not in self.train_summary: self.train_summary[k] = self.summary_logger.scalar(k) scalar = self.train_summary[k] @@ -226,9 +248,11 @@ class Trainer(object): """ self.logger.info("Generation starts ...") infer_save_file = os.path.join(self.save_dir, f"infer_{self.epoch}.result.json") + # Inference infer_results = [] batch_cnt = 0 + begin_time = time.time() for batch, batch_size in tqdm(data_iter, total=num_batches): batch = type(batch)(map(lambda kv: (kv[0], self.to_tensor(kv[1])), batch.items())) @@ -264,7 +288,8 @@ class Trainer(object): infer_metrics_tracker = evaluate_generation_result(infer_results) metrics_message = infer_metrics_tracker.summary() message_prefix = f"[Infer][{self.epoch}]" - message = " ".join([message_prefix, metrics_message]) + time_cost = f"TIME-{time.time() - begin_time:.3f}" + message = " ".join([message_prefix, metrics_message, time_cost]) self.logger.info(message) return @@ -282,42 +307,56 @@ class Trainer(object): need_save = need_save and parallel.Env().local_rank == 0 # Evaluation - metrics_tracker = MetricsTracker() + begin_time = time.time() + batch_metrics_tracker = MetricsTracker() + token_metrics_tracker = MetricsTracker() for batch, batch_size in data_iter: batch = type(batch)(map(lambda kv: (kv[0], self.to_tensor(kv[1])), batch.items())) metrics = self.model(batch, is_training=False) - metrics_tracker.update(metrics, batch_size) - metrics_message = metrics_tracker.summary() + token_num = int(metrics.pop("token_num")) + batch_metrics = {k: v for k, v in metrics.items() if "token" not in k} + 
token_metrics = {k: v for k, v in metrics.items() if "token" in k} + batch_metrics_tracker.update(batch_metrics, batch_size) + token_metrics_tracker.update(token_metrics, token_num) + batch_metrics_message = batch_metrics_tracker.summary() + token_metrics_message = token_metrics_tracker.summary() message_prefix = f"[Valid][{self.epoch}]" - message = " ".join([message_prefix, metrics_message]) + time_cost = f"TIME-{time.time() - begin_time:.3f}" + message = " ".join([message_prefix, batch_metrics_message, token_metrics_message, time_cost]) self.logger.info(message) - # Check valid metric - cur_valid_metric = metrics_tracker.get(self.valid_metric_name) - if self.is_decreased_valid_metric: - is_best = cur_valid_metric < self.best_valid_metric - else: - is_best = cur_valid_metric > self.best_valid_metric - if is_best and need_save: - # Save current best model - self.best_valid_metric = cur_valid_metric - best_model_path = os.path.join(self.save_dir, "best.model") - save(self.model, best_model_path) - self.logger.info( - f"Saved best model to '{best_model_path}' with new best valid metric " - f"{self.valid_metric_name.upper()}-{self.best_valid_metric:.3f}") - - # Save checkpoint - if self.save_checkpoint and need_save: - model_file = os.path.join(self.save_dir, f"epoch_{self.epoch}.model") - save(self.model, model_file) - - if self.save_summary and need_save: - with self.summary_logger.mode("valid"): - for k, v in self.metrics_tracker.items(): - if k not in self.valid_summary: - self.valid_summary[k] = self.summary_logger.scalar(k) - scalar = self.valid_summary[k] - scalar.add_record(self.batch_num, v) + if need_save: + # Check valid metric + cur_valid_metric = batch_metrics_tracker.get(self.valid_metric_name) + if self.is_decreased_valid_metric: + is_best = cur_valid_metric < self.best_valid_metric + else: + is_best = cur_valid_metric > self.best_valid_metric + if is_best: + # Save current best model + self.best_valid_metric = cur_valid_metric + best_model_path = os.path.join(self.save_dir, "best.model") + save(self.model, best_model_path) + self.logger.info( + f"Saved best model to '{best_model_path}' with new best valid metric " + f"{self.valid_metric_name.upper()}-{self.best_valid_metric:.3f}") + + # Save checkpoint + if self.save_checkpoint: + model_file = os.path.join(self.save_dir, f"epoch_{self.epoch}.model") + save(self.model, model_file) + + if self.save_summary: + with self.summary_logger.mode("valid"): + for k, v in self.batch_metrics_tracker.items(): + if k not in self.valid_summary: + self.valid_summary[k] = self.summary_logger.scalar(k) + scalar = self.valid_summary[k] + scalar.add_record(self.batch_num, v) + for k, v in self.token_metrics_tracker.items(): + if k not in self.valid_summary: + self.valid_summary[k] = self.summary_logger.scalar(k) + scalar = self.valid_summary[k] + scalar.add_record(self.batch_num, v) return diff --git a/PaddleNLP/Research/Dialogue-PLATO/preprocess.py b/PaddleNLP/Research/Dialogue-PLATO/preprocess.py index d93b3b30..0e8c2bd8 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/preprocess.py +++ b/PaddleNLP/Research/Dialogue-PLATO/preprocess.py @@ -18,10 +18,10 @@ Preprocess script. 
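The trainer's `save` helper and the model's `_load_params` above branch on `hasattr(fluid, "save_dygraph")` / `hasattr(fluid, "load_dygraph")` so that the same code runs on paddlepaddle 1.5.x and 1.6.0. Condensed into one place, the compatibility shim looks roughly like this (a sketch; `path` and the optimizer argument are placeholders):

```python
import paddle.fluid as fluid

def save_checkpoint(model, optimizer, path):
    if hasattr(fluid, "save_dygraph"):  # paddlepaddle >= 1.6.0
        fluid.save_dygraph(model.state_dict(), path)
        if optimizer is not None:
            fluid.save_dygraph(optimizer.state_dict(), path)
    else:                               # pre-1.6 fallback
        fluid.dygraph.save_persistables(model.state_dict(), path, optimizers=optimizer)

def load_checkpoint(path):
    if hasattr(fluid, "load_dygraph"):  # returns (parameter dict, optimizer dict)
        return fluid.load_dygraph(path)
    return fluid.dygraph.load_persistables(path)
```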
import os import argparse -from args import str2bool -from args import parse_args -from dataset import Dataset -from field import BPETextField +from plato.args import str2bool +from plato.args import parse_args +from plato.data.dataset import Dataset +from plato.data.field import BPETextField def main(): @@ -35,15 +35,15 @@ def main(): raw_train_file = os.path.join(args.data_dir, "dial.train") raw_valid_file = os.path.join(args.data_dir, "dial.valid") raw_test_file = os.path.join(args.data_dir, "dial.test") - train_file = raw_train_file + ".jsonl" - valid_file = raw_valid_file + ".jsonl" - test_file = raw_test_file + ".jsonl" + train_file = raw_train_file + f".{args.tokenizer_type}.jsonl" + valid_file = raw_valid_file + f".{args.tokenizer_type}.jsonl" + test_file = raw_test_file + f".{args.tokenizer_type}.jsonl" bpe = BPETextField(args.BPETextField) BUILD_EXAMPLES_FN = { "multi": bpe.build_examples_multi_turn, - "multi_knowledge": bpe.build_examples_multi_turn_with_knoledge + "multi_knowledge": bpe.build_examples_multi_turn_with_knowledge } build_examples_fn = BUILD_EXAMPLES_FN[args.data_type] diff --git a/PaddleNLP/Research/Dialogue-PLATO/run.py b/PaddleNLP/Research/Dialogue-PLATO/run.py index bf4e3155..b0daeb8b 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/run.py +++ b/PaddleNLP/Research/Dialogue-PLATO/run.py @@ -22,16 +22,16 @@ import os import numpy as np import paddle.fluid as fluid -from args import parse_args -from args import str2bool -from dataloader import DataLoader -from dataset import Dataset -from dataset import LazyDataset -from field import BPETextField -from trainer import Trainer -from models.unified_transformer import UnifiedTransformer -from models.generator import BeamSearch -import modules.parallel as parallel +from plato.args import parse_args +from plato.args import str2bool +from plato.data.data_loader import DataLoader +from plato.data.dataset import Dataset +from plato.data.dataset import LazyDataset +from plato.data.field import BPETextField +from plato.trainer import Trainer +from plato.models.model_base import ModelBase +from plato.models.generator import Generator +import plato.modules.parallel as parallel def main(): @@ -39,21 +39,28 @@ def main(): parser.add_argument("--do_train", type=str2bool, default=False, help="Whether to run trainning.") - parser.add_argument("--do_valid", type=str2bool, default=False, + parser.add_argument("--do_test", type=str2bool, default=False, help="Whether to run evaluation on the test dataset.") parser.add_argument("--do_infer", type=str2bool, default=False, help="Whether to run inference on the test dataset.") parser.add_argument("--num_infer_batches", type=int, default=None, help="The number of batches need to infer.\n" "Stay 'None': infer on entrie test dataset.") + parser.add_argument("--hparams_file", type=str, default=None, + help="Loading hparams setting from file(.json format).") BPETextField.add_cmdline_argument(parser) Dataset.add_cmdline_argument(parser) Trainer.add_cmdline_argument(parser) - UnifiedTransformer.add_cmdline_argument(parser) - BeamSearch.add_cmdline_argument(parser) + ModelBase.add_cmdline_argument(parser) + Generator.add_cmdline_argument(parser) hparams = parse_args(parser) + if hparams.hparams_file and os.path.exists(hparams.hparams_file): + print(f"Loading hparams from {hparams.hparams_file} ...") + hparams.load(hparams.hparams_file) + print(f"Loaded hparams from {hparams.hparams_file}") + print(json.dumps(hparams, indent=2)) if not os.path.exists(hparams.save_dir): @@ -63,7 +70,7 @@ def main(): 
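`run.py` above no longer hard-codes `UnifiedTransformer` and `BeamSearch`; models and generators register themselves by name and are instantiated through `ModelBase.create` / `Generator.create`. A stripped-down, standalone version of that registry pattern (stub classes, illustrative only):

```python
class Registrable:
    """Minimal name-based registry, mirroring ModelBase.register / by_name / create."""
    _registry = {}

    @classmethod
    def register(cls, name):
        Registrable._registry[name] = cls

    @staticmethod
    def by_name(name):
        return Registrable._registry[name]

    @staticmethod
    def create(name, *args, **kwargs):
        return Registrable.by_name(name)(*args, **kwargs)


class UnifiedTransformerStub(Registrable):
    def __init__(self, hidden_dim=768):
        self.hidden_dim = hidden_dim


UnifiedTransformerStub.register("UnifiedTransformer")
model = Registrable.create("UnifiedTransformer", hidden_dim=256)
print(type(model).__name__, model.hidden_dim)  # UnifiedTransformerStub 256
```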
bpe = BPETextField(hparams.BPETextField) hparams.Model.num_token_embeddings = bpe.vocab_size - generator = BeamSearch(bpe, hparams.Generator) + generator = Generator.create(hparams.Generator, bpe=bpe) COLLATE_FN = { "multi": bpe.collate_fn_multi_turn, @@ -74,22 +81,22 @@ def main(): # Loading datasets if hparams.do_train: raw_train_file = os.path.join(hparams.data_dir, "dial.train") - train_file = raw_train_file + ".jsonl" + train_file = raw_train_file + f".{hparams.tokenizer_type}.jsonl" assert os.path.exists(train_file), f"{train_file} isn't exist" train_dataset = LazyDataset(train_file) - train_loader = DataLoader(train_dataset, hparams.Trainer, collate_fn=collate_fn) + train_loader = DataLoader(train_dataset, hparams.Trainer, collate_fn=collate_fn, is_train=True) raw_valid_file = os.path.join(hparams.data_dir, "dial.valid") - valid_file = raw_valid_file + ".jsonl" + valid_file = raw_valid_file + f".{hparams.tokenizer_type}.jsonl" assert os.path.exists(valid_file), f"{valid_file} isn't exist" valid_dataset = LazyDataset(valid_file) valid_loader = DataLoader(valid_dataset, hparams.Trainer, collate_fn=collate_fn) - if hparams.do_infer or hparams.do_valid: + if hparams.do_infer or hparams.do_test: raw_test_file = os.path.join(hparams.data_dir, "dial.test") - test_file = raw_test_file + ".jsonl" + test_file = raw_test_file + f".{hparams.tokenizer_type}.jsonl" assert os.path.exists(test_file), f"{test_file} isn't exist" test_dataset = LazyDataset(test_file) - test_loader = DataLoader(test_dataset, hparams.Trainer, collate_fn=collate_fn, is_test=True) + test_loader = DataLoader(test_dataset, hparams.Trainer, collate_fn=collate_fn, is_test=hparams.do_infer) def to_tensor(array): array = np.expand_dims(array, -1) @@ -102,7 +109,7 @@ def main(): with fluid.dygraph.guard(place): # Construct Model - model = UnifiedTransformer("Model", generator, hparams) + model = ModelBase.create("Model", hparams, generator=generator) # Construct Trainer trainer = Trainer(model, to_tensor, hparams.Trainer) @@ -112,7 +119,7 @@ def main(): for epoch in range(hparams.num_epochs): trainer.train_epoch(train_loader, valid_loader) - if hparams.do_valid: + if hparams.do_test: # Validation process trainer.evaluate(test_loader, need_save=False) diff --git a/PaddleNLP/Research/Dialogue-PLATO/scripts/DSTC7_AVSD/infer.sh b/PaddleNLP/Research/Dialogue-PLATO/scripts/DSTC7_AVSD/infer.sh index 6fd5505e..76610eba 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/scripts/DSTC7_AVSD/infer.sh +++ b/PaddleNLP/Research/Dialogue-PLATO/scripts/DSTC7_AVSD/infer.sh @@ -2,7 +2,7 @@ set -ux SAVE_DIR=outputs/DSTC7_AVSD.infer -VOCAB_PATH=data/vocab.txt +VOCAB_PATH=model/Bert/vocab.txt DATA_DIR=data/DSTC7_AVSD INIT_CHECKPOINT=outputs/DSTC7_AVSD/best.model DATA_TYPE=multi_knowledge @@ -15,13 +15,11 @@ export FLAGS_fraction_of_gpu_memory_to_use=0.1 export FLAGS_eager_delete_scope=True export FLAGS_eager_delete_tensor_gb=0.0 -if [[ ! 
-e $DATA_DIR/dial.test.jsonl ]]; then - python -u \ - ./preprocess.py \ - --vocab_path $VOCAB_PATH \ - --data_dir $DATA_DIR \ - --data_type $DATA_TYPE -fi +python -u \ + ./preprocess.py \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE python -u \ ./run.py \ @@ -29,7 +27,7 @@ python -u \ --vocab_path $VOCAB_PATH \ --data_dir $DATA_DIR \ --data_type $DATA_TYPE \ - --batch_size 2 \ + --batch_size 4 \ --num_type_embeddings 3 \ --use_discriminator true \ --init_checkpoint $INIT_CHECKPOINT \ diff --git a/PaddleNLP/Research/Dialogue-PLATO/scripts/DSTC7_AVSD/train.sh b/PaddleNLP/Research/Dialogue-PLATO/scripts/DSTC7_AVSD/train.sh index 8009fa28..b2bd742d 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/scripts/DSTC7_AVSD/train.sh +++ b/PaddleNLP/Research/Dialogue-PLATO/scripts/DSTC7_AVSD/train.sh @@ -2,7 +2,7 @@ set -ux SAVE_DIR=outputs/DSTC7_AVSD -VOCAB_PATH=data/vocab.txt +VOCAB_PATH=model/Bert/vocab.txt DATA_DIR=data/DSTC7_AVSD INIT_CHECKPOINT=model/PLATO DATA_TYPE=multi_knowledge @@ -33,7 +33,7 @@ python -u \ --vocab_path $VOCAB_PATH \ --data_dir $DATA_DIR \ --data_type $DATA_TYPE \ - --batch_size 8 \ + --batch_size 4 \ --valid_steps 2000 \ --num_type_embeddings 3 \ --use_discriminator true \ diff --git a/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/baseline_infer.sh b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/baseline_infer.sh new file mode 100644 index 00000000..65a8f1be --- /dev/null +++ b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/baseline_infer.sh @@ -0,0 +1,35 @@ +#!/bin/bash +set -ux + +SAVE_DIR=outputs/DailyDialog.baseline.infer +VOCAB_PATH=model/Bert/vocab.txt +DATA_DIR=data/DailyDialog +INIT_CHECKPOINT=outputs/DailyDialog.baseline/best.model +DATA_TYPE=multi + +# CUDA environment settings. +export CUDA_VISIBLE_DEVICES=0 + +# Paddle environment settings. +export FLAGS_fraction_of_gpu_memory_to_use=0.1 +export FLAGS_eager_delete_scope=True +export FLAGS_eager_delete_tensor_gb=0.0 + +python -u \ + ./preprocess.py \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE + +python -u \ + ./run.py \ + --do_infer true \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE \ + --batch_size 48 \ + --num_latent 0 \ + --num_type_embeddings 2 \ + --init_checkpoint $INIT_CHECKPOINT \ + --length_average true \ + --save_dir $SAVE_DIR diff --git a/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/baseline_train.sh b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/baseline_train.sh new file mode 100644 index 00000000..f7593df3 --- /dev/null +++ b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/baseline_train.sh @@ -0,0 +1,49 @@ +#!/bin/bash +set -ux + +SAVE_DIR=outputs/DailyDialog.baseline +VOCAB_PATH=model-baseline/Bert/vocab.txt +DATA_DIR=data/DailyDialog +INIT_CHECKPOINT=model-baseline/PLATO.baseline +DATA_TYPE=multi +USE_VISUALDL=false + +# CUDA environment settings. +export CUDA_VISIBLE_DEVICES=2 + +# Paddle environment settings. +export FLAGS_fraction_of_gpu_memory_to_use=0.1 +export FLAGS_eager_delete_scope=True +export FLAGS_eager_delete_tensor_gb=0.0 + +python -u \ + ./preprocess.py \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE + +if [[ "$USE_VISUALDL" = true ]]; then + visualdl --logdir=$SAVE_DIR/summary --port=8083 --host=`hostname` & + VISUALDL_PID=$! 
+fi + +python -u \ + ./run.py \ + --do_train true \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE \ + --batch_size 2 \ + --valid_steps 2000 \ + --num_type_embeddings 2 \ + --num_latent 0 \ + --num_epoch 20 \ + --lr 1e-5 \ + --save_checkpoint false \ + --save_summary $USE_VISUALDL \ + --init_checkpoint $INIT_CHECKPOINT \ + --save_dir $SAVE_DIR + +if [[ $USE_VISUALDL = true ]]; then + kill $VISUALDL_PID +fi diff --git a/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/infer.sh b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/infer.sh index 70df8eb6..7857a175 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/infer.sh +++ b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/infer.sh @@ -2,7 +2,7 @@ set -ux SAVE_DIR=outputs/DailyDialog.infer -VOCAB_PATH=data/vocab.txt +VOCAB_PATH=model/Bert/vocab.txt DATA_DIR=data/DailyDialog INIT_CHECKPOINT=outputs/DailyDialog/best.model DATA_TYPE=multi @@ -15,13 +15,11 @@ export FLAGS_fraction_of_gpu_memory_to_use=0.1 export FLAGS_eager_delete_scope=True export FLAGS_eager_delete_tensor_gb=0.0 -if [[ ! -e $DATA_DIR/dial.test.jsonl ]]; then - python -u \ - ./preprocess.py \ - --vocab_path $VOCAB_PATH \ - --data_dir $DATA_DIR \ - --data_type $DATA_TYPE -fi +python -u \ + ./preprocess.py \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE python -u \ ./run.py \ @@ -29,8 +27,9 @@ python -u \ --vocab_path $VOCAB_PATH \ --data_dir $DATA_DIR \ --data_type $DATA_TYPE \ - --batch_size 2 \ + --batch_size 4 \ --num_type_embeddings 2 \ + --num_latent 20 \ --use_discriminator true \ --init_checkpoint $INIT_CHECKPOINT \ --save_dir $SAVE_DIR diff --git a/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/multi_gpu_train.sh b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/multi_gpu_train.sh new file mode 100644 index 00000000..446b1496 --- /dev/null +++ b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/multi_gpu_train.sh @@ -0,0 +1,55 @@ +#!/bin/bash +set -ux + +SAVE_DIR=outputs/DailyDialog +VOCAB_PATH=model/Bert/vocab.txt +DATA_DIR=data/DailyDialog +INIT_CHECKPOINT=model/PLATO +DATA_TYPE=multi +USE_VISUALDL=false + +# CUDA environment settings. +export CUDA_VISIBLE_DEVICES=0,1 + +# Paddle environment settings. +export FLAGS_fraction_of_gpu_memory_to_use=0.1 +export FLAGS_eager_delete_scope=True +export FLAGS_eager_delete_tensor_gb=0.0 + +if [[ ! -e $DATA_DIR/dial.train.jsonl ]]; then + python -u \ + ./preprocess.py \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE +fi + +if [[ "$USE_VISUALDL" = true ]]; then + visualdl --logdir=$SAVE_DIR/summary --port=8083 --host=`hostname` & + VISUALDL_PID=$! 
+fi + +python -m \ + paddle.distributed.launch \ + --log_dir $SAVE_DIR \ + --started_port 8888 \ + ./run.py \ + --use_data_distributed true \ + --do_train true \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE \ + --batch_size 6 \ + --valid_steps 2000 \ + --num_type_embeddings 2 \ + --use_discriminator true \ + --num_epoch 20 \ + --lr 1e-5 \ + --save_checkpoint false \ + --save_summary $USE_VISUALDL \ + --init_checkpoint $INIT_CHECKPOINT \ + --save_dir $SAVE_DIR + +if [[ $USE_VISUALDL = true ]]; then + kill $VISUALDL_PID +fi diff --git a/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/topk_infer.sh b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/topk_infer.sh new file mode 100644 index 00000000..a550f40c --- /dev/null +++ b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/topk_infer.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -ux + +SAVE_DIR=outputs/DailyDialog.infer +VOCAB_PATH=model/Bert/vocab.txt +DATA_DIR=data/DailyDialog +INIT_CHECKPOINT=outputs/DailyDialog/best.model +DATA_TYPE=multi + +# CUDA environment settings. +export CUDA_VISIBLE_DEVICES=0 + +# Paddle environment settings. +export FLAGS_fraction_of_gpu_memory_to_use=0.1 +export FLAGS_eager_delete_scope=True +export FLAGS_eager_delete_tensor_gb=0.0 + +if [[ ! -e $DATA_DIR/dial.test.jsonl ]]; then + python -u \ + ./preprocess.py \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE +fi + +python -u \ + ./run.py \ + --do_infer true \ + --generator TopKSampling \ + --top_k_num 10 \ + --sampling_temperate 0.8 \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE \ + --batch_size 16 \ + --num_type_embeddings 2 \ + --use_discriminator true \ + --init_checkpoint $INIT_CHECKPOINT \ + --save_dir $SAVE_DIR diff --git a/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/train.sh b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/train.sh index 640a66ae..cc53d39b 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/train.sh +++ b/PaddleNLP/Research/Dialogue-PLATO/scripts/DailyDialog/train.sh @@ -2,7 +2,7 @@ set -ux SAVE_DIR=outputs/DailyDialog -VOCAB_PATH=data/vocab.txt +VOCAB_PATH=model/Bert/vocab.txt DATA_DIR=data/DailyDialog INIT_CHECKPOINT=model/PLATO DATA_TYPE=multi @@ -16,13 +16,11 @@ export FLAGS_fraction_of_gpu_memory_to_use=0.1 export FLAGS_eager_delete_scope=True export FLAGS_eager_delete_tensor_gb=0.0 -if [[ ! 
-e $DATA_DIR/dial.train.jsonl ]]; then - python -u \ - ./preprocess.py \ - --vocab_path $VOCAB_PATH \ - --data_dir $DATA_DIR \ - --data_type $DATA_TYPE -fi +python -u \ + ./preprocess.py \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE if [[ "$USE_VISUALDL" = true ]]; then visualdl --logdir=$SAVE_DIR/summary --port=8083 --host=`hostname` & @@ -35,7 +33,7 @@ python -u \ --vocab_path $VOCAB_PATH \ --data_dir $DATA_DIR \ --data_type $DATA_TYPE \ - --batch_size 12 \ + --batch_size 6 \ --valid_steps 2000 \ --num_type_embeddings 2 \ --use_discriminator true \ diff --git a/PaddleNLP/Research/Dialogue-PLATO/scripts/PersonaChat/infer.sh b/PaddleNLP/Research/Dialogue-PLATO/scripts/PersonaChat/infer.sh index 88dd5b7f..06aa1e3f 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/scripts/PersonaChat/infer.sh +++ b/PaddleNLP/Research/Dialogue-PLATO/scripts/PersonaChat/infer.sh @@ -2,7 +2,7 @@ set -ux SAVE_DIR=outputs/PersonaChat.infer -VOCAB_PATH=data/vocab.txt +VOCAB_PATH=model/Bert/vocab.txt DATA_DIR=data/PersonaChat INIT_CHECKPOINT=outputs/PersonaChat/best.model DATA_TYPE=multi_knowledge @@ -15,13 +15,11 @@ export FLAGS_fraction_of_gpu_memory_to_use=0.1 export FLAGS_eager_delete_scope=True export FLAGS_eager_delete_tensor_gb=0.0 -if [[ ! -e $DATA_DIR/dial.test.jsonl ]]; then - python -u \ - ./preprocess.py \ - --vocab_path $VOCAB_PATH \ - --data_dir $DATA_DIR \ - --data_type $DATA_TYPE -fi +python -u \ + ./preprocess.py \ + --vocab_path $VOCAB_PATH \ + --data_dir $DATA_DIR \ + --data_type $DATA_TYPE python -u \ ./run.py \ diff --git a/PaddleNLP/Research/Dialogue-PLATO/scripts/PersonaChat/train.sh b/PaddleNLP/Research/Dialogue-PLATO/scripts/PersonaChat/train.sh index d76ab2b7..480024f4 100644 --- a/PaddleNLP/Research/Dialogue-PLATO/scripts/PersonaChat/train.sh +++ b/PaddleNLP/Research/Dialogue-PLATO/scripts/PersonaChat/train.sh @@ -2,7 +2,7 @@ set -ux SAVE_DIR=outputs/PersonaChat -VOCAB_PATH=data/vocab.txt +VOCAB_PATH=model/Bert/vocab.txt DATA_DIR=data/PersonaChat INIT_CHECKPOINT=model/PLATO DATA_TYPE=multi_knowledge @@ -33,7 +33,7 @@ python -u \ --vocab_path $VOCAB_PATH \ --data_dir $DATA_DIR \ --data_type $DATA_TYPE \ - --batch_size 12 \ + --batch_size 4 \ --valid_steps 2000 \ --num_type_embeddings 3 \ --use_discriminator true \ -- GitLab
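`scripts/DailyDialog/topk_infer.sh` above switches the generator to `TopKSampling` with `--top_k_num 10` and `--sampling_temperate 0.8`. The sampling rule itself is standard: divide the logits by the temperature, keep the k most likely tokens, renormalise, and draw one. A NumPy sketch of a single decoding step (illustrative, independent of the Paddle implementation in `plato/models/generator.py`):

```python
import numpy as np

def top_k_sample(logits, k=10, temperature=0.8, rng=np.random):
    """Sample one token id from the renormalised top-k distribution."""
    logits = np.asarray(logits, dtype="float64") / temperature
    top_ids = np.argsort(logits)[-k:]              # indices of the k highest logits
    top_probs = np.exp(logits[top_ids] - logits[top_ids].max())
    top_probs /= top_probs.sum()
    return int(rng.choice(top_ids, p=top_probs))

vocab_logits = np.random.randn(8000)               # toy vocabulary-sized logits
next_token = top_k_sample(vocab_logits)
print(next_token)
```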