From 3fb0b4911d5be66ef157df3d46d046e16ffc7b36 Mon Sep 17 00:00:00 2001 From: liyukun01 Date: Wed, 20 May 2020 16:18:36 +0800 Subject: [PATCH] Fixed citation format --- README.eng.md | 24 ++++++++++++++++++------ README.md | 25 +++++++++++++++++++------ experimental/seq2seq/README.md | 6 +++--- 3 files changed, 40 insertions(+), 15 deletions(-) diff --git a/README.eng.md b/README.eng.md index bc8b6bc..5d24c36 100644 --- a/README.eng.md +++ b/README.eng.md @@ -234,24 +234,36 @@ Knowledge distillation is good way to compress and accelerate ERNIE. For details about distillation, see [here](./distill/README.md) -### Citation +# Citation -please cite [ERNIE 2.0](https://arxiv.org/abs/1907.12412): +### ERNIE 1.0 +``` +@article{sun2019ernie, + title={Ernie: Enhanced representation through knowledge integration}, + author={Sun, Yu and Wang, Shuohuan and Li, Yukun and Feng, Shikun and Chen, Xuyi and Zhang, Han and Tian, Xin and Zhu, Danxiang and Tian, Hao and Wu, Hua}, + journal={arXiv preprint arXiv:1904.09223}, + year={2019} +} +``` +### ERNIE 2.0 ``` -@article{SunERNIE, +@article{sun2019ernie20, title={ERNIE 2.0: A Continual Pre-training Framework for Language Understanding}, author={Sun, Yu and Wang, Shuohuan and Li, Yukun and Feng, Shikun and Tian, Hao and Wu, Hua and Wang, Haifeng}, + journal={arXiv preprint arXiv:1907.12412}, + year={2019} } ``` -and [ERNIE Gen](https://arxiv.org/abs/2001.11314) +### ERNIE-GEN ``` -@article{Xiao2020ERNIE, +@article{xiao2020ernie-gen, title={ERNIE-GEN: An Enhanced Multi-Flow Pre-training and Fine-tuning Framework for Natural Language Generation}, author={Xiao, Dongling and Zhang, Han and Li, Yukun and Sun, Yu and Tian, Hao and Wu, Hua and Wang, Haifeng}, - year={2020}, + journal={arXiv preprint arXiv:2001.11314}, + year={2020} } ``` diff --git a/README.md b/README.md index 90d720f..62f8749 100644 --- a/README.md +++ b/README.md @@ -235,23 +235,36 @@ ids = np.expand_dims(ids, -1) # ids.shape==[BATCH, SEQLEN, 1] 
知识蒸馏是进行ERNIE模型压缩、加速的有效方式;关于知识蒸馏的实现细节请参见[这里](./distill/README.md)。 -### 引用 +# 文献引用 -[ERNIE 2.0](https://arxiv.org/abs/1907.12412) +### ERNIE 1.0 ``` -@article{SunERNIE, +@article{sun2019ernie, + title={Ernie: Enhanced representation through knowledge integration}, + author={Sun, Yu and Wang, Shuohuan and Li, Yukun and Feng, Shikun and Chen, Xuyi and Zhang, Han and Tian, Xin and Zhu, Danxiang and Tian, Hao and Wu, Hua}, + journal={arXiv preprint arXiv:1904.09223}, + year={2019} +} +``` + +### ERNIE 2.0 +``` +@article{sun2019ernie20, title={ERNIE 2.0: A Continual Pre-training Framework for Language Understanding}, author={Sun, Yu and Wang, Shuohuan and Li, Yukun and Feng, Shikun and Tian, Hao and Wu, Hua and Wang, Haifeng}, + journal={arXiv preprint arXiv:1907.12412}, + year={2019} } ``` -[ERNIE Gen](https://arxiv.org/abs/2001.11314) +### ERNIE-GEN ``` -@article{Xiao2020ERNIE, +@article{xiao2020ernie-gen, title={ERNIE-GEN: An Enhanced Multi-Flow Pre-training and Fine-tuning Framework for Natural Language Generation}, author={Xiao, Dongling and Zhang, Han and Li, Yukun and Sun, Yu and Tian, Hao and Wu, Hua and Wang, Haifeng}, - year={2020}, + journal={arXiv preprint arXiv:2001.11314}, + year={2020} } ``` diff --git a/experimental/seq2seq/README.md b/experimental/seq2seq/README.md index 42f02f8..dc75330 100644 --- a/experimental/seq2seq/README.md +++ b/experimental/seq2seq/README.md @@ -1,13 +1,13 @@ -# ERNIE Gen +# ERNIE-GEN [ERNIE-GEN](https://arxiv.org/pdf/2001.11314.pdf) is a multi-flow language generation framework for both pre-training and fine-tuning. Only finetune strategy is illustrated in this section. 
## Finetune -We use Abstractive Summarization task CNN/DailyMail to illustate usage of ERNIE Gen, you can download preprocessed finetune data from [here](https://ernie-github.cdn.bcebos.com/data-cnndm.tar.gz) +We use Abstractive Summarization task CNN/DailyMail to illustrate usage of ERNIE-GEN, you can download preprocessed finetune data from [here](https://ernie-github.cdn.bcebos.com/data-cnndm.tar.gz) -To starts finetuning ERNIE Gen, run: +To start finetuning ERNIE-GEN, run: ```script python3 -m paddle.distributed.launch \ -- GitLab