From a29003b70819c4c4ed5d65654590cdf2aa2e731e Mon Sep 17 00:00:00 2001
From: Hongyu Liu <43953930+phlrain@users.noreply.github.com>
Date: Mon, 1 Jul 2019 15:36:26 +0800
Subject: [PATCH] Add seq2seq padding (#2603)
* change seq2seq to padding impl; test=develop
* add bleu result; test=develop
* fix format; test=develop
* fix format; test=develop
---
.../rnn_search/.run_ce.sh | 5 -
.../rnn_search/README.md | 152 ++--
.../rnn_search/_ce.py | 63 --
.../rnn_search/args.py | 123 ++--
.../rnn_search/attention_model.py | 681 ++++++++++++------
.../rnn_search/base_model.py | 502 +++++++++++++
.../rnn_search/data/download_en-vi.sh | 33 +
.../rnn_search/images/bi_rnn.png | Bin 171455 -> 0 bytes
.../rnn_search/images/decoder_attention.png | Bin 83630 -> 0 bytes
.../rnn_search/images/encoder_attention.png | Bin 47539 -> 0 bytes
.../rnn_search/infer.py | 218 +++---
.../rnn_search/infer.sh | 21 +
.../rnn_search/no_attention_model.py | 127 ----
.../rnn_search/reader.py | 210 ++++++
.../rnn_search/run.sh | 22 +
.../rnn_search/train.py | 265 ++++---
16 files changed, 1658 insertions(+), 764 deletions(-)
delete mode 100755 PaddleNLP/unarchived/neural_machine_translation/rnn_search/.run_ce.sh
delete mode 100644 PaddleNLP/unarchived/neural_machine_translation/rnn_search/_ce.py
create mode 100644 PaddleNLP/unarchived/neural_machine_translation/rnn_search/base_model.py
create mode 100755 PaddleNLP/unarchived/neural_machine_translation/rnn_search/data/download_en-vi.sh
delete mode 100644 PaddleNLP/unarchived/neural_machine_translation/rnn_search/images/bi_rnn.png
delete mode 100644 PaddleNLP/unarchived/neural_machine_translation/rnn_search/images/decoder_attention.png
delete mode 100644 PaddleNLP/unarchived/neural_machine_translation/rnn_search/images/encoder_attention.png
create mode 100644 PaddleNLP/unarchived/neural_machine_translation/rnn_search/infer.sh
delete mode 100644 PaddleNLP/unarchived/neural_machine_translation/rnn_search/no_attention_model.py
create mode 100644 PaddleNLP/unarchived/neural_machine_translation/rnn_search/reader.py
create mode 100644 PaddleNLP/unarchived/neural_machine_translation/rnn_search/run.sh
diff --git a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/.run_ce.sh b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/.run_ce.sh
deleted file mode 100755
index 6be159cb..00000000
--- a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/.run_ce.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-###!/bin/bash
-####This file is only used for continuous evaluation.
-
-model_file='train.py'
-python $model_file --pass_num 1 --learning_rate 0.001 --save_interval 10 --enable_ce | python _ce.py
diff --git a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/README.md b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/README.md
index 556ea6f5..991ee9cc 100644
--- a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/README.md
+++ b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/README.md
@@ -10,8 +10,11 @@
├── args.py # training, inference and model parameters
├── train.py # main training program
├── infer.py # main inference program
+├── run.sh # training launch script with the default configuration
+├── infer.sh # decoding script with the default configuration
├── attention_model.py # translation model with attention
-└── no_attention_model.py # translation model without attention
+└── base_model.py # translation model without attention
+
+
```
## Introduction
@@ -19,116 +22,93 @@
In recent years, advances in deep learning have continually brought new breakthroughs to machine translation. Models that map the source language directly to the target language with a neural network, i.e. end-to-end neural machine translation (End-to-End Neural Machine Translation, End-to-End NMT), have gradually become mainstream and are commonly referred to simply as NMT models.
-This directory contains a Paddle Fluid implementation of the classic machine translation model [RNN Search](https://arxiv.org/pdf/1409.0473.pdf). RNN Search is a rather traditional NMT model whose performance has by now been surpassed by many newer models (such as the [Transformer](https://arxiv.org/abs/1706.03762)). Beyond machine translation, however, it is the foundation of many sequence-to-sequence (Seq2Seq) models, on which many models for other NLP problems are based; it is therefore of great significance in NLP and is widely used as a baseline.
+This directory contains two classic machine translation models: a base model (without attention) and a translation model with attention. Their performance has by now been surpassed by many newer models (such as the [Transformer](https://arxiv.org/abs/1706.03762)). Beyond machine translation, however, such models are the foundation of many sequence-to-sequence (Seq2Seq) models, on which many models for other NLP problems are based; they are therefore of great significance in NLP and are widely used as baselines.
The example implementation in this directory aims to show how to use Paddle Fluid to build an RNN model with attention for Seq2Seq problems, and how to use a decoder with beam search. If you simply need a model with good translation quality, we recommend the [Paddle Fluid implementation of the Transformer](https://github.com/PaddlePaddle/models/tree/develop/fluid/neural_machine_translation/transformer).
## Model Overview
The RNN Search model uses the classic encoder-decoder framework to solve Seq2Seq problems: an encoder first encodes the source sequence into a vector, and a decoder then decodes that vector into the target sequence. This mimics how humans translate: first parse the source language and understand its meaning, then write the target-language sentence according to that meaning. Both the encoder and the decoder are usually implemented with RNNs. For the underlying principles and mathematical formulation, see [Deep Learning 101](http://paddlepaddle.org/documentation/docs/zh/1.2/beginners_guide/basics/machine_translation/index.html).
-In this model, the encoder is a bidirectional recurrent neural network; the decoder is an attention-based RNN decoder, with a decoder without attention also provided for comparison; for inference, beam search is used to generate the target sentence. These methods are introduced below.
-
-### Bidirectional Recurrent Neural Network
-Here we introduce the bidirectional recurrent network structure proposed by Bengio's group in \[[2](#references),[4](#references)\]. Given an input sequence, it produces a feature representation at every time step, i.e. at each time step the output is a fixed-length vector representing the contextual semantic information up to that point.
-Concretely, this bidirectional recurrent network processes the input sequence in temporal order and in reverse order, i.e. forward and backward, and concatenates the RNN outputs at each time step into the final output layer. The output node at each time step thus contains the complete past and future context of the input sequence at that moment. The figure below shows a bidirectional recurrent network unrolled over time. It contains one forward and one backward RNN with six weight matrices: from the input to the forward and backward hidden layers ($W_1, W_3$), from the hidden layers to themselves ($W_2, W_5$), and from the forward and backward hidden layers to the output layer ($W_4, W_6$). Note that there are no connections between the forward and backward hidden layers.
-
-
-
-Figure 1. A bidirectional recurrent neural network unrolled over time
-
-
-
-
-Figure 2. Encoder using a bidirectional LSTM
-
-
-### Attention Mechanism
-If the encoder outputs a single fixed-dimensional vector, two problems arise: 1) whether the source sequence is 5 words or 50 words long, encoding its semantic and syntactic information into a fixed-dimensional vector is very demanding for the model, especially for long sentences; 2) intuitively, when translating a sentence, people pay more attention to the source fragments most relevant to the current output, and the focus shifts as the translation proceeds, whereas a fixed-dimensional vector amounts to paying equal attention to all source information at all times, which is unreasonable. Bahdanau et al. \[[4](#references)\] therefore introduced the attention mechanism, which decodes against encoded context fragments to address the difficulty of learning representations for long sentences. The decoder structure with attention is described below.
-
-Unlike the simple decoder, $z_i$ is computed here as (since GitHub does not natively render LaTeX formulas, please see [here](http://www.paddlepaddle.org/documentation/docs/zh/1.2/beginners_guide/basics/machine_translation/index.html)):
-
-$$z_{i+1}=\phi _{\theta '}\left ( c_i,u_i,z_i \right )$$
-
-As can be seen, the encoded representation of the source sentence is the context fragment $c_i$ for the $i$-th word, i.e. each target-language word $u_i$ has its own corresponding $c_i$, computed as:
-
-$$c_i=\sum _{j=1}^{T}a_{ij}h_j, a_i=\left[ a_{i1},a_{i2},...,a_{iT}\right ]$$
+In this model, the encoder is a multi-layer LSTM encoder; the decoder is an attention-based RNN decoder, with a decoder without attention also provided for comparison; for inference, beam search is used to generate the target sentence. These methods are introduced below.
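+
+To make the attention step concrete, here is a minimal NumPy sketch of the dot-product attention used by the decoder (a simplified illustration of `dot_attention` in `attention_model.py`; names and shapes are illustrative):
+
+```python
+import numpy as np
+
+def dot_attention(query, memory, mask=None):
+    # query: [batch, 1, hidden]; memory: [batch, src_len, hidden]
+    scores = np.matmul(query, memory.transpose(0, 2, 1))  # [batch, 1, src_len]
+    if mask is not None:
+        # mask is 1 for real tokens, 0 for padding; push padded scores to -1e9
+        scores = scores + (mask[:, None, :] - 1.0) * 1e9
+    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
+    weights = weights / weights.sum(axis=-1, keepdims=True)  # softmax
+    context = np.matmul(weights, memory)  # weighted sum of encoder states
+    return context, weights
+```
+
+In the actual model, the encoder memory is first projected by a learned weight matrix, and the resulting context vector is concatenated with the decoder state before the output projection.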
-As the formula shows, the attention mechanism is realized as a weighted average of the encoder RNN states $h_j$ at each time step. The weight $a_{ij}$ denotes how much attention the $i$-th target word pays to the $j$-th source word and is computed as:
-
-$$a_{ij} = {exp(e_{ij}) \over {\sum_{k=1}^T exp(e_{ik})}}$$
-$$e_{ij} = {align(z_i, h_j)}$$
-
-Here $align$ can be viewed as an alignment model that measures how well the $i$-th target word matches the $j$-th source word; concretely, this match is computed from the $i$-th hidden state $z_i$ of the decoder RNN and the $j$-th context fragment $h_j$ of the source sentence. In traditional alignment models, each target word corresponds explicitly to one or more source words (hard alignment); the attention model instead uses soft alignment, in which any target word and any source word have some association whose strength is a real number computed by the model, so it can be integrated into the whole NMT framework and trained by back-propagation.
-
-
-
-Figure 3. Attention-based decoder
-
+## Dataset
-### Beam Search
+This tutorial uses the English-to-Vietnamese data from the [IWSLT'15 English-Vietnamese dataset](https://nlp.stanford.edu/projects/nmt/) as the training corpus, the tst2012 data as the dev set, and the tst2013 data as the test set.
-Beam search ([beam search](http://en.wikipedia.org/wiki/Beam_search)) is a heuristic graph-search algorithm that searches for optimal expansion nodes within a limited set in a graph or tree. It is typically used in systems with very large solution spaces (such as machine translation and speech recognition), because memory cannot hold all the expanded solutions of the graph or tree. For instance, to translate "`你好`" ("hello") in a machine translation task, even if the target vocabulary contains only 3 words (``, ``, `hello`), infinitely many sentences could be generated (the number of occurrences of `hello` is unbounded); beam search can be used to find good translations among them.
+### Downloading the Data
+```sh
+cd data && sh download_en-vi.sh
+```
-Beam search builds its search tree with a breadth-first strategy. At each level of the tree, the nodes are sorted by heuristic cost (in this tutorial, the sum of the log probabilities of the generated words) and only a predetermined number of nodes (usually called the beam width, beam size, etc. in the literature) are kept. Only these nodes are expanded at the next level, while the rest are pruned; that is, higher-quality nodes are kept and lower-quality ones are discarded. This greatly reduces the time and space required by the search, at the cost of no longer guaranteeing an optimal solution.
-In the decoding stage with beam search, the goal is to maximize the probability of the generated sequence. The procedure is:
+## Training
-1. At each time step, compute the next hidden state $z_{i+1}$ from the encoding $c$ of the source sentence, the $i$-th generated target word $u_i$, and the RNN hidden state $z_i$ at step $i$.
-2. Normalize $z_{i+1}$ through `softmax` to obtain the probability distribution $p_{i+1}$ over the $(i+1)$-th target word.
-3. Sample the word $u_{i+1}$ according to $p_{i+1}$.
-4. Repeat steps 1-3 until the end-of-sequence marker `` is generated or the maximum sentence length is exceeded.
+`run.sh` launches the training program with the default configuration. To start training, simply run:
+```sh
+sh run.sh
+```
-Note: $z_{i+1}$ and $p_{i+1}$ are computed with the same formulas as in the decoder, and since each generation step is greedy, the result is not guaranteed to be globally optimal.
+This is equivalent to invoking `train.py` directly with the following arguments:
+```sh
+ python train.py \
+ --src_lang en --tar_lang vi \
+ --attention True \
+ --num_layers 2 \
+ --hidden_size 512 \
+ --src_vocab_size 17191 \
+ --tar_vocab_size 7709 \
+ --batch_size 128 \
+ --dropout 0.2 \
+ --init_scale 0.1 \
+ --max_grad_norm 5.0 \
+ --train_data_prefix data/en-vi/train \
+ --eval_data_prefix data/en-vi/tst2012 \
+ --test_data_prefix data/en-vi/tst2013 \
+ --vocab_prefix data/en-vi/vocab \
+ --use_gpu True
-## Dataset
+```
-This tutorial uses the [bitexts (after selection)](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/bitexts.tgz) from the [WMT-14](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/) dataset as the training set, and the [dev+test data](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/dev+test.tgz) as the test and generation sets.
-### Data Preprocessing
+The training program saves a model checkpoint at the end of every epoch.
-Our preprocessing consists of two steps:
-- Merge each pair of source- and target-language parallel corpus files into a single file:
-  - Merge each `XXX.src` and `XXX.trg` file into `XXX`.
-  - The $i$-th line of `XXX` is the $i$-th line of `XXX.src` joined with the $i$-th line of `XXX.trg`, separated by '\t'.
-- Build a "source dictionary" and a "target dictionary" from the training data. Each dictionary has **DICTSIZE** words: the (DICTSIZE - 3) most frequent words in the corpus plus the 3 special tokens `` (start of sequence), `` (end of sequence) and `` (out-of-vocabulary word).
+After training completes, the `infer.py` script can be used for inference. By default it decodes the test set with beam search, loading the model saved after the 10th epoch:
+```sh
+sh infer.sh
+```
+To decode a different data file, simply change the `--infer_file` argument:
-### Sample Data
+```sh
+ python infer.py \
+ --src_lang en --tar_lang vi \
+ --num_layers 2 \
+ --hidden_size 512 \
+ --src_vocab_size 17191 \
+ --tar_vocab_size 7709 \
+ --batch_size 128 \
+ --dropout 0.2 \
+ --init_scale 0.1 \
+ --max_grad_norm 5.0 \
+ --vocab_prefix data/en-vi/vocab \
+ --infer_file data/en-vi/tst2013.en \
+ --reload_model model_new/epoch_10/ \
+ --use_gpu True
-Because the full dataset is large, the PaddlePaddle interface paddle.dataset.wmt14 provides by default a preprocessed [smaller dataset](http://paddlepaddle.bj.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz) for validating the training pipeline.
+```
-This dataset has 193319 training sentences and 6003 test sentences, with a dictionary size of 30000. Due to the limited data size, models trained on it are not guaranteed to perform well.
+## Results
-## Training
+Single model, beam_size = 10:
-`train.py` contains the main training routine. To start training with the default parameters, simply run:
```sh
-python train.py
-```
-You can set the training parameters via command-line arguments. To list all available arguments, run:
-```sh
-python train.py -h
-```
-This prints the description and default value of every command-line argument. The default model uses attention. You can also run the model without attention:
-```sh
-python train.py --no_attention
-```
-Trained models are saved under `./models` by default. You can set the save path with the `--save_dir` argument. By default a model is saved at the end of every pass.
+no attention
-## Generating Predictions
+tst2012 BLEU: 11.58
+tst2013 BLEU: 12.20
-After the model is trained, `infer.py` can be used to generate predictions. With the default parameters, simply run:
-```sh
-python infer.py
-```
-You can likewise set the arguments from the command line. Note that the inference settings must exactly match those used during training, otherwise loading the model will fail. The `--pass_num` argument selects which pass's saved model to load, and `--beam_width` sets the beam search width.
-## References
-1. Koehn P. [Statistical machine translation](https://books.google.com.hk/books?id=4v_Cx1wIMLkC&printsec=frontcover&hl=zh-CN&source=gbs_ge_summary_r&cad=0#v=onepage&q&f=false)[M]. Cambridge University Press, 2009.
-2. Cho K, Van Merriënboer B, Gulcehre C, et al. [Learning phrase representations using RNN encoder-decoder for statistical machine translation](http://www.aclweb.org/anthology/D/D14/D14-1179.pdf)[C]//Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2014: 1724-1734.
-3. Chung J, Gulcehre C, Cho K H, et al. [Empirical evaluation of gated recurrent neural networks on sequence modeling](https://arxiv.org/abs/1412.3555)[J]. arXiv preprint arXiv:1412.3555, 2014.
-4. Bahdanau D, Cho K, Bengio Y. [Neural machine translation by jointly learning to align and translate](https://arxiv.org/abs/1409.0473)[C]//Proceedings of ICLR 2015, 2015.
-5. Papineni K, Roukos S, Ward T, et al. [BLEU: a method for automatic evaluation of machine translation](http://dl.acm.org/citation.cfm?id=1073135)[C]//Proceedings of the 40th annual meeting on association for computational linguistics. Association for Computational Linguistics, 2002: 311-318.
+with attention
-
-
This tutorial was created by PaddlePaddle and is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.
+tst2012 BLEU: 22.21
+tst2013 BLEU: 25.30
+```
diff --git a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/_ce.py b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/_ce.py
deleted file mode 100644
index e00ac492..00000000
--- a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/_ce.py
+++ /dev/null
@@ -1,63 +0,0 @@
-####this file is only used for continuous evaluation test!
-
-import os
-import sys
-sys.path.append(os.environ['ceroot'])
-from kpi import CostKpi, DurationKpi, AccKpi
-
-#### NOTE kpi.py should shared in models in some way!!!!
-
-train_cost_kpi = CostKpi('train_cost', 0.02, 0, actived=False)
-test_cost_kpi = CostKpi('test_cost', 0.005, 0, actived=False)
-train_duration_kpi = DurationKpi('train_duration', 0.06, 0, actived=False)
-
-tracking_kpis = [
- train_cost_kpi,
- test_cost_kpi,
- train_duration_kpi,
-]
-
-
-def parse_log(log):
- '''
- This method should be implemented by model developers.
-
- The suggestion:
-
- each line in the log should be key, value, for example:
-
- "
- train_cost\t1.0
- test_cost\t1.0
- train_cost\t1.0
- train_cost\t1.0
- train_acc\t1.2
- "
- '''
- for line in log.split('\n'):
- fs = line.strip().split('\t')
- print(fs)
- if len(fs) == 3 and fs[0] == 'kpis':
- print("-----%s" % fs)
- kpi_name = fs[1]
- kpi_value = float(fs[2])
- yield kpi_name, kpi_value
-
-
-def log_to_ce(log):
- kpi_tracker = {}
- for kpi in tracking_kpis:
- kpi_tracker[kpi.name] = kpi
-
- for (kpi_name, kpi_value) in parse_log(log):
- print(kpi_name, kpi_value)
- kpi_tracker[kpi_name].add_record(kpi_value)
- kpi_tracker[kpi_name].persist()
-
-
-if __name__ == '__main__':
- log = sys.stdin.read()
- print("*****")
- print(log)
- print("****")
- log_to_ce(log)
diff --git a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/args.py b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/args.py
index 16f97488..494289a7 100644
--- a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/args.py
+++ b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/args.py
@@ -23,76 +23,95 @@ import distutils.util
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
- "--embedding_dim",
- type=int,
- default=512,
- help="The dimension of embedding table. (default: %(default)d)")
+ "--train_data_prefix", type=str, help="file prefix for train data")
parser.add_argument(
- "--encoder_size",
- type=int,
- default=512,
- help="The size of encoder bi-rnn unit. (default: %(default)d)")
+ "--eval_data_prefix", type=str, help="file prefix for eval data")
parser.add_argument(
- "--decoder_size",
- type=int,
- default=512,
- help="The size of decoder rnn unit. (default: %(default)d)")
+ "--test_data_prefix", type=str, help="file prefix for test data")
parser.add_argument(
- "--batch_size",
- type=int,
- default=32,
- help="The sequence number of a mini-batch data. (default: %(default)d)")
+ "--vocab_prefix", type=str, help="file prefix for vocab")
+ parser.add_argument("--src_lang", type=str, help="source language suffix")
+ parser.add_argument("--tar_lang", type=str, help="target language suffix")
+
parser.add_argument(
- "--dict_size",
- type=int,
- default=30000,
- help="The dictionary capacity. Dictionaries of source sequence and "
- "target dictionary have same capacity. (default: %(default)d)")
+ "--attention",
+        type=distutils.util.strtobool,
+        default=False,
+        help="Whether to use the attention model")
+
parser.add_argument(
- "--pass_num",
- type=int,
- default=5,
- help="The pass number to train. In inference mode, load the saved model"
- " at the end of given pass.(default: %(default)d)")
+ "--optimizer",
+ type=str,
+ default='adam',
+ help="optimizer to use, only supprt[sgd|adam]")
+
parser.add_argument(
"--learning_rate",
type=float,
- default=0.01,
- help="Learning rate used to train the model. (default: %(default)f)")
+ default=0.001,
+ help="learning rate for optimizer")
+
parser.add_argument(
- "--no_attention",
- action='store_true',
- help="If set, run no attention model instead of attention model.")
+ "--num_layers",
+ type=int,
+ default=1,
+ help="layers number of encoder and decoder")
parser.add_argument(
- "--beam_size",
+ "--hidden_size",
type=int,
- default=3,
- help="The width for beam search. (default: %(default)d)")
+ default=100,
+ help="hidden size of encoder and decoder")
+ parser.add_argument("--src_vocab_size", type=int, help="source vocab size")
+ parser.add_argument("--tar_vocab_size", type=int, help="target vocab size")
+
+ parser.add_argument(
+ "--batch_size", type=int, help="batch size of each step")
+
parser.add_argument(
- "--use_gpu",
- type=distutils.util.strtobool,
- default=True,
- help="Whether to use gpu or not. (default: %(default)d)")
+ "--max_epoch", type=int, default=12, help="max epoch for the training")
+
parser.add_argument(
- "--max_length",
+ "--max_len",
type=int,
default=50,
- help="The maximum sequence length for translation result."
- "(default: %(default)d)")
+ help="max length for source and target sentence")
parser.add_argument(
- "--save_dir",
+ "--dropout", type=float, default=0.0, help="drop probability")
+ parser.add_argument(
+ "--init_scale",
+ type=float,
+ default=0.0,
+ help="init scale for parameter")
+ parser.add_argument(
+ "--max_grad_norm",
+ type=float,
+ default=5.0,
+ help="max grad norm for global norm clip")
+
+ parser.add_argument(
+ "--model_path",
type=str,
- default="model",
- help="Specify the path to save trained models.")
+ default='./model',
+ help="model path for model to save")
+
parser.add_argument(
- "--save_interval",
- type=int,
- default=1,
- help="Save the trained model every n passes."
- "(default: %(default)d)")
+ "--reload_model", type=str, help="reload model to inference")
+
+ parser.add_argument(
+ "--infer_file", type=str, help="file name for inference")
+ parser.add_argument(
+ "--infer_output_file",
+ type=str,
+ default='./infer_output',
+ help="file name for inference output")
+ parser.add_argument(
+ "--beam_size", type=int, default=10, help="file name for inference")
+
parser.add_argument(
- "--enable_ce",
- action='store_true',
- help="If set, run the task with continuous evaluation logs.")
+ '--use_gpu',
+        type=distutils.util.strtobool,
+        default=False,
+        help='Whether to use gpu [True|False]')
+
args = parser.parse_args()
return args
diff --git a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/attention_model.py b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/attention_model.py
index 0c726977..eba1d5f3 100644
--- a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/attention_model.py
+++ b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/attention_model.py
@@ -1,220 +1,471 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import paddle.fluid.layers as layers
import paddle.fluid as fluid
-from paddle.fluid.contrib.decoder.beam_search_decoder import *
-
-
-def lstm_step(x_t, hidden_t_prev, cell_t_prev, size):
- def linear(inputs):
- return fluid.layers.fc(input=inputs, size=size, bias_attr=True)
-
- forget_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
- input_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
- output_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
- cell_tilde = fluid.layers.tanh(x=linear([hidden_t_prev, x_t]))
-
- cell_t = fluid.layers.sums(input=[
- fluid.layers.elementwise_mul(
- x=forget_gate, y=cell_t_prev), fluid.layers.elementwise_mul(
- x=input_gate, y=cell_tilde)
- ])
-
- hidden_t = fluid.layers.elementwise_mul(
- x=output_gate, y=fluid.layers.tanh(x=cell_t))
-
- return hidden_t, cell_t
-
-
-def seq_to_seq_net(embedding_dim, encoder_size, decoder_size, source_dict_dim,
- target_dict_dim, is_generating, beam_size, max_length):
- """Construct a seq2seq network."""
-
- def bi_lstm_encoder(input_seq, gate_size):
- # A bi-directional lstm encoder implementation.
- # Linear transformation part for input gate, output gate, forget gate
- # and cell activation vectors need be done outside of dynamic_lstm.
- # So the output size is 4 times of gate_size.
- input_forward_proj = fluid.layers.fc(input=input_seq,
- size=gate_size * 4,
- act='tanh',
- bias_attr=False)
- forward, _ = fluid.layers.dynamic_lstm(
- input=input_forward_proj, size=gate_size * 4, use_peepholes=False)
- input_reversed_proj = fluid.layers.fc(input=input_seq,
- size=gate_size * 4,
- act='tanh',
- bias_attr=False)
- reversed, _ = fluid.layers.dynamic_lstm(
- input=input_reversed_proj,
- size=gate_size * 4,
- is_reverse=True,
- use_peepholes=False)
- return forward, reversed
-
- # The encoding process. Encodes the input words into tensors.
- src_word_idx = fluid.layers.data(
- name='source_sequence', shape=[1], dtype='int64', lod_level=1)
-
- src_embedding = fluid.layers.embedding(
- input=src_word_idx,
- size=[source_dict_dim, embedding_dim],
- dtype='float32')
-
- src_forward, src_reversed = bi_lstm_encoder(
- input_seq=src_embedding, gate_size=encoder_size)
-
- encoded_vector = fluid.layers.concat(
- input=[src_forward, src_reversed], axis=1)
-
- encoded_proj = fluid.layers.fc(input=encoded_vector,
- size=decoder_size,
- bias_attr=False)
-
- backward_first = fluid.layers.sequence_pool(
- input=src_reversed, pool_type='first')
-
- decoder_boot = fluid.layers.fc(input=backward_first,
- size=decoder_size,
- bias_attr=False,
- act='tanh')
-
- cell_init = fluid.layers.fill_constant_batch_size_like(
- input=decoder_boot,
- value=0.0,
- shape=[-1, decoder_size],
- dtype='float32')
- cell_init.stop_gradient = False
-
- # Create a RNN state cell by providing the input and hidden states, and
- # specifies the hidden state as output.
- h = InitState(init=decoder_boot, need_reorder=True)
- c = InitState(init=cell_init)
-
- state_cell = StateCell(
- inputs={'x': None,
- 'encoder_vec': None,
- 'encoder_proj': None},
- states={'h': h,
- 'c': c},
- out_state='h')
-
- def simple_attention(encoder_vec, encoder_proj, decoder_state):
- # The implementation of simple attention model
- decoder_state_proj = fluid.layers.fc(input=decoder_state,
- size=decoder_size,
- bias_attr=False)
- decoder_state_expand = fluid.layers.sequence_expand(
- x=decoder_state_proj, y=encoder_proj)
- # concated lod should inherit from encoder_proj
- mixed_state = encoder_proj + decoder_state_expand
- attention_weights = fluid.layers.fc(input=mixed_state,
- size=1,
- bias_attr=False)
- attention_weights = fluid.layers.sequence_softmax(
- input=attention_weights)
- weigths_reshape = fluid.layers.reshape(x=attention_weights, shape=[-1])
- scaled = fluid.layers.elementwise_mul(
- x=encoder_vec, y=weigths_reshape, axis=0)
- context = fluid.layers.sequence_pool(input=scaled, pool_type='sum')
- return context
-
- @state_cell.state_updater
- def state_updater(state_cell):
- # Define the updater of RNN state cell
- current_word = state_cell.get_input('x')
- encoder_vec = state_cell.get_input('encoder_vec')
- encoder_proj = state_cell.get_input('encoder_proj')
- prev_h = state_cell.get_state('h')
- prev_c = state_cell.get_state('c')
- context = simple_attention(encoder_vec, encoder_proj, prev_h)
- decoder_inputs = fluid.layers.concat(
- input=[context, current_word], axis=1)
- h, c = lstm_step(decoder_inputs, prev_h, prev_c, decoder_size)
- state_cell.set_state('h', h)
- state_cell.set_state('c', c)
-
- # Define the decoding process
- if not is_generating:
- # Training process
- trg_word_idx = fluid.layers.data(
- name='target_sequence', shape=[1], dtype='int64', lod_level=1)
-
- trg_embedding = fluid.layers.embedding(
- input=trg_word_idx,
- size=[target_dict_dim, embedding_dim],
- dtype='float32')
-
- # A decoder for training
- decoder = TrainingDecoder(state_cell)
-
- with decoder.block():
- current_word = decoder.step_input(trg_embedding)
- encoder_vec = decoder.static_input(encoded_vector)
- encoder_proj = decoder.static_input(encoded_proj)
- decoder.state_cell.compute_state(inputs={
- 'x': current_word,
- 'encoder_vec': encoder_vec,
- 'encoder_proj': encoder_proj
- })
- h = decoder.state_cell.get_state('h')
- decoder.state_cell.update_states()
- out = fluid.layers.fc(input=h,
- size=target_dict_dim,
- bias_attr=True,
- act='softmax')
- decoder.output(out)
-
- label = fluid.layers.data(
- name='label_sequence', shape=[1], dtype='int64', lod_level=1)
- cost = fluid.layers.cross_entropy(input=decoder(), label=label)
- avg_cost = fluid.layers.mean(x=cost)
- feeding_list = ["source_sequence", "target_sequence", "label_sequence"]
- return avg_cost, feeding_list
-
- else:
- # Inference
- init_ids = fluid.layers.data(
- name="init_ids", shape=[1], dtype="int64", lod_level=2)
- init_scores = fluid.layers.data(
- name="init_scores", shape=[1], dtype="float32", lod_level=2)
-
- # A beam search decoder
- decoder = BeamSearchDecoder(
- state_cell=state_cell,
- init_ids=init_ids,
- init_scores=init_scores,
- target_dict_dim=target_dict_dim,
- word_dim=embedding_dim,
- input_var_dict={
- 'encoder_vec': encoded_vector,
- 'encoder_proj': encoded_proj
- },
- topk_size=50,
- sparse_emb=True,
- max_len=max_length,
- beam_size=beam_size,
- end_id=1,
- name=None)
-
- decoder.decode()
-
- translation_ids, translation_scores = decoder()
- feeding_list = ["source_sequence"]
-
- return translation_ids, translation_scores, feeding_list
+from paddle.fluid.layers.control_flow import StaticRNN
+import numpy as np
+from paddle.fluid import ParamAttr
+from paddle.fluid.contrib.layers import basic_lstm, BasicLSTMUnit
+from base_model import BaseModel
+
+INF = 1. * 1e5
+alpha = 0.6
+
+
+class AttentionModel(BaseModel):
+ def __init__(self,
+ hidden_size,
+ src_vocab_size,
+ tar_vocab_size,
+ batch_size,
+ num_layers=1,
+ init_scale=0.1,
+ dropout=None,
+ batch_first=True):
+ super(AttentionModel, self).__init__(
+ hidden_size,
+ src_vocab_size,
+ tar_vocab_size,
+ batch_size,
+ num_layers=num_layers,
+ init_scale=init_scale,
+ dropout=dropout,
+ batch_first=batch_first)
+
+ def _build_decoder(self,
+ enc_last_hidden,
+ enc_last_cell,
+ mode='train',
+ beam_size=10):
+
+ dec_input = layers.transpose(self.tar_emb, [1, 0, 2])
+ dec_unit_list = []
+ for i in range(self.num_layers):
+ new_name = "dec_layers_" + str(i)
+ dec_unit_list.append(
+ BasicLSTMUnit(
+ new_name,
+ self.hidden_size,
+ ParamAttr(initializer=fluid.initializer.UniformInitializer(
+ low=-self.init_scale, high=self.init_scale)),
+ ParamAttr(initializer=fluid.initializer.Constant(0.0)), ))
+
+
+ attention_weight = layers.create_parameter([self.hidden_size * 2, self.hidden_size], dtype="float32", name="attention_weight", \
+ default_initializer=fluid.initializer.UniformInitializer(low=-self.init_scale, high=self.init_scale))
+
+ memory_weight = layers.create_parameter([self.hidden_size, self.hidden_size], dtype="float32", name="memory_weight", \
+ default_initializer=fluid.initializer.UniformInitializer(low=-self.init_scale, high=self.init_scale))
+
+ def dot_attention(query, memory, mask=None):
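+            # Dot-product attention: score the query against every encoder
+            # state; the optional mask (0 for real tokens, -1 for padding)
+            # pushes padded scores to -1e9 before the softmax over source
+            # positions; returns the weighted sum of memory and the weights.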
+ attn = layers.matmul(query, memory, transpose_y=True)
+
+            if mask is not None:
+ attn = layers.transpose(attn, [1, 0, 2])
+ attn = layers.elementwise_add(attn, mask * 1000000000, -1)
+ attn = layers.transpose(attn, [1, 0, 2])
+ weight = layers.softmax(attn)
+ weight_memory = layers.matmul(weight, memory)
+
+ return weight_memory, weight
+
+ max_src_seq_len = layers.shape(self.src)[1]
+ src_mask = layers.sequence_mask(
+ self.src_sequence_length, maxlen=max_src_seq_len, dtype='float32')
+
+ softmax_weight = layers.create_parameter([self.hidden_size, self.tar_vocab_size], dtype="float32", name="softmax_weight", \
+ default_initializer=fluid.initializer.UniformInitializer(low=-self.init_scale, high=self.init_scale))
+
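+        # One decoder step with input feeding: the previous attention output
+        # (pre_feed) is concatenated with the current word embedding, run
+        # through the stacked LSTM cells, then combined with a fresh
+        # attention read over the encoder memory.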
+        def decoder_step(current_in, pre_feed, pre_hidden_array,
+                         pre_cell_array, enc_memory):
+            new_hidden_array = []
+            new_cell_array = []
+
+            step_input = layers.concat([current_in, pre_feed], 1)
+
+ for i in range(self.num_layers):
+ pre_hidden = pre_hidden_array[i]
+ pre_cell = pre_cell_array[i]
+
+ new_hidden, new_cell = dec_unit_list[i](step_input, pre_hidden,
+ pre_cell)
+
+ new_hidden_array.append(new_hidden)
+ new_cell_array.append(new_cell)
+
+ step_input = new_hidden
+
+ memory_mask = src_mask - 1.0
+ enc_memory = layers.matmul(enc_memory, memory_weight)
+ att_in = layers.unsqueeze(step_input, [1])
+ dec_att, _ = dot_attention(att_in, enc_memory)
+ dec_att = layers.squeeze(dec_att, [1])
+ concat_att_out = layers.concat([dec_att, step_input], 1)
+ concat_att_out = layers.matmul(concat_att_out, attention_weight)
+
+ return concat_att_out, new_hidden_array, new_cell_array
+
+ if mode == "train":
+ dec_rnn = StaticRNN()
+ with dec_rnn.step():
+ step_input = dec_rnn.step_input(dec_input)
+ input_feed = dec_rnn.memory(
+ batch_ref=dec_input, shape=[-1, self.hidden_size])
+ step_input = layers.concat([step_input, input_feed], 1)
+
+ for i in range(self.num_layers):
+ pre_hidden = dec_rnn.memory(init=enc_last_hidden[i])
+ pre_cell = dec_rnn.memory(init=enc_last_cell[i])
+
+ new_hidden, new_cell = dec_unit_list[i](
+ step_input, pre_hidden, pre_cell)
+
+ dec_rnn.update_memory(pre_hidden, new_hidden)
+ dec_rnn.update_memory(pre_cell, new_cell)
+
+ step_input = new_hidden
+
+                    if self.dropout is not None and self.dropout > 0.0:
+ print("using dropout", self.dropout)
+ step_input = fluid.layers.dropout(
+ step_input,
+ dropout_prob=self.dropout,
+ dropout_implementation='upscale_in_train')
+ memory_mask = src_mask - 1.0
+ enc_memory = layers.matmul(self.enc_output, memory_weight)
+ att_in = layers.unsqueeze(step_input, [1])
+ dec_att, _ = dot_attention(att_in, enc_memory, memory_mask)
+ dec_att = layers.squeeze(dec_att, [1])
+ concat_att_out = layers.concat([dec_att, step_input], 1)
+ concat_att_out = layers.matmul(concat_att_out, attention_weight)
+ #concat_att_out = layers.tanh( concat_att_out )
+
+ dec_rnn.update_memory(input_feed, concat_att_out)
+
+ dec_rnn.step_output(concat_att_out)
+
+ dec_rnn_out = dec_rnn()
+ dec_output = layers.transpose(dec_rnn_out, [1, 0, 2])
+
+ dec_output = layers.matmul(dec_output, softmax_weight)
+
+ return dec_output
+ elif mode == 'beam_search':
+
+ max_length = max_src_seq_len * 2
+ #max_length = layers.fill_constant( [1], dtype='int32', value = 10)
+ pre_ids = layers.fill_constant([1, 1], dtype='int64', value=1)
+ full_ids = layers.fill_constant([1, 1], dtype='int64', value=1)
+
+ score = layers.fill_constant([1], dtype='float32', value=0.0)
+
+ #eos_ids = layers.fill_constant( [1, 1], dtype='int64', value=2)
+
+ pre_hidden_array = []
+ pre_cell_array = []
+ pre_feed = layers.fill_constant(
+ [beam_size, self.hidden_size], dtype='float32', value=0)
+ for i in range(self.num_layers):
+ pre_hidden_array.append(
+ layers.expand(enc_last_hidden[i], [beam_size, 1]))
+ pre_cell_array.append(
+ layers.expand(enc_last_cell[i], [beam_size, 1]))
+
+ eos_ids = layers.fill_constant([beam_size], dtype='int64', value=2)
+ init_score = np.zeros((beam_size)).astype('float32')
+ init_score[1:] = -INF
+ pre_score = layers.assign(init_score)
+ #pre_score = layers.fill_constant( [1,], dtype='float32', value= 0.0)
+ tokens = layers.fill_constant(
+ [beam_size, 1], dtype='int64', value=1)
+
+ enc_memory = layers.expand(self.enc_output, [beam_size, 1, 1])
+
+ pre_tokens = layers.fill_constant(
+ [beam_size, 1], dtype='int64', value=1)
+
+ finished_seq = layers.fill_constant(
+ [beam_size, 1], dtype='int64', value=0)
+ finished_scores = layers.fill_constant(
+ [beam_size], dtype='float32', value=-INF)
+ finished_flag = layers.fill_constant(
+ [beam_size], dtype='float32', value=0.0)
+
+ step_idx = layers.fill_constant(shape=[1], dtype='int32', value=0)
+ cond = layers.less_than(
+ x=step_idx, y=max_length) # default force_cpu=True
+
+ parent_idx = layers.fill_constant([1], dtype='int32', value=0)
+ while_op = layers.While(cond)
+
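+            # Beam-search bookkeeping (in the style of tensor2tensor): each
+            # step grows the top-k candidates, splits them into alive and
+            # finished sets, and stops once no alive beam can overtake the
+            # best finished hypothesis under the length penalty.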
+ def compute_topk_scores_and_seq(sequences,
+ scores,
+ scores_to_gather,
+ flags,
+ beam_size,
+ select_beam=None,
+ generate_id=None):
+ scores = layers.reshape(scores, shape=[1, -1])
+ _, topk_indexs = layers.topk(scores, k=beam_size)
+
+ topk_indexs = layers.reshape(topk_indexs, shape=[-1])
+
+ # gather result
+
+ top_seq = layers.gather(sequences, topk_indexs)
+ topk_flags = layers.gather(flags, topk_indexs)
+ topk_gather_scores = layers.gather(scores_to_gather,
+ topk_indexs)
+
+                if select_beam is not None:
+ topk_beam = layers.gather(select_beam, topk_indexs)
+ else:
+ topk_beam = select_beam
+
+                if generate_id is not None:
+ topk_id = layers.gather(generate_id, topk_indexs)
+ else:
+ topk_id = generate_id
+ return top_seq, topk_gather_scores, topk_flags, topk_beam, topk_id
+
+ def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished,
+ select_beam, generate_id):
+ curr_scores += curr_finished * -INF
+ return compute_topk_scores_and_seq(
+ curr_seq,
+ curr_scores,
+ curr_log_probs,
+ curr_finished,
+ beam_size,
+ select_beam,
+ generate_id=generate_id)
+
+ def grow_finished(finished_seq, finished_scores, finished_flag,
+ curr_seq, curr_scores, curr_finished):
+ finished_seq = layers.concat(
+ [
+ finished_seq, layers.fill_constant(
+ [beam_size, 1], dtype='int64', value=1)
+ ],
+ axis=1)
+ curr_scores += (1.0 - curr_finished) * -INF
+ #layers.Print( curr_scores, message="curr scores")
+ curr_finished_seq = layers.concat(
+ [finished_seq, curr_seq], axis=0)
+ curr_finished_scores = layers.concat(
+ [finished_scores, curr_scores], axis=0)
+ curr_finished_flags = layers.concat(
+ [finished_flag, curr_finished], axis=0)
+
+ return compute_topk_scores_and_seq(
+ curr_finished_seq, curr_finished_scores,
+ curr_finished_scores, curr_finished_flags, beam_size)
+
+ def is_finished(alive_log_prob, finished_scores,
+ finished_in_finished):
+
+ max_out_len = 200
+ max_length_penalty = layers.pow(layers.fill_constant(
+ [1], dtype='float32', value=((5.0 + max_out_len) / 6.0)),
+ alpha)
+
+ lower_bound_alive_score = layers.slice(
+ alive_log_prob, starts=[0], ends=[1],
+ axes=[0]) / max_length_penalty
+
+                lowest_score_of_finished_in_finished = finished_scores * finished_in_finished
+                lowest_score_of_finished_in_finished += (
+                    1.0 - finished_in_finished) * -INF
+                lowest_score_of_finished_in_finished = layers.reduce_min(
+                    lowest_score_of_finished_in_finished)
+
+                met = layers.less_than(lower_bound_alive_score,
+                                       lowest_score_of_finished_in_finished)
+ met = layers.cast(met, 'float32')
+ bound_is_met = layers.reduce_sum(met)
+
+ finished_eos_num = layers.reduce_sum(finished_in_finished)
+
+ finish_cond = layers.less_than(
+ finished_eos_num,
+ layers.fill_constant(
+ [1], dtype='float32', value=beam_size))
+
+ return finish_cond
+
+            def grow_top_k(step_idx, alive_seq, alive_log_prob, parent_idx):
+ pre_ids = alive_seq
+
+ dec_step_emb = layers.embedding(
+ input=pre_ids,
+ size=[self.tar_vocab_size, self.hidden_size],
+ dtype='float32',
+ is_sparse=False,
+ param_attr=fluid.ParamAttr(
+ name='target_embedding',
+ initializer=fluid.initializer.UniformInitializer(
+ low=-self.init_scale, high=self.init_scale)))
+
+ dec_att_out, new_hidden_array, new_cell_array = decoder_step(
+ dec_step_emb, pre_feed, pre_hidden_array, pre_cell_array,
+ enc_memory)
+
+ projection = layers.matmul(dec_att_out, softmax_weight)
+
+ logits = layers.softmax(projection)
+ current_log = layers.elementwise_add(
+ x=layers.log(logits), y=alive_log_prob, axis=0)
+ base_1 = layers.cast(step_idx, 'float32') + 6.0
+ base_1 /= 6.0
+ length_penalty = layers.pow(base_1, alpha)
+
+ len_pen = layers.pow((
+ (5. + layers.cast(step_idx + 1, 'float32')) / 6.), alpha)
+
+ current_log = layers.reshape(current_log, shape=[1, -1])
+
+ current_log = current_log / length_penalty
+ topk_scores, topk_indices = layers.topk(
+ input=current_log, k=beam_size)
+
+ topk_scores = layers.reshape(topk_scores, shape=[-1])
+
+ topk_log_probs = topk_scores * length_penalty
+
+ generate_id = layers.reshape(
+ topk_indices, shape=[-1]) % self.tar_vocab_size
+
+ selected_beam = layers.reshape(
+ topk_indices, shape=[-1]) // self.tar_vocab_size
+
+ topk_finished = layers.equal(generate_id, eos_ids)
+
+ topk_finished = layers.cast(topk_finished, 'float32')
+
+ generate_id = layers.reshape(generate_id, shape=[-1, 1])
+
+ pre_tokens_list = layers.gather(tokens, selected_beam)
+
+ full_tokens_list = layers.concat(
+ [pre_tokens_list, generate_id], axis=1)
+
+
+ return full_tokens_list, topk_log_probs, topk_scores, topk_finished, selected_beam, generate_id, \
+ dec_att_out, new_hidden_array, new_cell_array
+
+ with while_op.block():
+ topk_seq, topk_log_probs, topk_scores, topk_finished, topk_beam, topk_generate_id, attention_out, new_hidden_array, new_cell_array = \
+ grow_top_k( step_idx, pre_tokens, pre_score, parent_idx)
+ alive_seq, alive_log_prob, _, alive_beam, alive_id = grow_alive(
+ topk_seq, topk_scores, topk_log_probs, topk_finished,
+ topk_beam, topk_generate_id)
+
+ finished_seq_2, finished_scores_2, finished_flags_2, _, _ = grow_finished(
+ finished_seq, finished_scores, finished_flag, topk_seq,
+ topk_scores, topk_finished)
+
+ finished_cond = is_finished(alive_log_prob, finished_scores_2,
+ finished_flags_2)
+
+ layers.increment(x=step_idx, value=1.0, in_place=True)
+
+ layers.assign(alive_beam, parent_idx)
+ layers.assign(alive_id, pre_tokens)
+ layers.assign(alive_log_prob, pre_score)
+ layers.assign(alive_seq, tokens)
+ layers.assign(finished_seq_2, finished_seq)
+ layers.assign(finished_scores_2, finished_scores)
+ layers.assign(finished_flags_2, finished_flag)
+
+ # update init_hidden, init_cell, input_feed
+ new_feed = layers.gather(attention_out, parent_idx)
+ layers.assign(new_feed, pre_feed)
+ for i in range(self.num_layers):
+ new_hidden_var = layers.gather(new_hidden_array[i],
+ parent_idx)
+ layers.assign(new_hidden_var, pre_hidden_array[i])
+ new_cell_var = layers.gather(new_cell_array[i], parent_idx)
+ layers.assign(new_cell_var, pre_cell_array[i])
+
+ length_cond = layers.less_than(x=step_idx, y=max_length)
+ layers.logical_and(x=length_cond, y=finished_cond, out=cond)
+
+ tokens_with_eos = tokens
+
+ all_seq = layers.concat([tokens_with_eos, finished_seq], axis=0)
+ all_score = layers.concat([pre_score, finished_scores], axis=0)
+ _, topk_index = layers.topk(all_score, k=beam_size)
+ topk_index = layers.reshape(topk_index, shape=[-1])
+ final_seq = layers.gather(all_seq, topk_index)
+ final_score = layers.gather(all_score, topk_index)
+
+ return final_seq
+ elif mode == 'greedy_search':
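+            # Greedy decoding: repeatedly feed back the argmax token until
+            # <eos> (id 2) is produced or 2x the source length is reached.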
+ max_length = max_src_seq_len * 2
+ #max_length = layers.fill_constant( [1], dtype='int32', value = 10)
+ pre_ids = layers.fill_constant([1, 1], dtype='int64', value=1)
+ full_ids = layers.fill_constant([1, 1], dtype='int64', value=1)
+
+ score = layers.fill_constant([1], dtype='float32', value=0.0)
+
+ eos_ids = layers.fill_constant([1, 1], dtype='int64', value=2)
+
+ pre_hidden_array = []
+ pre_cell_array = []
+ pre_feed = layers.fill_constant(
+ [1, self.hidden_size], dtype='float32', value=0)
+ for i in range(self.num_layers):
+ pre_hidden_array.append(enc_last_hidden[i])
+ pre_cell_array.append(enc_last_cell[i])
+ #pre_hidden_array.append( layers.fill_constant( [1, hidden_size], dtype='float32', value=0) )
+ #pre_cell_array.append( layers.fill_constant( [1, hidden_size], dtype='float32', value=0) )
+
+ step_idx = layers.fill_constant(shape=[1], dtype='int32', value=0)
+ cond = layers.less_than(
+ x=step_idx, y=max_length) # default force_cpu=True
+ while_op = layers.While(cond)
+
+ with while_op.block():
+
+ dec_step_emb = layers.embedding(
+ input=pre_ids,
+ size=[self.tar_vocab_size, self.hidden_size],
+ dtype='float32',
+ is_sparse=False,
+ param_attr=fluid.ParamAttr(
+ name='target_embedding',
+ initializer=fluid.initializer.UniformInitializer(
+ low=-self.init_scale, high=self.init_scale)))
+
+ dec_att_out, new_hidden_array, new_cell_array = decoder_step(
+ dec_step_emb, pre_feed, pre_hidden_array, pre_cell_array,
+ self.enc_output)
+
+ projection = layers.matmul(dec_att_out, softmax_weight)
+
+ logits = layers.softmax(projection)
+ logits = layers.log(logits)
+
+ current_log = layers.elementwise_add(logits, score, axis=0)
+
+ topk_score, topk_indices = layers.topk(input=current_log, k=1)
+
+ new_ids = layers.concat([full_ids, topk_indices])
+ layers.assign(new_ids, full_ids)
+ #layers.Print( full_ids, message="ful ids")
+ layers.assign(topk_score, score)
+ layers.assign(topk_indices, pre_ids)
+ layers.assign(dec_att_out, pre_feed)
+ for i in range(self.num_layers):
+ layers.assign(new_hidden_array[i], pre_hidden_array[i])
+ layers.assign(new_cell_array[i], pre_cell_array[i])
+
+ layers.increment(x=step_idx, value=1.0, in_place=True)
+
+ eos_met = layers.not_equal(topk_indices, eos_ids)
+ length_cond = layers.less_than(x=step_idx, y=max_length)
+ layers.logical_and(x=length_cond, y=eos_met, out=cond)
+
+ return full_ids
diff --git a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/base_model.py b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/base_model.py
new file mode 100644
index 00000000..bebfc2f8
--- /dev/null
+++ b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/base_model.py
@@ -0,0 +1,502 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle.fluid.layers as layers
+import paddle.fluid as fluid
+from paddle.fluid.layers.control_flow import StaticRNN as PaddingRNN
+import numpy as np
+from paddle.fluid import ParamAttr
+from paddle.fluid.contrib.layers import basic_lstm, BasicLSTMUnit
+
+INF = 1. * 1e5
+alpha = 0.6
+
+
+class BaseModel(object):
+ def __init__(self,
+ hidden_size,
+ src_vocab_size,
+ tar_vocab_size,
+ batch_size,
+ num_layers=1,
+ init_scale=0.1,
+ dropout=None,
+ batch_first=True):
+
+ self.hidden_size = hidden_size
+ self.src_vocab_size = src_vocab_size
+ self.tar_vocab_size = tar_vocab_size
+ self.batch_size = batch_size
+ self.num_layers = num_layers
+ self.init_scale = init_scale
+ self.dropout = dropout
+ self.batch_first = batch_first
+
+ def _build_data(self):
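+        # Inputs for padded batches: word-id tensors plus the true sequence
+        # lengths, which are used later to mask out the padding.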
+ self.src = layers.data(name="src", shape=[-1, 1, 1], dtype='int64')
+ self.src_sequence_length = layers.data(
+ name="src_sequence_length", shape=[-1], dtype='int32')
+
+ self.tar = layers.data(name="tar", shape=[-1, 1, 1], dtype='int64')
+ self.tar_sequence_length = layers.data(
+ name="tar_sequence_length", shape=[-1], dtype='int32')
+ self.label = layers.data(name="label", shape=[-1, 1, 1], dtype='int64')
+
+    def _embedding(self):
+ self.src_emb = layers.embedding(
+ input=self.src,
+ size=[self.src_vocab_size, self.hidden_size],
+ dtype='float32',
+ is_sparse=False,
+ param_attr=fluid.ParamAttr(
+ name='source_embedding',
+ initializer=fluid.initializer.UniformInitializer(
+ low=-self.init_scale, high=self.init_scale)))
+ self.tar_emb = layers.embedding(
+ input=self.tar,
+ size=[self.tar_vocab_size, self.hidden_size],
+ dtype='float32',
+ is_sparse=False,
+ param_attr=fluid.ParamAttr(
+ name='target_embedding',
+ initializer=fluid.initializer.UniformInitializer(
+ low=-self.init_scale, high=self.init_scale)))
+
+ def _build_encoder(self):
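+        # Multi-layer LSTM encoder over the padded batch; sequence_length
+        # tells basic_lstm where each source sentence really ends.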
+ self.enc_output, enc_last_hidden, enc_last_cell = basic_lstm( self.src_emb, None, None, self.hidden_size, num_layers=self.num_layers, batch_first=self.batch_first, \
+ dropout_prob=self.dropout, \
+ param_attr = ParamAttr( initializer=fluid.initializer.UniformInitializer(low=-self.init_scale, high=self.init_scale) ), \
+ bias_attr = ParamAttr( initializer = fluid.initializer.Constant(0.0) ), \
+ sequence_length=self.src_sequence_length)
+
+ return self.enc_output, enc_last_hidden, enc_last_cell
+
+ def _build_decoder(self,
+ enc_last_hidden,
+ enc_last_cell,
+ mode='train',
+ beam_size=10):
+ softmax_weight = layers.create_parameter([self.hidden_size, self.tar_vocab_size], dtype="float32", name="softmax_weight", \
+ default_initializer=fluid.initializer.UniformInitializer(low=-self.init_scale, high=self.init_scale))
+ if mode == 'train':
+ dec_output, dec_last_hidden, dec_last_cell = basic_lstm( self.tar_emb, enc_last_hidden, enc_last_cell, \
+ self.hidden_size, num_layers=self.num_layers, \
+ batch_first=self.batch_first, \
+ dropout_prob=self.dropout, \
+ param_attr = ParamAttr( initializer=fluid.initializer.UniformInitializer(low=-self.init_scale, high=self.init_scale) ), \
+ bias_attr = ParamAttr( initializer = fluid.initializer.Constant(0.0) ))
+
+ dec_output = layers.matmul(dec_output, softmax_weight)
+
+ return dec_output
+ elif mode == 'beam_search' or mode == 'greedy_search':
+ dec_unit_list = []
+ name = 'basic_lstm'
+ for i in range(self.num_layers):
+ new_name = name + "_layers_" + str(i)
+ dec_unit_list.append(
+ BasicLSTMUnit(
+ new_name, self.hidden_size, dtype='float32'))
+
+ def decoder_step(current_in, pre_hidden_array, pre_cell_array):
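+                # One step of the stacked LSTM decoder (no attention): feed
+                # the input through each layer and collect the new states.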
+ new_hidden_array = []
+ new_cell_array = []
+
+ step_in = current_in
+ for i in range(self.num_layers):
+ pre_hidden = pre_hidden_array[i]
+ pre_cell = pre_cell_array[i]
+
+ new_hidden, new_cell = dec_unit_list[i](step_in, pre_hidden,
+ pre_cell)
+
+ new_hidden_array.append(new_hidden)
+ new_cell_array.append(new_cell)
+
+ step_in = new_hidden
+
+ return step_in, new_hidden_array, new_cell_array
+
+ if mode == 'beam_search':
+ max_src_seq_len = layers.shape(self.src)[1]
+ max_length = max_src_seq_len * 2
+ #max_length = layers.fill_constant( [1], dtype='int32', value = 10)
+ pre_ids = layers.fill_constant([1, 1], dtype='int64', value=1)
+ full_ids = layers.fill_constant([1, 1], dtype='int64', value=1)
+
+ score = layers.fill_constant([1], dtype='float32', value=0.0)
+
+ #eos_ids = layers.fill_constant( [1, 1], dtype='int64', value=2)
+
+ pre_hidden_array = []
+ pre_cell_array = []
+ pre_feed = layers.fill_constant(
+ [beam_size, self.hidden_size], dtype='float32', value=0)
+ for i in range(self.num_layers):
+ pre_hidden_array.append(
+ layers.expand(enc_last_hidden[i], [beam_size, 1]))
+ pre_cell_array.append(
+ layers.expand(enc_last_cell[i], [beam_size, 1]))
+
+ eos_ids = layers.fill_constant(
+ [beam_size], dtype='int64', value=2)
+ init_score = np.zeros((beam_size)).astype('float32')
+ init_score[1:] = -INF
+ pre_score = layers.assign(init_score)
+ #pre_score = layers.fill_constant( [1,], dtype='float32', value= 0.0)
+ tokens = layers.fill_constant(
+ [beam_size, 1], dtype='int64', value=1)
+
+ enc_memory = layers.expand(self.enc_output, [beam_size, 1, 1])
+
+ pre_tokens = layers.fill_constant(
+ [beam_size, 1], dtype='int64', value=1)
+
+ finished_seq = layers.fill_constant(
+ [beam_size, 1], dtype='int64', value=0)
+ finished_scores = layers.fill_constant(
+ [beam_size], dtype='float32', value=-INF)
+ finished_flag = layers.fill_constant(
+ [beam_size], dtype='float32', value=0.0)
+
+ step_idx = layers.fill_constant(
+ shape=[1], dtype='int32', value=0)
+ cond = layers.less_than(
+ x=step_idx, y=max_length) # default force_cpu=True
+
+ parent_idx = layers.fill_constant([1], dtype='int32', value=0)
+ while_op = layers.While(cond)
+
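+                # Beam-search bookkeeping (in the style of tensor2tensor):
+                # each step grows the top-k candidates, splits them into
+                # alive and finished sets, and stops once no alive beam can
+                # overtake the best finished hypothesis.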
+ def compute_topk_scores_and_seq(sequences,
+ scores,
+ scores_to_gather,
+ flags,
+ beam_size,
+ select_beam=None,
+ generate_id=None):
+ scores = layers.reshape(scores, shape=[1, -1])
+ _, topk_indexs = layers.topk(scores, k=beam_size)
+
+ topk_indexs = layers.reshape(topk_indexs, shape=[-1])
+
+ # gather result
+
+ top_seq = layers.gather(sequences, topk_indexs)
+ topk_flags = layers.gather(flags, topk_indexs)
+ topk_gather_scores = layers.gather(scores_to_gather,
+ topk_indexs)
+
+                    if select_beam is not None:
+ topk_beam = layers.gather(select_beam, topk_indexs)
+ else:
+ topk_beam = select_beam
+
+                    if generate_id is not None:
+ topk_id = layers.gather(generate_id, topk_indexs)
+ else:
+ topk_id = generate_id
+ return top_seq, topk_gather_scores, topk_flags, topk_beam, topk_id
+
+ def grow_alive(curr_seq, curr_scores, curr_log_probs,
+ curr_finished, select_beam, generate_id):
+ curr_scores += curr_finished * -INF
+ return compute_topk_scores_and_seq(
+ curr_seq,
+ curr_scores,
+ curr_log_probs,
+ curr_finished,
+ beam_size,
+ select_beam,
+ generate_id=generate_id)
+
+ def grow_finished(finished_seq, finished_scores, finished_flag,
+ curr_seq, curr_scores, curr_finished):
+ finished_seq = layers.concat(
+ [
+ finished_seq, layers.fill_constant(
+ [beam_size, 1], dtype='int64', value=1)
+ ],
+ axis=1)
+ curr_scores += (1.0 - curr_finished) * -INF
+ #layers.Print( curr_scores, message="curr scores")
+ curr_finished_seq = layers.concat(
+ [finished_seq, curr_seq], axis=0)
+ curr_finished_scores = layers.concat(
+ [finished_scores, curr_scores], axis=0)
+ curr_finished_flags = layers.concat(
+ [finished_flag, curr_finished], axis=0)
+
+ return compute_topk_scores_and_seq(
+ curr_finished_seq, curr_finished_scores,
+ curr_finished_scores, curr_finished_flags, beam_size)
+
+ def is_finished(alive_log_prob, finished_scores,
+ finished_in_finished):
+
+ max_out_len = 200
+ max_length_penalty = layers.pow(layers.fill_constant(
+ [1], dtype='float32', value=(
+ (5.0 + max_out_len) / 6.0)),
+ alpha)
+
+ lower_bound_alive_score = layers.slice(
+ alive_log_prob, starts=[0], ends=[1],
+ axes=[0]) / max_length_penalty
+
+                    lowest_score_of_finished_in_finished = finished_scores * finished_in_finished
+                    lowest_score_of_finished_in_finished += (
+                        1.0 - finished_in_finished) * -INF
+                    lowest_score_of_finished_in_finished = layers.reduce_min(
+                        lowest_score_of_finished_in_finished)
+
+                    met = layers.less_than(
+                        lower_bound_alive_score,
+                        lowest_score_of_finished_in_finished)
+ met = layers.cast(met, 'float32')
+ bound_is_met = layers.reduce_sum(met)
+
+ finished_eos_num = layers.reduce_sum(finished_in_finished)
+
+ finish_cond = layers.less_than(
+ finished_eos_num,
+ layers.fill_constant(
+ [1], dtype='float32', value=beam_size))
+
+ return finish_cond
+
+                def grow_top_k(step_idx, alive_seq, alive_log_prob, parent_idx):
+ pre_ids = alive_seq
+
+ dec_step_emb = layers.embedding(
+ input=pre_ids,
+ size=[self.tar_vocab_size, self.hidden_size],
+ dtype='float32',
+ is_sparse=False,
+ param_attr=fluid.ParamAttr(
+ name='target_embedding',
+ initializer=fluid.initializer.UniformInitializer(
+ low=-self.init_scale, high=self.init_scale)))
+
+ dec_att_out, new_hidden_array, new_cell_array = decoder_step(
+ dec_step_emb, pre_hidden_array, pre_cell_array)
+
+ projection = layers.matmul(dec_att_out, softmax_weight)
+
+ logits = layers.softmax(projection)
+ current_log = layers.elementwise_add(
+ x=layers.log(logits), y=alive_log_prob, axis=0)
+ base_1 = layers.cast(step_idx, 'float32') + 6.0
+ base_1 /= 6.0
+ length_penalty = layers.pow(base_1, alpha)
+
+ len_pen = layers.pow(((
+ 5. + layers.cast(step_idx + 1, 'float32')) / 6.), alpha)
+
+ current_log = layers.reshape(current_log, shape=[1, -1])
+
+ current_log = current_log / length_penalty
+ topk_scores, topk_indices = layers.topk(
+ input=current_log, k=beam_size)
+
+ topk_scores = layers.reshape(topk_scores, shape=[-1])
+
+ topk_log_probs = topk_scores * length_penalty
+
+ generate_id = layers.reshape(
+ topk_indices, shape=[-1]) % self.tar_vocab_size
+
+ selected_beam = layers.reshape(
+ topk_indices, shape=[-1]) // self.tar_vocab_size
+
+ topk_finished = layers.equal(generate_id, eos_ids)
+
+ topk_finished = layers.cast(topk_finished, 'float32')
+
+ generate_id = layers.reshape(generate_id, shape=[-1, 1])
+
+ pre_tokens_list = layers.gather(tokens, selected_beam)
+
+ full_tokens_list = layers.concat(
+ [pre_tokens_list, generate_id], axis=1)
+
+
+ return full_tokens_list, topk_log_probs, topk_scores, topk_finished, selected_beam, generate_id, \
+ dec_att_out, new_hidden_array, new_cell_array
+
+ with while_op.block():
+ topk_seq, topk_log_probs, topk_scores, topk_finished, topk_beam, topk_generate_id, attention_out, new_hidden_array, new_cell_array = \
+ grow_top_k( step_idx, pre_tokens, pre_score, parent_idx)
+ alive_seq, alive_log_prob, _, alive_beam, alive_id = grow_alive(
+ topk_seq, topk_scores, topk_log_probs, topk_finished,
+ topk_beam, topk_generate_id)
+
+ finished_seq_2, finished_scores_2, finished_flags_2, _, _ = grow_finished(
+ finished_seq, finished_scores, finished_flag, topk_seq,
+ topk_scores, topk_finished)
+
+ finished_cond = is_finished(
+ alive_log_prob, finished_scores_2, finished_flags_2)
+
+ layers.increment(x=step_idx, value=1.0, in_place=True)
+
+ layers.assign(alive_beam, parent_idx)
+ layers.assign(alive_id, pre_tokens)
+ layers.assign(alive_log_prob, pre_score)
+ layers.assign(alive_seq, tokens)
+ layers.assign(finished_seq_2, finished_seq)
+ layers.assign(finished_scores_2, finished_scores)
+ layers.assign(finished_flags_2, finished_flag)
+
+ # update init_hidden, init_cell, input_feed
+ new_feed = layers.gather(attention_out, parent_idx)
+ layers.assign(new_feed, pre_feed)
+ for i in range(self.num_layers):
+ new_hidden_var = layers.gather(new_hidden_array[i],
+ parent_idx)
+ layers.assign(new_hidden_var, pre_hidden_array[i])
+ new_cell_var = layers.gather(new_cell_array[i],
+ parent_idx)
+ layers.assign(new_cell_var, pre_cell_array[i])
+
+ length_cond = layers.less_than(x=step_idx, y=max_length)
+ layers.logical_and(x=length_cond, y=finished_cond, out=cond)
+
+ tokens_with_eos = tokens
+
+ all_seq = layers.concat([tokens_with_eos, finished_seq], axis=0)
+ all_score = layers.concat([pre_score, finished_scores], axis=0)
+ _, topk_index = layers.topk(all_score, k=beam_size)
+ topk_index = layers.reshape(topk_index, shape=[-1])
+ final_seq = layers.gather(all_seq, topk_index)
+ final_score = layers.gather(all_score, topk_index)
+
+ return final_seq
+ elif mode == 'greedy_search':
+ max_src_seq_len = layers.shape(self.src)[1]
+ max_length = max_src_seq_len * 2
+ #max_length = layers.fill_constant( [1], dtype='int32', value = 10)
+ pre_ids = layers.fill_constant([1, 1], dtype='int64', value=1)
+ full_ids = layers.fill_constant([1, 1], dtype='int64', value=1)
+
+ score = layers.fill_constant([1], dtype='float32', value=0.0)
+
+ eos_ids = layers.fill_constant([1, 1], dtype='int64', value=2)
+
+ pre_hidden_array = []
+ pre_cell_array = []
+ pre_feed = layers.fill_constant(
+ [1, self.hidden_size], dtype='float32', value=0)
+ for i in range(self.num_layers):
+ pre_hidden_array.append(enc_last_hidden[i])
+ pre_cell_array.append(enc_last_cell[i])
+ #pre_hidden_array.append( layers.fill_constant( [1, hidden_size], dtype='float32', value=0) )
+ #pre_cell_array.append( layers.fill_constant( [1, hidden_size], dtype='float32', value=0) )
+
+ step_idx = layers.fill_constant(
+ shape=[1], dtype='int32', value=0)
+ cond = layers.less_than(
+ x=step_idx, y=max_length) # default force_cpu=True
+ while_op = layers.While(cond)
+
+ with while_op.block():
+
+ dec_step_emb = layers.embedding(
+ input=pre_ids,
+ size=[self.tar_vocab_size, self.hidden_size],
+ dtype='float32',
+ is_sparse=False,
+ param_attr=fluid.ParamAttr(
+ name='target_embedding',
+ initializer=fluid.initializer.UniformInitializer(
+ low=-self.init_scale, high=self.init_scale)))
+
+ dec_att_out, new_hidden_array, new_cell_array = decoder_step(
+ dec_step_emb, pre_hidden_array, pre_cell_array)
+
+ projection = layers.matmul(dec_att_out, softmax_weight)
+
+ logits = layers.softmax(projection)
+ logits = layers.log(logits)
+
+ current_log = layers.elementwise_add(logits, score, axis=0)
+
+ topk_score, topk_indices = layers.topk(
+ input=current_log, k=1)
+
+ new_ids = layers.concat([full_ids, topk_indices])
+ layers.assign(new_ids, full_ids)
+ #layers.Print( full_ids, message="ful ids")
+ layers.assign(topk_score, score)
+ layers.assign(topk_indices, pre_ids)
+ layers.assign(dec_att_out, pre_feed)
+ for i in range(self.num_layers):
+ layers.assign(new_hidden_array[i], pre_hidden_array[i])
+ layers.assign(new_cell_array[i], pre_cell_array[i])
+
+ layers.increment(x=step_idx, value=1.0, in_place=True)
+
+ eos_met = layers.not_equal(topk_indices, eos_ids)
+ length_cond = layers.less_than(x=step_idx, y=max_length)
+ layers.logical_and(x=length_cond, y=eos_met, out=cond)
+
+ return full_ids
+
+        else:
+            print("mode not supported", mode)
+            raise Exception("unsupported mode: " + mode)
+
+ def _compute_loss(self, dec_output):
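+        # Per-token cross entropy, masked so that padded target positions do
+        # not contribute; average over the batch, then sum over time steps.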
+ loss = layers.softmax_with_cross_entropy(
+ logits=dec_output, label=self.label, soft_label=False)
+
+ loss = layers.reshape(loss, shape=[self.batch_size, -1])
+
+ max_tar_seq_len = layers.shape(self.tar)[1]
+ tar_mask = layers.sequence_mask(
+ self.tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32')
+ loss = loss * tar_mask
+ loss = layers.reduce_mean(loss, dim=[0])
+ loss = layers.reduce_sum(loss)
+
+ loss.permissions = True
+
+ return loss
+
+ def _beam_search(self, enc_last_hidden, enc_last_cell):
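+        # unused stub: decoding is implemented inline in _build_decoder above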
+ pass
+
+ def build_graph(self, mode='train', beam_size=10):
+ if mode == 'train' or mode == 'eval':
+ self._build_data()
+ self._emebdding()
+ enc_output, enc_last_hidden, enc_last_cell = self._build_encoder()
+ dec_output = self._build_decoder(enc_last_hidden, enc_last_cell)
+
+ loss = self._compute_loss(dec_output)
+ return loss
+ elif mode == "beam_search" or mode == 'greedy_search':
+ self._build_data()
+ self._emebdding()
+ enc_output, enc_last_hidden, enc_last_cell = self._build_encoder()
+ dec_output = self._build_decoder(
+ enc_last_hidden, enc_last_cell, mode=mode, beam_size=beam_size)
+
+ return dec_output
+ else:
+ print("not support mode ", mode)
+ raise Exception("not support mode: " + mode)
diff --git a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/data/download_en-vi.sh b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/data/download_en-vi.sh
new file mode 100755
index 00000000..ae61044b
--- /dev/null
+++ b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/data/download_en-vi.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+# IWSLT'15 English-Vietnamese is a small dataset containing 133k parallel sentence pairs.
+# This script downloads the data from the Stanford NLP website.
+#
+# Usage:
+# ./download_en-vi.sh output_path
+#
+# If output_path is not specified, a dir named "./en-vi" will be created and used as
+# the output path.
+
+set -ex
+OUTPUT_PATH="${1:-en-vi}"
+SITE_PATH="https://nlp.stanford.edu/projects/nmt/data"
+
+mkdir -v -p "$OUTPUT_PATH"
+
+# Download the IWSLT'15 small dataset from the Stanford website.
+echo "Begin to download training dataset train.en and train.vi."
+wget "$SITE_PATH/iwslt15.en-vi/train.en" -O "$OUTPUT_PATH/train.en"
+wget "$SITE_PATH/iwslt15.en-vi/train.vi" -O "$OUTPUT_PATH/train.vi"
+
+echo "Begin to download dev dataset tst2012.en and tst2012.vi."
+wget "$SITE_PATH/iwslt15.en-vi/tst2012.en" -O "$OUTPUT_PATH/tst2012.en"
+wget "$SITE_PATH/iwslt15.en-vi/tst2012.vi" -O "$OUTPUT_PATH/tst2012.vi"
+
+echo "Begin to download test dataset tst2013.en and tst2013.vi."
+wget "$SITE_PATH/iwslt15.en-vi/tst2013.en" -O "$OUTPUT_PATH/tst2013.en"
+wget "$SITE_PATH/iwslt15.en-vi/tst2013.vi" -O "$OUTPUT_PATH/tst2013.vi"
+
+echo "Begin to ownload vocab file vocab.en and vocab.vi."
+wget "$SITE_PATH/iwslt15.en-vi/vocab.en" -O "$OUTPUT_PATH/vocab.en"
+wget "$SITE_PATH/iwslt15.en-vi/vocab.vi" -O "$OUTPUT_PATH/vocab.vi"
+
diff --git a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/images/bi_rnn.png b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/images/bi_rnn.png
deleted file mode 100644
index 9d8efd50a49d0305586f550344472ab94c93bed3..0000000000000000000000000000000000000000
Binary files a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/images/bi_rnn.png and /dev/null differ