From 15c0b8b44a9c999527cbdb6877ea446c61260e6c Mon Sep 17 00:00:00 2001
From: Guanghua Yu <742925032@qq.com>
Date: Thu, 14 Jul 2022 13:53:10 +0800
Subject: [PATCH] cherry pick some pr (#1295)
* fixed-docs (#1283)
* [documentation] fix typos (#1287)
* add YOLOv7 ACT example (#1291)
Co-authored-by: leiqing <54695910+leiqing1@users.noreply.github.com>
Co-authored-by: minghaoBD <79566150+minghaoBD@users.noreply.github.com>
---
README.md | 2 +-
example/auto_compression/README.md | 415 +++++++++++-------
.../auto_compression/pytorch_yolov6/README.md | 4 +-
.../auto_compression/pytorch_yolov7/README.md | 152 +++++++
.../configs/yolov7_qat_dis.yaml | 30 ++
.../pytorch_yolov7/configs/yolov7_reader.yaml | 27 ++
.../pytorch_yolov7/cpp_infer/CMakeLists.txt | 263 +++++++++++
.../pytorch_yolov7/cpp_infer/README.md | 51 +++
.../pytorch_yolov7/cpp_infer/compile.sh | 37 ++
.../pytorch_yolov7/cpp_infer/trt_run.cc | 116 +++++
.../auto_compression/pytorch_yolov7/eval.py | 151 +++++++
.../pytorch_yolov7/images/000000570688.jpg | Bin 0 -> 138365 bytes
.../pytorch_yolov7/paddle_trt_infer.py | 322 ++++++++++++++
.../pytorch_yolov7/post_process.py | 173 ++++++++
.../pytorch_yolov7/post_quant.py | 104 +++++
.../auto_compression/pytorch_yolov7/run.py | 172 ++++++++
.../semantic_segmentation/README.md | 16 +-
17 files changed, 1861 insertions(+), 174 deletions(-)
create mode 100644 example/auto_compression/pytorch_yolov7/README.md
create mode 100644 example/auto_compression/pytorch_yolov7/configs/yolov7_qat_dis.yaml
create mode 100644 example/auto_compression/pytorch_yolov7/configs/yolov7_reader.yaml
create mode 100644 example/auto_compression/pytorch_yolov7/cpp_infer/CMakeLists.txt
create mode 100644 example/auto_compression/pytorch_yolov7/cpp_infer/README.md
create mode 100644 example/auto_compression/pytorch_yolov7/cpp_infer/compile.sh
create mode 100644 example/auto_compression/pytorch_yolov7/cpp_infer/trt_run.cc
create mode 100644 example/auto_compression/pytorch_yolov7/eval.py
create mode 100644 example/auto_compression/pytorch_yolov7/images/000000570688.jpg
create mode 100644 example/auto_compression/pytorch_yolov7/paddle_trt_infer.py
create mode 100644 example/auto_compression/pytorch_yolov7/post_process.py
create mode 100644 example/auto_compression/pytorch_yolov7/post_quant.py
create mode 100644 example/auto_compression/pytorch_yolov7/run.py
diff --git a/README.md b/README.md
index 0e2ce025..5289fb2e 100755
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@ PaddleSlim是一个专注于深度学习模型压缩的工具库,提供**低
- 支持代码无感知压缩:用户只需提供推理模型文件和数据,既可进行离线量化(PTQ)、量化训练(QAT)、稀疏训练等压缩任务。
- 支持自动策略选择,根据任务特点和部署环境特性:自动搜索合适的离线量化方法,自动搜索最佳的压缩策略组合方式。
- 发布[自然语言处理](example/auto_compression/nlp)、[图像语义分割](example/auto_compression/semantic_segmentation)、[图像目标检测](example/auto_compression/detection)三个方向的自动化压缩示例。
- - 发布`X2Paddle`模型自动化压缩方案:[YOLOv5](example/auto_compression/pytorch_yolov5)、[YOLOv6](example/auto_compression/pytorch_yolov6)、[HuggingFace](example/auto_compression/pytorch_huggingface)、[MobileNet](example/auto_compression/tensorflow_mobilenet)。
+ - 发布`X2Paddle`模型自动化压缩方案:[YOLOv5](example/auto_compression/pytorch_yolov5)、[YOLOv6](example/auto_compression/pytorch_yolov6)、[YOLOv7](example/auto_compression/pytorch_yolov7)、[HuggingFace](example/auto_compression/pytorch_huggingface)、[MobileNet](example/auto_compression/tensorflow_mobilenet)。
- 升级量化功能
diff --git a/example/auto_compression/README.md b/example/auto_compression/README.md
index c9c9a91d..e907908b 100644
--- a/example/auto_compression/README.md
+++ b/example/auto_compression/README.md
@@ -1,165 +1,250 @@
-# 自动化压缩工具ACT(Auto Compression Toolkit)
-
-## 简介
-PaddleSlim推出全新自动化压缩工具(ACT),旨在通过Source-Free的方式,自动对预测模型进行压缩,压缩后模型可直接部署应用。ACT自动化压缩工具主要特性如下:
-- **『更便捷』**:开发者无需了解或修改模型源码,直接使用导出的预测模型进行压缩;
-- **『更智能』**:开发者简单配置即可启动压缩,ACT工具会自动优化得到最好预测模型;
-- **『更丰富』**:ACT中提供了量化训练、蒸馏、结构化剪枝、非结构化剪枝、多种离线量化方法及超参搜索等等,可任意搭配使用。
-
-
-## 环境准备
-
-- 安装PaddlePaddle >= 2.3 (从[Paddle官网](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html)下载安装)
-- 安装PaddleSlim >=2.3
-
-(1)安装paddlepaddle:
-```shell
-# CPU
-pip install paddlepaddle
-# GPU
-pip install paddlepaddle-gpu
-```
-
-(2)安装paddleslim:
-```shell
-pip install paddleslim
-```
-
-## 快速上手
-
-- 1.准备模型及数据集
-
-```shell
-# 下载MobileNet预测模型
-wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV1_infer.tar
-tar -xf MobileNetV1_infer.tar
-# 下载ImageNet小型数据集
-wget https://sys-p0.bj.bcebos.com/slim_ci/ILSVRC2012_data_demo.tar.gz
-tar -xf ILSVRC2012_data_demo.tar.gz
-```
-
-- 2.运行
-
-```python
-# 导入依赖包
-import paddle
-from PIL import Image
-from paddle.vision.datasets import DatasetFolder
-from paddle.vision.transforms import transforms
-from paddleslim.auto_compression import AutoCompression
-paddle.enable_static()
-# 定义DataSet
-class ImageNetDataset(DatasetFolder):
- def __init__(self, path, image_size=224):
- super(ImageNetDataset, self).__init__(path)
- normalize = transforms.Normalize(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375])
- self.transform = transforms.Compose([
- transforms.Resize(256),
- transforms.CenterCrop(image_size), transforms.Transpose(),
- normalize
- ])
-
- def __getitem__(self, idx):
- img_path, _ = self.samples[idx]
- return self.transform(Image.open(img_path).convert('RGB'))
-
- def __len__(self):
- return len(self.samples)
-
-# 定义DataLoader
-train_dataset = ImageNetDataset("./ILSVRC2012_data_demo/ILSVRC2012/train/")
-image = paddle.static.data(
- name='inputs', shape=[None] + [3, 224, 224], dtype='float32')
-train_loader = paddle.io.DataLoader(train_dataset, feed_list=[image], batch_size=32, return_list=False)
-# 开始自动压缩
-ac = AutoCompression(
- model_dir="./MobileNetV1_infer",
- model_filename="inference.pdmodel",
- params_filename="inference.pdiparams",
- save_dir="MobileNetV1_quant",
- config={'Quantization': {}, "HyperParameterOptimization": {'ptq_algo': ['avg'], 'max_quant_count': 3}},
- train_dataloader=train_loader,
- eval_dataloader=train_loader)
-ac.compress()
-```
-
-- 3.测试精度
-
-测试压缩前模型的精度:
-```shell
-CUDA_VISIBLE_DEVICES=0 python ./image_classification/eval.py
-### Eval Top1: 0.7171724759615384
-```
-
-测试量化模型的精度:
-```shell
-CUDA_VISIBLE_DEVICES=0 python ./image_classification/eval.py --model_dir='MobileNetV1_quant'
-### Eval Top1: 0.7166466346153846
-```
-
-量化后模型的精度相比量化前的模型几乎精度无损,由于是使用的超参搜索的方法来选择的量化参数,所以每次运行得到的量化模型精度会有些许波动。
-
-- 4.推理速度测试
-量化模型速度的测试依赖推理库的支持,所以确保安装的是带有TensorRT的PaddlePaddle。以下示例和展示的测试结果是基于Tesla V100、CUDA 10.2、python3.7得到的。
-
-使用以下指令查看本地cuda版本,并且在[下载链接](https://paddleinference.paddlepaddle.org.cn/master/user_guides/download_lib.html#python)中下载对应cuda版本和对应python版本的paddlepaddle安装包。
-```shell
-cat /usr/local/cuda/version.txt ### CUDA Version 10.2.89
-### 10.2.89 为cuda版本号,可以根据这个版本号选择需要安装的带有TensorRT的PaddlePaddle安装包。
-```
-
-安装下载的whl包:
-```
-### 这里通过wget下载到的是python3.7、cuda10.2的PaddlePaddle安装包,若您的环境和示例环境不同,请依赖您自己机器的环境下载对应的安装包,否则运行示例代码会报错。
-wget https://paddle-inference-lib.bj.bcebos.com/2.3.0/python/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.2_cudnn8.1.1_trt7.2.3.4/paddlepaddle_gpu-2.3.0-cp37-cp37m-linux_x86_64.whl
-pip install paddlepaddle_gpu-2.3.0-cp37-cp37m-linux_x86_64.whl --force-reinstall
-```
-
-测试FP32模型的速度
-```
-python ./image_classification/infer.py
-### using tensorrt FP32 batch size: 1 time(ms): 0.6140608787536621
-```
-
-测试FP16模型的速度
-```
-python ./image_classification/infer.py --use_fp16=True
-### using tensorrt FP16 batch size: 1 time(ms): 0.5795984268188477
-```
-
-测试INT8模型的速度
-```
-python ./image_classification/infer.py --model_dir=./MobileNetV1_quant/ --use_int8=True
-### using tensorrt INT8 batch size: 1 time(ms): 0.5213963985443115
-```
-
-**提示:**
-- DataLoader传入的数据集是待压缩模型所用的数据集,DataLoader继承自`paddle.io.DataLoader`。可以直接使用模型套件中的DataLoader,或者根据[paddle.io.DataLoader](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/io/DataLoader_cn.html#dataloader)自定义所需要的DataLoader。
-- 自动化压缩Config中定义量化、蒸馏、剪枝等压缩算法会合并执行,压缩策略有:量化+蒸馏,剪枝+蒸馏等等。示例中选择的配置为离线量化超参搜索。
-- 如果要压缩的模型参数是存储在各自分离的文件中,需要先通过[convert.py](./convert.py) 脚本将其保存成一个单独的二进制文件。
-
-## 应用示例
-
-#### [图像分类](./image_classification)
-
-#### [目标检测](./detection)
-
-#### [语义分割](./semantic_segmentation)
-
-#### [NLP](./nlp)
-
-#### X2Paddle
-
-- [PyTorch YOLOv5](./pytorch_yolov5)
-- [HuggingFace](./pytorch_huggingface)
-- [TensorFlow MobileNet](./tensorflow_mobilenet)
-
-#### 即将发布
-- [ ] 更多自动化压缩应用示例
-
-## 其他
-
-- ACT可以自动处理常见的预测模型,如果有更特殊的改造需求,可以参考[ACT超参配置教程](./hyperparameter_tutorial.md)来进行单独配置压缩策略。
-
-- 如果你发现任何关于ACT自动化压缩工具的问题或者是建议, 欢迎通过[GitHub Issues](https://github.com/PaddlePaddle/PaddleSlim/issues)给我们提issues。同时欢迎贡献更多优秀模型,共建开源生态。
+# 模型自动化压缩工具ACT(Auto Compression Toolkit)
+
+------------------------------------------------------------------------------------------
+
+
+## **简介**
+
+PaddleSlim推出全新自动化压缩工具(Auto Compression Toolkit, ACT),旨在通过Source-Free的方式,自动对预测模型进行压缩,压缩后模型可直接部署应用。
+
+## **News** 📢
+
+* 🎉 2022.7.6 [**PaddleSlim v2.3.0**](https://github.com/PaddlePaddle/PaddleSlim/releases/tag/v2.3.0)全新发布!目前已经在图像分类、目标检测、图像分割、NLP等20多个模型验证正向效果。
+* 🔥 2022.7.14 晚 20:30,PaddleSlim自动压缩天使用户沟通会。与开发者共同探讨模型压缩痛点问题,欢迎大家扫码报名入群获取会议链接。
+
+
+
+
+
+## **特性**
+
+- **🚀『解耦训练代码』** :开发者无需了解或修改模型源码,直接使用导出的预测模型进行压缩;
+- **🎛️『全流程自动优化』** :开发者简单配置即可启动压缩,ACT工具会自动优化得到最好预测模型;
+- **📦『支持丰富压缩算法』** :ACT中提供了量化训练、蒸馏、结构化剪枝、非结构化剪枝、多种离线量化方法及超参搜索等等,可任意搭配使用
+
+### **ACT核心思想**
+
+相比于传统手工压缩,自动化压缩的“自动”主要体现在4个方面:解耦训练代码、离线量化超参搜索、算法自动组合以及硬件感知。
+
+
+
+
+
+### **模型压缩效果示例**
+
+ACT相比传统的手工压缩方法:
+
+- 代码量减少 50% 以上;
+- 压缩精度与手工压缩基本持平,在 PP-YOLOE 模型上效果还优于手工压缩;
+- 自动化压缩后的推理性能收益与手工压缩持平,相比压缩前,推理速度可以提升1.4~7.1倍。
+
+
+
+
+
+### **模型压缩效果Benchmark**
+
+
+
+
+
+| 模型类型 | model name | 压缩前<br>精度(Top1 Acc %) | 压缩后<br>精度(Top1 Acc %) | 压缩前<br>推理时延(ms) | 压缩后<br>推理时延(ms) | 推理<br>加速比 | 芯片 |
+| ------------------------------- | ---------------------------- | ---------------------- | ---------------------- | ---------------- | ---------------- | ---------- | ----------------- |
+| [图像分类](./image_classification) | MobileNetV1 | 70.90 | 70.57 | 33.15 | 13.64 | **2.43** | SDM865(骁龙865) |
+| [图像分类](./image_classification) | ShuffleNetV2_x1_0 | 68.65 | 68.32 | 10.43 | 5.51 | **1.89** | SDM865(骁龙865) |
+| [图像分类](./image_classification) | SqueezeNet1_0_infer | 59.60 | 59.45 | 35.98 | 16.96 | **2.12** | SDM865(骁龙865) |
+| [图像分类](./image_classification) | PPLCNetV2_base | 76.86 | 76.43 | 36.50 | 15.79 | **2.31** | SDM865(骁龙865) |
+| [图像分类](./image_classification) | ResNet50_vd | 79.12 | 78.74 | 3.19 | 0.92 | **3.47** | NVIDIA Tesla T4 |
+| [图像分类](./image_classification) | PPHGNet_tiny | 79.59 | 79.20 | 2.82 | 0.98 | **2.88** | NVIDIA Tesla T4 |
+| [语义分割](./semantic_segmentation) | PP-HumanSeg-Lite | 92.87 | 92.35 | 56.36 | 37.71 | **1.49** | SDM710 |
+| [语义分割](./semantic_segmentation) | PP-LiteSeg | 77.04 | 76.93 | 1.43 | 1.16 | **1.23** | NVIDIA Tesla T4 |
+| [语义分割](./semantic_segmentation) | HRNet | 78.97 | 78.90 | 8.19 | 5.81 | **1.41** | NVIDIA Tesla T4 |
+| [语义分割](./semantic_segmentation) | UNet | 65.00 | 64.93 | 15.29 | 10.23 | **1.49** | NVIDIA Tesla T4 |
+| NLP | PP-MiniLM | 72.81 | 72.44 | 128.01 | 17.97 | **7.12** | NVIDIA Tesla T4 |
+| NLP | ERNIE 3.0-Medium | 73.09 | 72.40 | 29.25(fp16) | 19.61 | **1.49** | NVIDIA Tesla T4 |
+| [目标检测](./pytorch_yolov5) | YOLOv5s<br>(PyTorch) | 37.40 | 36.9 | 5.95 | 1.87 | **3.18** | NVIDIA Tesla T4 |
+| [目标检测](./pytorch_yolov6) | YOLOv6s<br>(PyTorch) | 42.4 | 41.3 | 9.06 | 1.83 | **4.95** | NVIDIA Tesla T4 |
+| [目标检测](./pytorch_yolov7) | YOLOv7<br>(PyTorch) | 51.1 | 50.8 | 26.84 | 4.55 | **5.89** | NVIDIA Tesla T4 |
+| [目标检测](./detection) | PP-YOLOE-l | 50.9 | 50.6 | 11.2 | 6.7 | **1.67** | NVIDIA Tesla V100 |
+| [图像分类](./image_classification) | MobileNetV1<br>(TensorFlow) | 71.0 | 70.22 | 30.45 | 15.86 | **1.92** | SDM865(骁龙865) |
+
+- 备注:目标检测精度指标为mAP(0.5:0.95)精度测量结果。图像分割精度指标为IoU精度测量结果。
+- 更多飞桨模型应用示例及Benchmark可以参考:[图像分类](./image_classification),[目标检测](./detection),[语义分割](./semantic_segmentation),[自然语言处理](./nlp)
+- 更多其它框架应用示例及Benchmark可以参考:[YOLOv5(PyTorch)](./pytorch_yolov5),[YOLOv6(PyTorch)](./pytorch_yolov6),[YOLOv7(PyTorch)](./pytorch_yolov7),[HuggingFace(PyTorch)](./pytorch_huggingface),[MobileNet(TensorFlow)](./tensorflow_mobilenet)。
+
+## **环境准备**
+
+- 安装PaddlePaddle >= 2.3.1:(可以参考[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html)下载安装)
+
+ ```shell
+ # CPU
+ pip install paddlepaddle --upgrade
+ # GPU
+ pip install paddlepaddle-gpu --upgrade
+ ```
+
+- 安装PaddleSlim >=2.3.0:
+
+ ```shell
+ pip install paddleslim
+ ```
+
+## **快速开始**
+
+- **1. 准备模型及数据集**
+
+```shell
+# 下载MobileNet预测模型
+wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV1_infer.tar
+tar -xf MobileNetV1_infer.tar
+# 下载ImageNet小型数据集
+wget https://sys-p0.bj.bcebos.com/slim_ci/ILSVRC2012_data_demo.tar.gz
+tar -xf ILSVRC2012_data_demo.tar.gz
+```
+
+- **2.运行自动化压缩**
+
+```python
+# 导入依赖包
+import paddle
+from PIL import Image
+from paddle.vision.datasets import DatasetFolder
+from paddle.vision.transforms import transforms
+from paddleslim.auto_compression import AutoCompression
+paddle.enable_static()
+# 定义DataSet
+class ImageNetDataset(DatasetFolder):
+ def __init__(self, path, image_size=224):
+ super(ImageNetDataset, self).__init__(path)
+ normalize = transforms.Normalize(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375])
+ self.transform = transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(image_size), transforms.Transpose(),
+ normalize
+ ])
+
+ def __getitem__(self, idx):
+ img_path, _ = self.samples[idx]
+ return self.transform(Image.open(img_path).convert('RGB'))
+
+ def __len__(self):
+ return len(self.samples)
+
+# 定义DataLoader
+train_dataset = ImageNetDataset("./ILSVRC2012_data_demo/ILSVRC2012/train/")
+image = paddle.static.data(
+ name='inputs', shape=[None] + [3, 224, 224], dtype='float32')
+train_loader = paddle.io.DataLoader(train_dataset, feed_list=[image], batch_size=32, return_list=False)
+# 开始自动压缩
+ac = AutoCompression(
+ model_dir="./MobileNetV1_infer",
+ model_filename="inference.pdmodel",
+ params_filename="inference.pdiparams",
+ save_dir="MobileNetV1_quant",
+ config={'Quantization': {}, "HyperParameterOptimization": {'ptq_algo': ['avg'], 'max_quant_count': 3}},
+ train_dataloader=train_loader,
+ eval_dataloader=train_loader)
+ac.compress()
+```
+
+- **3.精度测试**
+
+ - 测试压缩前模型的精度:
+
+ ```shell
+ CUDA_VISIBLE_DEVICES=0 python ./image_classification/eval.py
+ ### Eval Top1: 0.7171724759615384
+ ```
+
+ - 测试量化模型的精度:
+
+ ```shell
+ CUDA_VISIBLE_DEVICES=0 python ./image_classification/eval.py --model_dir='MobileNetV1_quant'
+ ### Eval Top1: 0.7166466346153846
+ ```
+
+  - 量化后模型的精度相比量化前几乎无损。由于使用的是超参搜索的方法来选择量化参数,每次运行得到的量化模型精度会有些许波动。
+
+- **4.推理速度测试**
+
+ - 量化模型速度的测试依赖推理库的支持,所以确保安装的是带有TensorRT的PaddlePaddle。以下示例和展示的测试结果是基于Tesla V100、CUDA 10.2、python3.7得到的。
+
+ - 使用以下指令查看本地cuda版本,并且在[下载链接](https://paddleinference.paddlepaddle.org.cn/master/user_guides/download_lib.html#python)中下载对应cuda版本和对应python版本的paddlepaddle安装包。
+
+ ```shell
+ cat /usr/local/cuda/version.txt ### CUDA Version 10.2.89
+ ### 10.2.89 为cuda版本号,可以根据这个版本号选择需要安装的带有TensorRT的PaddlePaddle安装包。
+ ```
+
+ - 安装下载的whl包:(这里通过wget下载到的是python3.7、cuda10.2的PaddlePaddle安装包,若您的环境和示例环境不同,请依赖您自己机器的环境下载对应的安装包,否则运行示例代码会报错。)
+
+ ```
+ wget https://paddle-inference-lib.bj.bcebos.com/2.3.0/python/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.2_cudnn8.1.1_trt7.2.3.4/paddlepaddle_gpu-2.3.0-cp37-cp37m-linux_x86_64.whl
+ pip install paddlepaddle_gpu-2.3.0-cp37-cp37m-linux_x86_64.whl --force-reinstall
+ ```
+
+ - 测试FP32模型的速度
+
+ ```
+ python ./image_classification/infer.py
+ ### using tensorrt FP32 batch size: 1 time(ms): 0.6140608787536621
+ ```
+
+ - 测试FP16模型的速度
+
+ ```
+ python ./image_classification/infer.py --use_fp16=True
+ ### using tensorrt FP16 batch size: 1 time(ms): 0.5795984268188477
+ ```
+
+ - 测试INT8模型的速度
+
+ ```
+ python ./image_classification/infer.py --model_dir=./MobileNetV1_quant/ --use_int8=True
+ ### using tensorrt INT8 batch size: 1 time(ms): 0.5213963985443115
+ ```
+
+ - **提示:**
+
+ - DataLoader传入的数据集是待压缩模型所用的数据集,DataLoader继承自`paddle.io.DataLoader`。可以直接使用模型套件中的DataLoader,或者根据[paddle.io.DataLoader](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/io/DataLoader_cn.html#dataloader)自定义所需要的DataLoader。
+  - 自动化压缩Config中定义量化、蒸馏、剪枝等压缩算法会合并执行,压缩策略有:量化+蒸馏,剪枝+蒸馏等等。示例中选择的配置为离线量化超参搜索,量化+蒸馏的组合配置可参考本节末尾的示意代码。
+ - 如果要压缩的模型参数是存储在各自分离的文件中,需要先通过[convert.py](./convert.py) 脚本将其保存成一个单独的二进制文件。
+
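+下面给出“量化训练 + 蒸馏”组合策略的一个最小配置示意(仅说明多种压缩策略在`config`中的组合方式;`Quantization`/`Distillation`字段取值参考了本目录下YOLOv7示例的yaml配置,`save_dir`等名称为示意假设,完整可运行的训练配置请以各示例目录下的yaml为准):
+
+```python
+from paddleslim.auto_compression import AutoCompression
+
+# 量化、蒸馏两个策略以字典key的形式同时传入,ACT会将其合并执行
+act_config = {
+    'Quantization': {
+        'activation_quantize_type': 'moving_average_abs_max',
+        'quantize_op_types': ['conv2d', 'depthwise_conv2d'],
+    },
+    'Distillation': {
+        'alpha': 1.0,
+        'loss': 'soft_label',
+    },
+}
+
+ac = AutoCompression(
+    model_dir="./MobileNetV1_infer",
+    model_filename="inference.pdmodel",
+    params_filename="inference.pdiparams",
+    save_dir="MobileNetV1_qat_dis",    # 示意的输出目录名
+    config=act_config,
+    train_dataloader=train_loader,     # 复用上文快速开始中构建的train_loader
+    eval_dataloader=train_loader)
+ac.compress()
+```
+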
+## 进阶使用
+
+- ACT可以自动处理常见的预测模型,如果有更特殊的改造需求,可以参考[ACT超参配置教程](./hyperparameter_tutorial.md)来进行单独配置压缩策略。
+
+## 社区交流
+
+- 微信扫描二维码并填写问卷之后,加入技术交流群
+
+
+
+
+
+- 如果你发现任何关于ACT自动化压缩工具的问题或者是建议, 欢迎通过[GitHub Issues](https://github.com/PaddlePaddle/PaddleSlim/issues)给我们提issues。同时欢迎贡献更多优秀模型,共建开源生态。
+
+## License
+
+本项目遵循[Apache-2.0开源协议](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/LICENSE)
diff --git a/example/auto_compression/pytorch_yolov6/README.md b/example/auto_compression/pytorch_yolov6/README.md
index 41a61b54..7cdb5464 100644
--- a/example/auto_compression/pytorch_yolov6/README.md
+++ b/example/auto_compression/pytorch_yolov6/README.md
@@ -22,7 +22,7 @@
 | 模型 | 策略 | 输入尺寸 | mAPval<br>0.5:0.95 | 预测时延FP32<br>(ms) |预测时延FP16<br>(ms) | 预测时延INT8<br>(ms) | 配置文件 | Inference模型 |
| :-------- |:-------- |:--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | :-----------------------------: |
-| YOLOv6s | Base模型 | 640*640 | 42.4 | 9.06ms | 2.90ms | - | - | [Model](https://bj.bcebos.com/v1/paddle-slim-models/detection/yolov6s_infer.tar) |
+| YOLOv6s | Base模型 | 640*640 | 42.4 | 9.06ms | 2.90ms | - | - | [Model](https://bj.bcebos.com/v1/paddle-slim-models/act/yolov6s_infer.tar) |
| YOLOv6s | KL离线量化 | 640*640 | 30.3 | - | - | 1.83ms | - | - |
| YOLOv6s | 量化蒸馏训练 | 640*640 | **41.3** | - | - | **1.83ms** | [config](./configs/yolov6s_qat_dis.yaml) | [Model](https://bj.bcebos.com/v1/paddle-slim-models/act/yolov6s_quant.tar) |
@@ -83,7 +83,7 @@ pip install x2paddle sympy onnx
x2paddle --framework=onnx --model=yolov6s.onnx --save_dir=pd_model
cp -r pd_model/inference_model/ yolov6s_infer
```
-即可得到YOLOv6s模型的预测模型(`model.pdmodel` 和 `model.pdiparams`)。如想快速体验,可直接下载上方表格中YOLOv6s的[Paddle预测模型](https://bj.bcebos.com/v1/paddle-slim-models/detection/yolov6s_infer.tar)。
+即可得到YOLOv6s模型的预测模型(`model.pdmodel` 和 `model.pdiparams`)。如想快速体验,可直接下载上方表格中YOLOv6s的[Paddle预测模型](https://bj.bcebos.com/v1/paddle-slim-models/act/yolov6s_infer.tar)。
预测模型的格式为:`model.pdmodel` 和 `model.pdiparams`两个,带`pdmodel`的是模型文件,带`pdiparams`后缀的是权重文件。
diff --git a/example/auto_compression/pytorch_yolov7/README.md b/example/auto_compression/pytorch_yolov7/README.md
new file mode 100644
index 00000000..7dddf99b
--- /dev/null
+++ b/example/auto_compression/pytorch_yolov7/README.md
@@ -0,0 +1,152 @@
+# YOLOv7自动压缩示例
+
+目录:
+- [1.简介](#1简介)
+- [2.Benchmark](#2Benchmark)
+- [3.开始自动压缩](#3-自动压缩流程)
+  - [3.1 准备环境](#31-准备环境)
+  - [3.2 准备数据集](#32-准备数据集)
+  - [3.3 准备预测模型](#33-准备预测模型)
+  - [3.4 自动压缩并产出模型](#34-自动压缩并产出模型)
+  - [3.5 测试模型精度](#35-测试模型精度)
+- [4.预测部署](#4预测部署)
+- [5.FAQ](#5faq)
+
+## 1. 简介
+
+飞桨模型转换工具[X2Paddle](https://github.com/PaddlePaddle/X2Paddle)支持将```Caffe/TensorFlow/ONNX/PyTorch```的模型一键转为飞桨(PaddlePaddle)的预测模型。借助X2Paddle的能力,各种框架的推理模型可以很方便的使用PaddleSlim的自动化压缩功能。
+
+本示例将以[WongKinYiu/yolov7](https://github.com/WongKinYiu/yolov7)目标检测模型为例,将PyTorch框架模型转换为Paddle框架模型,再使用ACT自动压缩功能进行自动压缩。本示例使用的自动压缩策略为量化训练。
+
+## 2.Benchmark
+
+| 模型 | 策略 | 输入尺寸 | mAPval<br>0.5:0.95 | 预测时延FP32<br>(ms) |预测时延FP16<br>(ms) | 预测时延INT8<br>(ms) | 配置文件 | Inference模型 |
+| :-------- |:-------- |:--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | :-----------------------------: |
+| YOLOv7 | Base模型 | 640*640 | 51.1 | 26.84ms | 7.44ms | - | - | [Model](https://bj.bcebos.com/v1/paddle-slim-models/act/yolov7_infer.tar) |
+| YOLOv7 | KL离线量化 | 640*640 | 50.2 | - | - | 4.55ms | - | - |
+| YOLOv7 | 量化蒸馏训练 | 640*640 | **50.8** | - | - | **4.55ms** | [config](./configs/yolov7_qat_dis.yaml) | [Model](https://bj.bcebos.com/v1/paddle-slim-models/act/yolov7_quant.tar) |
+
+说明:
+- mAP的指标均在COCO val2017数据集中评测得到。
+- YOLOv7模型在Tesla T4的GPU环境下开启TensorRT 8.4.1,batch_size=1, 测试脚本是[cpp_infer](./cpp_infer)。
+
+## 3. 自动压缩流程
+
+#### 3.1 准备环境
+- PaddlePaddle >= 2.3 (可从[Paddle官网](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html)下载安装)
+- PaddleSlim > 2.3版本
+- PaddleDet >= 2.4
+- [X2Paddle](https://github.com/PaddlePaddle/X2Paddle) >= 1.3.6
+- opencv-python
+
+(1)安装paddlepaddle:
+```shell
+# CPU
+pip install paddlepaddle
+# GPU
+pip install paddlepaddle-gpu
+```
+
+(2)安装paddleslim:
+```shell
+pip install paddleslim
+```
+
+(3)安装paddledet:
+```shell
+pip install paddledet
+```
+
+注:安装PaddleDet的目的只是为了直接使用PaddleDetection中的Dataloader组件。
+
+(4)安装X2Paddle的1.3.6以上版本:
+```shell
+pip install x2paddle sympy onnx
+```
+
+#### 3.2 准备数据集
+
+本案例默认以COCO数据进行自动压缩实验,并且依赖PaddleDetection中数据读取模块,如果自定义COCO数据,或者其他格式数据,请参考[PaddleDetection数据准备文档](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/docs/tutorials/PrepareDataSet.md) 来准备数据。
+
+如果已经准备好数据集,请直接修改[./configs/yolov7_reader.yaml](./configs/yolov7_reader.yaml)中`EvalDataset`的`dataset_dir`字段为自己数据集路径即可。
+
+
+#### 3.3 准备预测模型
+
+(1)准备ONNX模型:
+
+可通过[WongKinYiu/yolov7](https://github.com/WongKinYiu/yolov7)的导出脚本来准备ONNX模型,具体步骤如下:
+```shell
+git clone https://github.com/WongKinYiu/yolov7.git
+# 切换分支到u5分支,保持导出的ONNX模型后处理和YOLOv5一致
+git checkout u5
+# 下载好yolov7.pt权重后执行:
+python export.py --weights yolov7.pt --include onnx
+```
+
+也可以直接下载我们已经准备好的[yolov7.onnx](https://paddle-slim-models.bj.bcebos.com/act/yolov7.onnx)。
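+
+转换前可以用`onnx`库(3.1节安装x2paddle时已一并安装)先做一次简单检查,确认模型文件完整并查看输入输出名,以下仅为可选的检查示意:
+
+```python
+import onnx
+
+# 加载并校验ONNX模型结构是否完整
+model = onnx.load("yolov7.onnx")
+onnx.checker.check_model(model)
+
+# 打印输入/输出名,便于与转换后的预测模型相互对照
+print([inp.name for inp in model.graph.input])
+print([out.name for out in model.graph.output])
+```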
+
+
+(2) 转换模型:
+```
+x2paddle --framework=onnx --model=yolov7.onnx --save_dir=pd_model
+cp -r pd_model/inference_model/ yolov7_infer
+```
+即可得到YOLOv7模型的预测模型(`model.pdmodel` 和 `model.pdiparams`)。如想快速体验,可直接下载上方表格中YOLOv7的[Paddle预测模型](https://bj.bcebos.com/v1/paddle-slim-models/act/yolov7_infer.tar)。
+
+
+预测模型的格式为:`model.pdmodel` 和 `model.pdiparams`两个,带`pdmodel`的是模型文件,带`pdiparams`后缀的是权重文件。
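+
+可以用下面的代码快速验证转换得到的预测模型能否正常加载,并查看输入输出名(加载方式与本目录eval.py中一致,仅作检查示意,路径以实际为准):
+
+```python
+import paddle
+
+paddle.enable_static()
+exe = paddle.static.Executor(paddle.CPUPlace())
+# 加载x2paddle转换得到的预测模型
+program, feed_names, fetch_targets = paddle.static.load_inference_model(
+    "./yolov7_infer",
+    exe,
+    model_filename="model.pdmodel",
+    params_filename="model.pdiparams")
+print("feed:", feed_names)
+print("fetch:", [v.name for v in fetch_targets])
+```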
+
+
+#### 3.4 自动压缩并产出模型
+
+蒸馏量化自动压缩示例通过run.py脚本启动,会使用接口```paddleslim.auto_compression.AutoCompression```对模型进行自动压缩。配置config文件中模型路径、蒸馏、量化、和训练等部分的参数,配置完成后便可对模型进行量化和蒸馏。具体运行命令为:
+
+- 单卡训练:
+```
+export CUDA_VISIBLE_DEVICES=0
+python run.py --config_path=./configs/yolov7_qat_dis.yaml --save_dir='./output/'
+```
+
+- 多卡训练:
+```
+CUDA_VISIBLE_DEVICES=0,1,2,3 python -m paddle.distributed.launch --log_dir=log --gpus 0,1,2,3 run.py \
+ --config_path=./configs/yolov7_qat_dis.yaml --save_dir='./output/'
+```
+
+#### 3.5 测试模型精度
+
+修改[yolov7_qat_dis.yaml](./configs/yolov7_qat_dis.yaml)中`model_dir`字段为模型存储路径,然后使用eval.py脚本得到模型的mAP:
+```
+export CUDA_VISIBLE_DEVICES=0
+python eval.py --config_path=./configs/yolov7_qat_dis.yaml
+```
+
+
+## 4.预测部署
+
+#### Paddle-TensorRT C++部署
+
+进入[cpp_infer](./cpp_infer)文件夹内,请按照[C++ TensorRT Benchmark测试教程](./cpp_infer/README.md)进行准备环境及编译,然后开始测试:
+```shell
+# 编译
+bash compile.sh
+# 执行
+./build/trt_run --model_file yolov7_quant/model.pdmodel --params_file yolov7_quant/model.pdiparams --run_mode=trt_int8
+```
+
+#### Paddle-TensorRT Python部署:
+
+首先安装带有TensorRT的[Paddle安装包](https://www.paddlepaddle.org.cn/inference/v2.3/user_guides/download_lib.html#python)。
+
+然后使用[paddle_trt_infer.py](./paddle_trt_infer.py)进行部署:
+```shell
+python paddle_trt_infer.py --model_path=output --image_file=images/000000570688.jpg --benchmark=True --run_mode=trt_int8
+```
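+
+其中`--run_mode=trt_int8`对应Paddle Inference Python接口中开启TensorRT并把精度设为INT8,基本用法示意如下(假设`output`目录下为自动压缩导出的`model.pdmodel`/`model.pdiparams`,完整的前后处理与benchmark逻辑请参考paddle_trt_infer.py):
+
+```python
+import numpy as np
+from paddle.inference import Config, PrecisionType, create_predictor
+
+config = Config("output/model.pdmodel", "output/model.pdiparams")
+config.enable_use_gpu(256, 0)
+# 开启TensorRT子图引擎,精度设为INT8(即trt_int8模式)
+config.enable_tensorrt_engine(
+    workspace_size=1 << 30,
+    max_batch_size=1,
+    min_subgraph_size=3,
+    precision_mode=PrecisionType.Int8,
+    use_static=False,
+    use_calib_mode=False)
+predictor = create_predictor(config)
+
+# 构造一个640x640的假输入,仅用于验证预测流程能否跑通
+input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
+input_handle.copy_from_cpu(np.ones((1, 3, 640, 640), dtype=np.float32))
+predictor.run()
+output_handle = predictor.get_output_handle(predictor.get_output_names()[0])
+print(output_handle.copy_to_cpu().shape)
+```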
+
+## 5.FAQ
+
+- 如果想测试离线量化模型精度,可执行:
+```shell
+python post_quant.py --config_path=./configs/yolov7_qat_dis.yaml
+```
diff --git a/example/auto_compression/pytorch_yolov7/configs/yolov7_qat_dis.yaml b/example/auto_compression/pytorch_yolov7/configs/yolov7_qat_dis.yaml
new file mode 100644
index 00000000..6607e361
--- /dev/null
+++ b/example/auto_compression/pytorch_yolov7/configs/yolov7_qat_dis.yaml
@@ -0,0 +1,30 @@
+
+Global:
+ reader_config: configs/yolov7_reader.yaml
+ input_list: {'image': 'x2paddle_images'}
+ Evaluation: True
+ model_dir: ./yolov7_infer
+ model_filename: model.pdmodel
+ params_filename: model.pdiparams
+
+Distillation:
+ alpha: 1.0
+ loss: soft_label
+
+Quantization:
+ activation_quantize_type: 'moving_average_abs_max'
+ quantize_op_types:
+ - conv2d
+ - depthwise_conv2d
+
+TrainConfig:
+ train_iter: 8000
+ eval_iter: 1000
+ learning_rate:
+ type: CosineAnnealingDecay
+ learning_rate: 0.00003
+ T_max: 8000
+ optimizer_builder:
+ optimizer:
+ type: SGD
+ weight_decay: 0.00004
diff --git a/example/auto_compression/pytorch_yolov7/configs/yolov7_reader.yaml b/example/auto_compression/pytorch_yolov7/configs/yolov7_reader.yaml
new file mode 100644
index 00000000..cb87c3f8
--- /dev/null
+++ b/example/auto_compression/pytorch_yolov7/configs/yolov7_reader.yaml
@@ -0,0 +1,27 @@
+metric: COCO
+num_classes: 80
+
+# Dataset configuration
+TrainDataset:
+ !COCODataSet
+ image_dir: train2017
+ anno_path: annotations/instances_train2017.json
+ dataset_dir: dataset/coco/
+
+EvalDataset:
+ !COCODataSet
+ image_dir: val2017
+ anno_path: annotations/instances_val2017.json
+ dataset_dir: dataset/coco/
+
+worker_num: 0
+
+# preprocess reader in test
+EvalReader:
+ sample_transforms:
+ - Decode: {}
+ - Resize: {target_size: [640, 640], keep_ratio: True}
+ - Pad: {size: [640, 640], fill_value: [114., 114., 114.]}
+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}
+ - Permute: {}
+ batch_size: 1
diff --git a/example/auto_compression/pytorch_yolov7/cpp_infer/CMakeLists.txt b/example/auto_compression/pytorch_yolov7/cpp_infer/CMakeLists.txt
new file mode 100644
index 00000000..d5307c65
--- /dev/null
+++ b/example/auto_compression/pytorch_yolov7/cpp_infer/CMakeLists.txt
@@ -0,0 +1,263 @@
+cmake_minimum_required(VERSION 3.0)
+project(cpp_inference_demo CXX C)
+option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
+option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
+option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
+option(USE_TENSORRT "Compile demo with TensorRT." OFF)
+option(WITH_ROCM "Compile demo with rocm." OFF)
+option(WITH_ONNXRUNTIME "Compile demo with ONNXRuntime" OFF)
+option(WITH_ARM "Compile demo with ARM" OFF)
+option(WITH_MIPS "Compile demo with MIPS" OFF)
+option(WITH_SW "Compile demo with SW" OFF)
+option(WITH_XPU "Compile demo with xpu" OFF)
+option(WITH_NPU "Compile demo with npu" OFF)
+
+if(NOT WITH_STATIC_LIB)
+ add_definitions("-DPADDLE_WITH_SHARED_LIB")
+else()
+ # PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode.
+ # Set it to empty in static library mode to avoid compilation issues.
+ add_definitions("/DPD_INFER_DECL=")
+endif()
+
+macro(safe_set_static_flag)
+ foreach(flag_var
+ CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
+ CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
+ if(${flag_var} MATCHES "/MD")
+ string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
+ endif(${flag_var} MATCHES "/MD")
+ endforeach(flag_var)
+endmacro()
+
+if(NOT DEFINED PADDLE_LIB)
+ message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
+endif()
+if(NOT DEFINED DEMO_NAME)
+ message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
+endif()
+
+include_directories("${PADDLE_LIB}/")
+set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/")
+include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include")
+include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include")
+include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include")
+include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include")
+include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}cryptopp/include")
+include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/include")
+include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/include")
+
+link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib")
+link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib")
+link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib")
+link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib")
+link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}cryptopp/lib")
+link_directories("${PADDLE_LIB}/paddle/lib")
+link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib")
+link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib")
+
+if (WIN32)
+ add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
+ option(MSVC_STATIC_CRT "use static C Runtime library by default" ON)
+ if (MSVC_STATIC_CRT)
+ if (WITH_MKL)
+ set(FLAG_OPENMP "/openmp")
+ endif()
+ set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
+ set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
+ set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
+ set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
+ safe_set_static_flag()
+ if (WITH_STATIC_LIB)
+ add_definitions(-DSTATIC_LIB)
+ endif()
+ endif()
+else()
+ if(WITH_MKL)
+ set(FLAG_OPENMP "-fopenmp")
+ endif()
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ${FLAG_OPENMP}")
+endif()
+
+if(WITH_GPU)
+ if(NOT WIN32)
+ include_directories("/usr/local/cuda/include")
+ if(CUDA_LIB STREQUAL "")
+ set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
+ endif()
+ else()
+ include_directories("C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\include")
+ if(CUDA_LIB STREQUAL "")
+ set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64")
+ endif()
+ endif(NOT WIN32)
+endif()
+
+if (USE_TENSORRT AND WITH_GPU)
+ set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library")
+ if("${TENSORRT_ROOT}" STREQUAL "")
+ message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ")
+ endif()
+ set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include)
+ set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib)
+ file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
+ string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+ "${TENSORRT_VERSION_FILE_CONTENTS}")
+ if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
+ file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
+ string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+ "${TENSORRT_VERSION_FILE_CONTENTS}")
+ endif()
+ if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
+ message(SEND_ERROR "Failed to detect TensorRT version.")
+ endif()
+ string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
+ TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
+ message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
+ "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
+ include_directories("${TENSORRT_INCLUDE_DIR}")
+ link_directories("${TENSORRT_LIB_DIR}")
+endif()
+
+if(WITH_MKL)
+ set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml")
+ include_directories("${MATH_LIB_PATH}/include")
+ if(WIN32)
+ set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX}
+ ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX})
+ else()
+ set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
+ ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
+ endif()
+ set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn")
+ if(EXISTS ${MKLDNN_PATH})
+ include_directories("${MKLDNN_PATH}/include")
+ if(WIN32)
+ set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
+ else(WIN32)
+ set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
+ endif(WIN32)
+ endif()
+elseif((NOT WITH_MIPS) AND (NOT WITH_SW))
+ set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas")
+ include_directories("${OPENBLAS_LIB_PATH}/include/openblas")
+ if(WIN32)
+ set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
+ else()
+ set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
+ endif()
+endif()
+
+if(WITH_STATIC_LIB)
+ set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
+else()
+ if(WIN32)
+ set(DEPS ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
+ else()
+ set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
+ endif()
+endif()
+
+if (WITH_ONNXRUNTIME)
+ if(WIN32)
+ set(DEPS ${DEPS} ${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib/onnxruntime.lib paddle2onnx)
+ elseif(APPLE)
+ set(DEPS ${DEPS} ${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib/libonnxruntime.1.10.0.dylib paddle2onnx)
+ else()
+ set(DEPS ${DEPS} ${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib/libonnxruntime.so.1.10.0 paddle2onnx)
+ endif()
+endif()
+
+if (NOT WIN32)
+ set(EXTERNAL_LIB "-lrt -ldl -lpthread")
+ set(DEPS ${DEPS}
+ ${MATH_LIB} ${MKLDNN_LIB}
+ glog gflags protobuf xxhash cryptopp
+ ${EXTERNAL_LIB})
+else()
+ set(DEPS ${DEPS}
+ ${MATH_LIB} ${MKLDNN_LIB}
+ glog gflags_static libprotobuf xxhash cryptopp-static ${EXTERNAL_LIB})
+ set(DEPS ${DEPS} shlwapi.lib)
+endif(NOT WIN32)
+
+if(WITH_GPU)
+ if(NOT WIN32)
+ if (USE_TENSORRT)
+ set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
+ endif()
+ set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
+ else()
+ if(USE_TENSORRT)
+ set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
+ if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
+ set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_STATIC_LIBRARY_SUFFIX})
+ endif()
+ endif()
+ set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
+ set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
+ set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} )
+ endif()
+endif()
+
+if(WITH_ROCM AND NOT WIN32)
+ set(DEPS ${DEPS} ${ROCM_LIB}/libamdhip64${CMAKE_SHARED_LIBRARY_SUFFIX})
+endif()
+
+if(WITH_XPU AND NOT WIN32)
+ set(XPU_INSTALL_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}xpu")
+ set(DEPS ${DEPS} ${XPU_INSTALL_PATH}/lib/libxpuapi${CMAKE_SHARED_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${XPU_INSTALL_PATH}/lib/libxpurt${CMAKE_SHARED_LIBRARY_SUFFIX})
+endif()
+
+if(WITH_NPU AND NOT WIN32)
+ set(DEPS ${DEPS} ${ASCEND_DIR}/ascend-toolkit/latest/fwkacllib/lib64/libgraph${CMAKE_SHARED_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${ASCEND_DIR}/ascend-toolkit/latest/fwkacllib/lib64/libge_runner${CMAKE_SHARED_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${ASCEND_DIR}/ascend-toolkit/latest/fwkacllib/lib64/libascendcl${CMAKE_SHARED_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${ASCEND_DIR}/ascend-toolkit/latest/fwkacllib/lib64/libascendcl${CMAKE_SHARED_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${ASCEND_DIR}/ascend-toolkit/latest/fwkacllib/lib64/libacl_op_compiler${CMAKE_SHARED_LIBRARY_SUFFIX})
+endif()
+
+add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
+target_link_libraries(${DEMO_NAME} ${DEPS})
+if(WIN32)
+ if(USE_TENSORRT)
+ add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}
+ ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+ COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
+ ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+ )
+ if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
+ add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_SHARED_LIBRARY_SUFFIX}
+ ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
+ endif()
+ endif()
+ if(WITH_MKL)
+ add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
+ COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
+ COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release
+ )
+ else()
+ add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
+ )
+ endif()
+ if(WITH_ONNXRUNTIME)
+ add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib/onnxruntime.dll
+ ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+ COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib/paddle2onnx.dll
+ ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+ )
+ endif()
+ if(NOT WITH_STATIC_LIB)
+ add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+ )
+ endif()
+endif()
diff --git a/example/auto_compression/pytorch_yolov7/cpp_infer/README.md b/example/auto_compression/pytorch_yolov7/cpp_infer/README.md
new file mode 100644
index 00000000..04c1c23b
--- /dev/null
+++ b/example/auto_compression/pytorch_yolov7/cpp_infer/README.md
@@ -0,0 +1,51 @@
+# YOLOv7 TensorRT Benchmark测试(Linux)
+
+## 环境准备
+
+- CUDA、CUDNN:确认环境中已经安装CUDA和CUDNN,并且提前获取其安装路径。
+
+- TensorRT:可通过NVIDIA官网下载[TensorRT 8.4.1.5](https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/8.4.1/tars/tensorrt-8.4.1.5.linux.x86_64-gnu.cuda-11.6.cudnn8.4.tar.gz)或其他版本安装包。
+
+- Paddle Inference C++预测库:编译develop版本请参考[编译文档](https://www.paddlepaddle.org.cn/inference/user_guides/source_compile.html)。编译完成后,会在build目录下生成`paddle_inference_install_dir`文件夹,这个就是我们需要的C++预测库文件。
+
+## 编译可执行程序
+
+- (1)修改`compile.sh`中依赖库路径,主要是以下内容:
+```shell
+# Paddle Inference预测库路径
+LIB_DIR=/root/auto_compress/Paddle/build/paddle_inference_install_dir/
+# CUDNN路径
+CUDNN_LIB=/usr/lib/x86_64-linux-gnu/
+# CUDA路径
+CUDA_LIB=/usr/local/cuda/lib64
+# TensorRT安装包路径,为TRT资源包解压完成后的绝对路径,其中包含`lib`和`include`文件夹
+TENSORRT_ROOT=/root/auto_compress/trt/trt8.4/
+```
+
+- (2)执行`bash compile.sh`进行编译,编译完成后会在`build`目录下生成可执行文件`trt_run`。
+
+## 测试
+
+- FP32
+```
+./build/trt_run --model_file yolov7_infer/model.pdmodel --params_file yolov7_infer/model.pdiparams --run_mode=trt_fp32
+```
+
+- FP16
+```
+./build/trt_run --model_file yolov7_infer/model.pdmodel --params_file yolov7_infer/model.pdiparams --run_mode=trt_fp16
+```
+
+- INT8
+```
+./build/trt_run --model_file yolov7_quant/model.pdmodel --params_file yolov7_quant/model.pdiparams --run_mode=trt_int8
+```
+
+## 性能对比
+
+| 预测库 | 模型 | 预测时延FP32<br>(ms) |预测时延FP16<br>(ms) | 预测时延INT8<br>(ms) |
+| :--------: | :--------: |:-------- |:--------: | :---------------------: |
+| Paddle TensorRT | YOLOv7 | 26.84ms | 7.44ms | 4.55ms |
+| TensorRT | YOLOv7 | 28.25ms | 7.23ms | 4.67ms |
+
+环境:
+- Tesla T4,TensorRT 8.4.1,CUDA 11.2
+- batch_size=1
diff --git a/example/auto_compression/pytorch_yolov7/cpp_infer/compile.sh b/example/auto_compression/pytorch_yolov7/cpp_infer/compile.sh
new file mode 100644
index 00000000..afff924b
--- /dev/null
+++ b/example/auto_compression/pytorch_yolov7/cpp_infer/compile.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+set +x
+set -e
+
+work_path=$(dirname $(readlink -f $0))
+
+mkdir -p build
+cd build
+rm -rf *
+
+DEMO_NAME=trt_run
+
+WITH_MKL=ON
+WITH_GPU=ON
+USE_TENSORRT=ON
+
+LIB_DIR=/root/auto_compress/Paddle/build/paddle_inference_install_dir/
+CUDNN_LIB=/usr/lib/x86_64-linux-gnu/
+CUDA_LIB=/usr/local/cuda/lib64
+TENSORRT_ROOT=/root/auto_compress/trt/trt8.4/
+
+WITH_ROCM=OFF
+ROCM_LIB=/opt/rocm/lib
+
+cmake .. -DPADDLE_LIB=${LIB_DIR} \
+ -DWITH_MKL=${WITH_MKL} \
+ -DDEMO_NAME=${DEMO_NAME} \
+ -DWITH_GPU=${WITH_GPU} \
+ -DWITH_STATIC_LIB=OFF \
+ -DUSE_TENSORRT=${USE_TENSORRT} \
+ -DWITH_ROCM=${WITH_ROCM} \
+ -DROCM_LIB=${ROCM_LIB} \
+ -DCUDNN_LIB=${CUDNN_LIB} \
+ -DCUDA_LIB=${CUDA_LIB} \
+ -DTENSORRT_ROOT=${TENSORRT_ROOT}
+
+make -j
diff --git a/example/auto_compression/pytorch_yolov7/cpp_infer/trt_run.cc b/example/auto_compression/pytorch_yolov7/cpp_infer/trt_run.cc
new file mode 100644
index 00000000..0ae055ac
--- /dev/null
+++ b/example/auto_compression/pytorch_yolov7/cpp_infer/trt_run.cc
@@ -0,0 +1,116 @@
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>
+
+#include <cuda_runtime.h>
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+
+#include "paddle/include/paddle_inference_api.h"
+#include "paddle/include/experimental/phi/common/float16.h"
+
+using paddle_infer::Config;
+using paddle_infer::Predictor;
+using paddle_infer::CreatePredictor;
+using paddle_infer::PrecisionType;
+using phi::dtype::float16;
+
+DEFINE_string(model_dir, "", "Directory of the inference model.");
+DEFINE_string(model_file, "", "Path of the inference model file.");
+DEFINE_string(params_file, "", "Path of the inference params file.");
+DEFINE_string(run_mode, "trt_fp32", "run_mode which can be: trt_fp32, trt_fp16 and trt_int8");
+DEFINE_int32(batch_size, 1, "Batch size.");
+DEFINE_int32(gpu_id, 0, "GPU card ID num.");
+DEFINE_int32(trt_min_subgraph_size, 3, "tensorrt min_subgraph_size");
+DEFINE_int32(warmup, 50, "warmup");
+DEFINE_int32(repeats, 1000, "repeats");
+
+using Time = decltype(std::chrono::high_resolution_clock::now());
+Time time() { return std::chrono::high_resolution_clock::now(); };
+double time_diff(Time t1, Time t2) {
+  typedef std::chrono::microseconds ms;
+  auto diff = t2 - t1;
+  ms counter = std::chrono::duration_cast<ms>(diff);
+  return counter.count() / 1000.0;
+}
+
+std::shared_ptr<Predictor> InitPredictor() {
+ Config config;
+ std::string model_path;
+ if (FLAGS_model_dir != "") {
+ config.SetModel(FLAGS_model_dir);
+ model_path = FLAGS_model_dir.substr(0, FLAGS_model_dir.find_last_of("/"));
+ } else {
+ config.SetModel(FLAGS_model_file, FLAGS_params_file);
+ model_path = FLAGS_model_file.substr(0, FLAGS_model_file.find_last_of("/"));
+ }
+ // enable tune
+ std::cout << "model_path: " << model_path << std::endl;
+ config.EnableUseGpu(256, FLAGS_gpu_id);
+ if (FLAGS_run_mode == "trt_fp32") {
+ config.EnableTensorRtEngine(1 << 30, FLAGS_batch_size, FLAGS_trt_min_subgraph_size,
+ PrecisionType::kFloat32, false, false);
+ } else if (FLAGS_run_mode == "trt_fp16") {
+ config.EnableTensorRtEngine(1 << 30, FLAGS_batch_size, FLAGS_trt_min_subgraph_size,
+ PrecisionType::kHalf, false, false);
+ } else if (FLAGS_run_mode == "trt_int8") {
+ config.EnableTensorRtEngine(1 << 30, FLAGS_batch_size, FLAGS_trt_min_subgraph_size,
+ PrecisionType::kInt8, false, false);
+ }
+ config.EnableMemoryOptim();
+ config.SwitchIrOptim(true);
+ return CreatePredictor(config);
+}
+
+template <typename type>
+void run(Predictor *predictor, const std::vector<type> &input,
+         const std::vector<int> &input_shape, type* out_data, std::vector<int> out_shape) {
+
+  // prepare input
+  int input_num = std::accumulate(input_shape.begin(), input_shape.end(), 1,
+                                  std::multiplies<int>());
+
+ auto input_names = predictor->GetInputNames();
+ auto input_t = predictor->GetInputHandle(input_names[0]);
+ input_t->Reshape(input_shape);
+ input_t->CopyFromCpu(input.data());
+
+ for (int i = 0; i < FLAGS_warmup; ++i)
+ CHECK(predictor->Run());
+
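+  // 正式计时:循环FLAGS_repeats次前向,末尾按平均值输出时延(ms)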
+ auto st = time();
+ for (int i = 0; i < FLAGS_repeats; ++i) {
+ auto input_names = predictor->GetInputNames();
+ auto input_t = predictor->GetInputHandle(input_names[0]);
+ input_t->Reshape(input_shape);
+ input_t->CopyFromCpu(input.data());
+
+ CHECK(predictor->Run());
+
+    auto output_names = predictor->GetOutputNames();
+    auto output_t = predictor->GetOutputHandle(output_names[0]);
+    std::vector<int> output_shape = output_t->shape();
+ output_t -> ShareExternalData(out_data, out_shape, paddle_infer::PlaceType::kGPU);
+ }
+
+ LOG(INFO) << "[" << FLAGS_run_mode << " bs-" << FLAGS_batch_size << " ] run avg time is " << time_diff(st, time()) / FLAGS_repeats
+ << " ms";
+}
+
+int main(int argc, char *argv[]) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  auto predictor = InitPredictor();
+  std::vector<int> input_shape = {FLAGS_batch_size, 3, 640, 640};
+  // float16
+  using dtype = float16;
+  std::vector<dtype> input_data(FLAGS_batch_size * 3 * 640 * 640, dtype(1.0));
+
+ dtype *out_data;
+ int out_data_size = FLAGS_batch_size * 25200 * 85;
+ cudaHostAlloc((void**)&out_data, sizeof(float) * out_data_size, cudaHostAllocMapped);
+
+  std::vector<int> out_shape{FLAGS_batch_size, 1, 25200, 85};
+ run(predictor.get(), input_data, input_shape, out_data, out_shape);
+ return 0;
+}
diff --git a/example/auto_compression/pytorch_yolov7/eval.py b/example/auto_compression/pytorch_yolov7/eval.py
new file mode 100644
index 00000000..478c4e1a
--- /dev/null
+++ b/example/auto_compression/pytorch_yolov7/eval.py
@@ -0,0 +1,151 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import numpy as np
+import argparse
+import paddle
+from ppdet.core.workspace import load_config, merge_config
+from ppdet.core.workspace import create
+from ppdet.metrics import COCOMetric, VOCMetric
+from paddleslim.auto_compression.config_helpers import load_config as load_slim_config
+
+from post_process import YOLOv7PostProcess
+
+
+def argsparser():
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ '--config_path',
+ type=str,
+ default=None,
+ help="path of compression strategy config.",
+ required=True)
+ parser.add_argument(
+ '--devices',
+ type=str,
+ default='gpu',
+ help="which device used to compress.")
+
+ return parser
+
+
+def reader_wrapper(reader, input_list):
+ def gen():
+ for data in reader:
+ in_dict = {}
+ if isinstance(input_list, list):
+ for input_name in input_list:
+ in_dict[input_name] = data[input_name]
+ elif isinstance(input_list, dict):
+ for input_name in input_list.keys():
+ in_dict[input_list[input_name]] = data[input_name]
+ yield in_dict
+
+ return gen
+
+
+def convert_numpy_data(data, metric):
+ data_all = {}
+ data_all = {k: np.array(v) for k, v in data.items()}
+ if isinstance(metric, VOCMetric):
+ for k, v in data_all.items():
+ if not isinstance(v[0], np.ndarray):
+ tmp_list = []
+ for t in v:
+ tmp_list.append(np.array(t))
+ data_all[k] = np.array(tmp_list)
+ else:
+ data_all = {k: np.array(v) for k, v in data.items()}
+ return data_all
+
+
+def eval():
+
+ place = paddle.CUDAPlace(0) if FLAGS.devices == 'gpu' else paddle.CPUPlace()
+ exe = paddle.static.Executor(place)
+
+ val_program, feed_target_names, fetch_targets = paddle.static.load_inference_model(
+ global_config["model_dir"],
+ exe,
+ model_filename=global_config["model_filename"],
+ params_filename=global_config["params_filename"])
+ print('Loaded model from: {}'.format(global_config["model_dir"]))
+
+ metric = global_config['metric']
+ for batch_id, data in enumerate(val_loader):
+ data_all = convert_numpy_data(data, metric)
+ data_input = {}
+ for k, v in data.items():
+ if isinstance(global_config['input_list'], list):
+ if k in global_config['input_list']:
+ data_input[k] = np.array(v)
+ elif isinstance(global_config['input_list'], dict):
+ if k in global_config['input_list'].keys():
+ data_input[global_config['input_list'][k]] = np.array(v)
+ outs = exe.run(val_program,
+ feed=data_input,
+ fetch_list=fetch_targets,
+ return_numpy=False)
+ res = {}
+ postprocess = YOLOv7PostProcess(
+ score_threshold=0.001, nms_threshold=0.65, multi_label=True)
+ res = postprocess(np.array(outs[0]), data_all['scale_factor'])
+ metric.update(data_all, res)
+ if batch_id % 100 == 0:
+ print('Eval iter:', batch_id)
+ metric.accumulate()
+ metric.log()
+ metric.reset()
+
+
+def main():
+ global global_config
+ all_config = load_slim_config(FLAGS.config_path)
+ global_config = all_config["Global"]
+ reader_cfg = load_config(global_config['reader_config'])
+
+ dataset = reader_cfg['EvalDataset']
+ global val_loader
+ val_loader = create('EvalReader')(reader_cfg['EvalDataset'],
+ reader_cfg['worker_num'],
+ return_list=True)
+ metric = None
+ if reader_cfg['metric'] == 'COCO':
+ clsid2catid = {v: k for k, v in dataset.catid2clsid.items()}
+ anno_file = dataset.get_anno()
+ metric = COCOMetric(
+ anno_file=anno_file, clsid2catid=clsid2catid, IouType='bbox')
+ elif reader_cfg['metric'] == 'VOC':
+ metric = VOCMetric(
+ label_list=dataset.get_label_list(),
+ class_num=reader_cfg['num_classes'],
+ map_type=reader_cfg['map_type'])
+ else:
+ raise ValueError("metric currently only supports COCO and VOC.")
+ global_config['metric'] = metric
+
+ eval()
+
+
+if __name__ == '__main__':
+ paddle.enable_static()
+ parser = argsparser()
+ FLAGS = parser.parse_args()
+
+ assert FLAGS.devices in ['cpu', 'gpu', 'xpu', 'npu']
+ paddle.set_device(FLAGS.devices)
+
+ main()
diff --git a/example/auto_compression/pytorch_yolov7/images/000000570688.jpg b/example/auto_compression/pytorch_yolov7/images/000000570688.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cb304bd56c4010c08611a30dcca58ea9140cea54
GIT binary patch
literal 138365
zc}i2iAd{OQ16~oc;jpUMzRFV|FR!aqn7P?f5&(I^(^`IEL${dUq8M0DZ({0-8R=u_f&yDPN_
zNRAq?Pq+KM1LE57P+4X+KEUFQKVXhov}F>!gV%}rklsk#Z$RIfGx&*h6^8Q1Ld
z%?Bf8U7xD47r1mES1Rf-0*E8O#J<-vfll6;t&{F^%(rpZKSgvg9y`Z6h62$&mEr#?;c{fN!qHB8U%BqbF*4ns-
zLjGQJK6`Hyl(Jvb%Vq`sVs_vE{c@C6R4OA&E4-0x
z(hwAYG|6B#O7276>hy>)ciRBC58#^v=QnunrWyJ@zowoyB@O3k?^)E<4a*$_!L5s#3b90!bXhrKx?
z$iO^;t7|p8-{vB{;BGp;=Nqxs$$S&^XH^3LtN!}MW3;DJ0)hCPjj_))*QanHk0FZ#
zm?71WTb&l7_4#`1;gdJqnA&Qq0#DT0&r`{|SVN^Lh?cvpHbDuCG$ui1Vv^);|#%P?S
zrHrX$PE*_xH0NiT7K}(S9Z${sr}1=+q>3?ueRGU($l#tUdqwaqjS@7<)l(YUqp?-i
zV4H=-{2~u9gxA+nkLwabtJ&Qs9CUv0tvMDDZvNa_
zTh@zf5S^p&ukTigS(0)<@c3&r826>Si`X<-sWs+e_;FmYXz2pH?fX_{uvr{LYIEb2
zsY8KrQy%#I)0dWjs7SSV&!A`;E5!M{+X|kh1dTCj@Wn$0t~7l-CJTh(UWPe;t?^2N
zVfNJ{(LwO>_Ocoiisl1iCcPM7LVMsdS)R!N#l{J+_!LkaM&5ZI;1OuMg`;7TO!9+O
z0Z`gHN=G`dvJUXd%BC3vMtaiuWlnJ$t{)(7{oOCLPftBAdmUKOR;OK%fy+M?IRtkj
zP&aUZCdc(Dh$cxa_q}Vd%?3d6EU-S7v9)bg0X=VB{dBBYKu6VY$LaMcEzQ;7^hssF
zVF%(1eUkZ+3=scvRYZO(-a^DTRT^S*_0K7Mq>S*6y1|0>?%G
z0yjmS!1FCjUg8uB?Qf5Jw(7m;1PneTL!43HA?z(RuEEy|hel0uaqFpt+*U6+snohG
zV?1MiZbk5#>-V(X24GQ(n^3%$IS~BS0jtlN>J3*|NdDuw>U~Y%0^
zQdKr#pp?ul{%#}u8-Eb@Vu$60s-Hlgpj`kGTBQ@qWoZB)4+So3rHozBH`vDRSm5Ai
zD1k-ePIbFW@@=QkDD0#2c5mHH8OWa)MV_Eq=S&qwI`fFhUeXr}T!41TaMFyySJ`c|
zd>AqF3BDUxMBHVs!$JmF2r(Tu?bXwkRRqTEx9lRouaqJUnU>HNhH9j<#Ou4IQD1DT
zj~dYialQ_R*I##i5_I>!Px`BFZ22_#KJISaMY~VsuP#xNmY!VP?V_qGx%Ae8ZFovS
zBiJ-v!f?dm=<%wV+ll_F0nJ;UQI<#JgsD3Qdk*gNeH`HE5_ledw0T(Iv8b=b?R_6=
zn3}!yC2)w|Be5iU6O_B5c8+>%^`>Rv!a<(*^h22)j}81!hKy_E_(a}*d)bfNADL7k
zyF~59_|`WxHEez1XvY$BU
zq-EUe?XpVwzvk-7V9u2?&VFJXRxe3^6d5!H998kG(TImq!~lMu!qpaR1z{U-gbzyX
zVt%p_#$!+T$_~DtM%Wa<*8xVEQI!vHGO$Yw(*+CY0~{6G!C#8>0*1d
zrrN1ql;dK1v$X-%@)L0>a$9}q^Mc7m?Lkkc4{^As{lkvw`B^$A>TE@!H9F!}ip>QG
zc&K?&60v{8M1ad911ItXxLqbrpFfE4H^y?2&)%_8uNI}*{`H8AdgKrji+;FKit++j
zo4|1(g(l}F)g^!i2%SV1j%>Gp_y_^JU3uY4I3@P<%4ZO38@!E(#P3{TxU>jG1=
z8gFpo>hf?=)*(_sLB@9!cZLRpwxUD_oSn=li>osRE@}I9?fvsYJHOj&@lPyotPvR1
zQ0Dr+Z_>H=5o8eC-E3r!M*eymeTI=QB12rRSF;BrHH!c%c6Or+VZ0yfHPxQE1)yc8
zJVl=b&TP$9Bd<$7NM7avD|yV6Q&p8O*{&i>EwgTKO8wW3QUp%lZ`(}D`v>1QDx=*THM
zE6l`j&I-suzPva2>!-4V64g1u>^9DxwYyhskG!`cdc`f6=e1g%c-YZ{wAubyKF#b}De3NJ>7`S;yF}^k
zbP18}E@_eOknWNeknYm&{rKk$v#v8fbK9bJDVDT-TZ**KGT6adhL^sI@E*U}awajh&ssG=J(6
z(IK>7LT2lvHY*QqSjWy!*>&kbJLr`!DSkzv{PoX|pchxKCsKOlgbcq`bYF)mUaX??
ztNFd85l8A974w%bCKNd@>@qzKwG3R}D7qs!n_pCJ8jqAH6*s-oZvq)gX44D5?CdG3
zYrC0Kb1>AsJ>&FyV?4W1rkiFi^+zZs@qi)t1C4QrcZF7jr9E%^%?QH0Iv;oQc|uJ<
z%amL5*^eLYtvx$Jl}E0;bp$H%(%(PjfyLe2+{;C?TnRQ&+^2D@?wS`v`WLtnAotKs
z=ZrTA_8g02{q*h$_O-lM84{92(&|U~nJb70^kwY-}}y$8}8
z&~O8IV5ytn3zzHy%%yP&YYslMOw!5Y?*$ZYyBPrMGETakR_FE6CP)R53JrySuk;YC
z3`s*4ddjPN1{OXoJ|o0qg%-K(47+_kGzJN%*Cal%6@ekI@_u%SKzgsWCAWjI&=f$O
zgEet*Sl1xZd^*FgxvTJmgu{8rDm2nt)nBgHk5hHWDBOW2LJEueVR3jhw?A;p&Z1Cv
zJuG7es;1|q(Gag*X~YlLPsd8PIso6n0883WKrI+1!^w!uEWwGGgg!wmZ8pX<3jFk!PqWHP{*1UKzk-
zNa5mUM*EFGap|*QgT9NVABilUasnGc$k|bF7J&2e%?s!98s&`<>qT-Vmp`v}LCZbN9nK?TO|&IcIm~$(GxHJ_+yA^SS1Uml#`Qecx2|1
z$MmU3Z}7$Sjay2%cGaaBQh5B&UC`*!lM$E|gMkwRAoQ0WkH%uD@?6EbR(Y6!_ekD-
z4y6WAcke0zJHZka#5X0x7>sl*ARcZ|C3+eV0oU$J(`g4>+IC$@_k_eFs}WTJ@juXL
zZA4F{7t~!U-oFMyxYP{9pjr7Qr
zIs;~mMycQZkVNL6AXQiMOKmQdDXH!LOz!4Aa!7OkaO+`B1vtFKi{1{n(g$?D@~TeD
z&0fAV6lp60!fjSrQS
z=X6&U-#f8epZ#m@3}flFbHNWo<8%k~9Byq%{YSGz6kT8&ZF40NjuM8l
zk83(qMUP3I{Tv_v?B~fk+E?L3wfg4)!KA029%V>z>Dp)n-+aotIp(1;&G6itjJC#o
zTD`VdUEIeo=;I5;t&s((jcV&Z5dQQ4x1g>C%&J~$AJy$&lcsB&HQV21kV40eJlYp{UFeBMPi$=yu7iO{s+;E;3E7dIg0$xPenTHuOpxK?L5o)BXshX-_Fcr>&6s$WpNP+~-v
z1Fc)mdT*T{0q>61BoowAE1GjvyNQ+?1YXqsk93R&)*+(?}(@Lig
z?^l%#ho>NW18Y8lW=1(jyo%&lU%KK!$
zDq?m{*kQmUjaB+sz>(yeyg?6lbSYCE=xv7eeJ)z-`v;O@*brV8@M&F;KZ2gLSG-HF
zEKcC2N!O>3(G2pXgnnS11nuVTQWFpI7Z6+`3e8z8rneV=Z9{Fxsj}O#=M~&Bp34|_
z79p({iAX=HQ}}f9%~3W3^J_cBFkES2XkhJl*lr8g^l&Ep
zd6_h%%tPK5HLs>4<;P;mz%O?!_L{skQZztqnaw@nV_W!gkz
zESI!Q2i(fE*%dubLYUXwq0+|`@?C^Y-+on$?_60rOX63oSU7VKM!Vr=On4tVi+?b1
zZe95KeRc@%@tey_s1M&OZ#lOr6Ojx3#pi+ipRk)iHZbZ@xm)?2`o>i@9ly&NwCW=7^&bs<81OmSFy>H+5>K3uZf=1!3
zXe`@*M$PhG;ay&n;K}0Grxf3!D?EW~IQl!xR8P)k%V8+nWHx@uhaf`z+Eqlu;
z1oyV|^^cQM58ND!(v%a`(_l{LV)I~Z(wvLjcVb7rF26~-kxTN_DMB&yjs`vbQV+vG
zHnj9efuv*_A)4Ty6tFf=YySE7&(`Bt3oJ_P3S
zF^SMjqLGq0QPif5h(E#7qI+awdZ^U*i=neekVEgwL#s#eDrz4Z{j~&~=XCfpPd1*gJ6h-|0a_Csj<)9mai&@qRi&&ZC1774i%ejcD{v6NIFte>Cm$q6KDogfQdo0e`0O{AFJ#R508UKHGRy
zTraX_W%wzgm4!`j~MduD1NBqUXv)#tpPewIS
zQYuRlwtQL}OO4}i(0jb-twv^oh$Ce-W(86kGBdP6Z9MzS@o0laac@pS`;qD}njx7d
z8(wKh`JmL*;lFs&mzvvln2
z(s#5&wf(!u2ilM+do)H(64~DLQ1o|VCh3gLZ$CVFjZ^H9a+BBRI2V{!3i~<}EF7>f
z7xHnlHR>{?R-bK2zH~|DoiU@Y-w6rVZvO+}?D`UqOTM{o=ngVf=D;S_ikfqCBu9qY
zs4s2=C(L_1E}+~^dh_a(kmcHXKUUbJ%F4o(e1*Za)3Lr)DC`~(cbQOVei6HjWQxN+
zo2g$XXI-ZzEi$cQUa~j;80<|IbaHDzuC9GIxQd)si|M?h2=*g
zFwy=o^n0*^lfZ)`odz!|5QhBY4OUb!0aND&c9>`c8|VHNyNLC3zoU!A?y&arKJpGG-Y*G=UpU|PjThMt
z`1S>Lcz(nV(kT>B@9#GJq^y`mZn4F_h!UVvQNA)oslo%j15?U%)>UvK&oh?By8MK*
z42juxgjU1^sL`$0&}*n1_8|))%#@-L91Z?Jf0e6M
zM^o@VVxD~pLbX-xcxG0cc2sBZl&0kxTbsLOcQP3^XP@9QAHI6e##P0_qwJhUyx7kj
zIx}>V$DN&EF(oTBQNF9R`(omLQht47yy1_770jJmC??_E{A9mmns;m3aQYm0O(^zP
zz^-(yHB+wZ2tNt@TWBEMvb%Wd9$_eyT19#7fJ0Oi9`B}rlc@7C)yhO;=)m60-RYG7YWcBR?z5A#>b1b1
z)9$&uBbQ6unA+RLOa~{twes*j(vl+Z!$(#zwad5WsQ8MQN*I-mf5_6%vs46gZKQ5<
zpElV?rS|6;7Tnxc`?Zz|SJEd-+FM>Rh@Ne(N^-YU@wBuQ{8umLr_?d*=wosD_*Cge
z%3~|h2S;|>?-m`LJmKCvFAx1d`LLneQ9>3aWS$#gZGW2aWSu$MR@AO2BP{zIh~}Q{
zU)crwwM$RUgTM9N;REJl*Ws;xZ}BdS$FzzS4FcYIQ9#f0CZX0#DCOMU!_F@%BK(m3qSO_vR7cio2To%2PP@t+OJZD2L%J}hd*V0DAwYt<9}P%sOG31Sfe!4O5@v0!mKbRnD)lue%q~O~FE}G=Nli;p!^#R?
zVNDfa*y>J7fi4q32LU-{=@yN9ged^p$(nstt3I)rU%%CR{o>l{>fBi{@m0RKpCXiG
zz#Nlp3>q{r{v{_ZET6fuv$073_d+R8ycWgs%;aOK4?BOzX5ew(XSZ`R`MR%el$fQ7syMu+#95Wp4G@ux%<_VPrp_x;
z!vU5|)-VB&n(hfTWLN)VN#?env0Lz-xAVRFwS*P+v(hCqCdsMoq%^z8PdR^43^6AEvQw
z0asp#at%NOh~DrThnT4iA;p_OcLn7rbPQcVpsh(~xp<%C&gY~eKaUr}qwVxw?Q%=}
zmFylA@kuJ*Y(LzAd$AGWhN^zM?koQi?}?#>F9CfCYaJ5p)oCd@TIWi~rtzG`6s0kJ
zJjA{|Q>C$!>!qZqVwWM6{%Q!+YG04dQQpO2GF)vEy^5Mr4%Sp+xOTD){n{{5Y+eN-
zlXFEI_lxi7#H_7|9@6?Y5G)ohiVGlv?>8jh@BCz;T>k2CIcG9y-=%=@O;z}tkf_n$pqtV!4_h&zGIi^
zUg)j_raKjqX2RgN{c^@y{Ive!V5#4dEShR^&Rr}|&@|mgg|id;VuOk;alx^O6P5K6U~+ZwT?}OH&E_Jw)#$tVo#_J~uFnk-XTO3AFoi
zbAaE`q}BZl$Q^sZz?+!A9TzG~kmFEy&hslCVf_vk690B~3k7zKEJxAo@7r8-+(
z%<1XzZ|2ipXZg&;6rm#b*7)(ygZQr#bbau5F$vf;k&)LWUwNBQB6ri3dx(zHg$W4J2XHlyU4+*B)Z15lq`;?J?t7al*!29
z6CQh`j<43bm4K*&QBc|UE2Bf}7g_DiNnXNqBb&Wc{YAO*Wnvyam00(|!Q372Nn8Es
z8+r~mNzB9t2|R?j4@5M_A}QvpOeX}*Zd9b~(^vdP0*ypa&kL9HtO1S?(OXLl{*kk({;dT5%z1`wnueX})pabi^Tc{4>&%C90
z>V8G_;=f{67ZiRGF8cu^aGjj<#uWFCS3vwJICE?RdGhYB&TU+11ZfYP0-cf|^?Rmk
z;h%sFwjo(5%~S<
z^}DpsY+6*LN0emcl0}u_Dru80VcgLX6L_R-LI&Zr>5Avpm(r4A~__N~oIm8O4
zAjT|NAMM^!zBYGOB26<(0?P)1(FbjlkSWyGK^dp;Io*5Z#YfW=W^qGB7!t*#`9f!I
z#JW`vr2
zJ%nl~_a7CY8gOk%N*$y`0gzx3luU-KJUr+`8~imxa0F_?5RJf`wK|R~;gi
zX2J6(aNz##`n1x8wZG!hiN3xWLoF%=oe-j0xn4UXC6FiO^Y(95Ro0``r%Q&A3ew74
zAv^qNDi}u;9J+@GsXpG`u8y(D_5?pN8s|UI{zv+cufnWbk!?c~+mMAyx8vm1L}W1B
zB*>2Dsyzk2W6?zNfxF(gI=5?+s=cod&BEO3CvfOBi-9rh)GTp
zn~rvg4S8g5TMm0G6;83Lx)$UolXXUnMRf>ai48m_PZg1Q13_bR4gcmn{l~d2z5~H*
zYYeR|g9{!!^L!AK89$t}=F(boM@uEkvo*LpRyKY$LQMMS7c7da9kg(+;Ex_m^U(am
zmT~vntV7#41H132D!~SCKZ5VJj{h!nE!&^H^C`_w`Kx4C%5l|Cw>v)~%M_X_;oA%M
zWJRIhN-aA8+I<9o{CB`p?V8g1$p*ajkExL;6W9K9c_bzlbrgkN#|@Fd?;4UZ6cjq>$ouM~=V
z26$JcrE@nf%g^$(rSG11*N=*$k*o{|V@IM`j@`xrhessR?(hOTNMx`
zIB))75`YhtpP5@}wQL^SEH}uTi?z}e6Xci}-p}~XCH}Zv6ju|8h2dzDJzi{lo9o{B
z$U0{1Qnz0VM=2@pe^{M{u#v;Dxu`r@AfR8fb|+Z+>sF_^X+alF7A>V3M)<%g$Ae>q
z%3O**P;aJBQ{#9vlYjg%Fu;>E$A*rBf7LdC=$VVeut*4B1ba>WQ%ybxrQcCWA=+=|
z6XDMmT*y%gd}6=G3fgvv%c>kcFJJYfz}NzwbF50Bas3uXZx7AIguI`en%>JqEqG=D
ziPO?<1A{fDL7T~L2)kc;EV*hAzjez3q^npx8%w*nLT$StES!ge0wclMsuIiOQ^Mc7
z+?h_6)?byJHs&ekuQijl>--xN18Xk0dMXP1A?h{P
zn>HQe`Alc8ORbd#v?X>5XWe)%`i9Tn!}p{{h9nVR6)0nQV%+Jjx}qLjQKgfxi+K*P
zK`BgZ|3F96xzE1~$Spr?`YiONe<6H#_r(S$&MB|McRU$Ej?GN49~Z_DIh9fY9n4Z;
zFL7QxB>qK0#daM%piF_ZA%f-U%Gc~{(WPkXm#Q@=G7q^NR1w!NU?7lL->aM8NmT@W
z&OQbn?|+oD)EE>v+&Y;Ow0fX}d)rf!YRI*J2Q%s-72=NvRy_9zO1MRARh3n7nRR?$
z{qwHorBX9ftRi>-MlRNrOmlW2BYf8rp7QY1tJe%bZUV>uxSG}s6DDs7eEaZ&eK>>l?q4cChA}l#
z`sC>deLyu8MaesRNiru1D+vq}7Aa~3BD~%VfZ2$Q5ygE@mPT*=@%e~e8dkRl7$fxB
z$xNUS4FHp-QP*fGigY?v*C6X>dn#brz-}tgX;GcrBV3Ptq
zt*U>Z$(eQV0MAh8cUX#V22!BeAUS2P>BxzdVavPb6ERe3;1+)~m9
z-ZeB)>iZNpdY*^7hVODFRKs^m+@WaMLu~Kl{U+vXmo|48?T|#$>@UydU--43!~Cs-
zcL_)yG2KDw2XBPH%uf=15qE;(8OUB*h)xH-G3_-D*1GSYG$Z8A?P&b*xb;Cb_~e
zzynjuYt~L?R`N-X?bo{rrbt@&^K9o(BEPGRW0`>KHN$dy6Yx6JvKU*~eG*ovp}^;y
zu{9)WZx;q8b>E$8`NE=zG6W>3V5JZj;8hk3pwNNQ3C)FhGcMFH$W)cnL-Ry
zl1DB^q@b4#P`%%ZSK`AhDRBL)5y=dsnuduvt8}@AwUZACMegOUP>0Eunc3>8c#qsK
zDMz;ABBQ95NjqfyznW02e>^4W)U)D04gvq^IO2`FaPl1)H)dr-f)H>)%`7w6UZ#N^STEVmGn(Xxpiwl~x
zBU7b~J%;p$B;3pl{!v5UI%*V@yr)DE#1xl~HdL0dn~f4fRQT78l3a!f`Q+f&l~@Uy
z;|%ZwaDrkW9Yq1YS-#}do0e;ag!-o2nQIMC=eOV}UU&mI&
zZdnj^c$mo`OO=aF@wKoBzGhaSdcSpAsI`Jc*>n#Xg}h3nhl3h2mR_2uMl9dn7R2gF8pqjMycj{sGznI4}G_~#%zii6No=-{%
zl&RJdTteqhnIsZIE39K2$aw7x)S0uD(vUP^ZxB6L{A>})p4PeMyW3?msFbUFUhI`g
z#xet$d!`Rk=v;Z|#K$ePq!=_HU~W7P=%#vqf;lq&C8*z3#f7vxiQYA%(eyM0J@xbGreEAHnr2b0v+I?X2l^g90&m3{$UfQW0iwfnFX4UNLj4h9Gs&kt1y
zq)mlWHGyt;4qmn*pn&v955WT9pgG~xHx&V*0ctUFvyFhFOumDWwKTekaY!wT76bU`
z!cpJXo9U4EbHGQ(yXOVv&&kI~`!X?h_d{W4ls$EybZC@bc=j%0wqlq!Wp^@SMtqg%*osX
z#f_^@3|df}_ZxFFx-?w1@2&{n0xgG!WoLs
z#9qZUh-vPUlTNTCijcRE4_9c(Cn?E#qOx`t{(4z14JUUwVwUYjghyEfgEr%9ldY|S
z#Pm&fCQ0hNIxyI+_SLYKk`_r;ZXq!?!AvrWm-z}TY%fTr3S)`h{pPHJulJWwsnmu5
zwXkpndbr?G395nr=Pf`AFU5>|74i4?1-5^n!J;tiXwk*62GN6tkpIU7_Z+&(El>WM
zK;?~wR6t=(LPZ>(Xv^v&_?5^!(j0dmod|?5#;r&)vc#&qZHgON{GYvZst15J=pRR^KaW0
zbgtP}rlOp|@YIreiy&>I$iKy%1X9+msGXhSRhje!*<6Y=JDUBL-I-E$6q3F38hbcG
zll?+bp2BiO7_EGFspNyR4Kz$Qxhl{Z|402AfDpH2x!?@AU|8`Qf=4(ybsTUDZk}tT
z2;2lf?v$h;>cA(nK3e#;*%i9949bvc{BS|)0DQ2wOp?F-ttyrf6>lhzBe(&4`><(n
z{4=(*khJ9gIV-eKgw&w$k9V`oF$=6IZUGku2G|p@
z8RDQY;H?V}L6R4>XX-FThUO|!Df#-4g8b~>@JeG+AqTMkiI&Rf26mo=S=dxd30KFb
ze7&wB0jcm353cikPbJqKV0X$ln(3&On6QI6C1C{I(BbR=|AdR5o&d~^#B;hO3*3Kz
z1t%sX{a*c~j5!<)u8jFO3hRl7q4O3tCB~bgpv6uR&-tyd+yqN%1gpAiNTlK_hzqPa
zrn)augg1bbpB;>xz^SD(d~~R$i3<7k*U&6KII0wd#s|nrfYM-oKFGkHAZNt2OAm!f
zBcV*qofJYLCt>k;-ZWa_o#KyET``)pSoBmP_63@%ctC5{%6Be}wS+bPkc<%-@C}G#
zWtV?$G$nloM1E(mQZ4{MFKN_OJN&;HDU>gt#ip?hjIn`OQJ020z=p$4%eb>l5vX`+
z5KFiKZVVF7vYkYK161H?nv;w=lr-m`wnBY(te~HIn^8$iD;xn|nV&4b
zy4OtM&{Z0%=&L%6a9L+(qE9zF06(Jq--;>kr8b?SdiC0r1Ej69S@H$ePGMP}qP?WJ|zpD7IY7R=Uip$#y|%rEh_+
zW_e5TAh%cancA9^Y1y8Fim`!hsr9U}HR)pMCu9y@1(^f^YNqe`c6^3dt0h1gDap(5
zBYtq5W)Gl$HxoH#LXIK282jpbj~*~Y+tqN7&MM>IyphQ4((-ih4^)~}3nrIHb^-`)
z$1EMv$1H^uU4n*X!>zl@raQOSr5_bG=p!;r)HW!Je?JW<8a2?ZvU24?Ty>eaCPbCU
z-QAxOpD+gqdRd1*HP5;ThU?u3UOoGx(5pfc^n&)^S?}nV+lh`-sIhC75DHTQ}1DeWoKI2DbxIbH-<5yQ$VQtRHDpM7$T_ZFnF#s!=~j
z(pUHMl7)2Q=viyYr^(D4@KY80Qz9CKMFd%cwFcIpz^mtyy(mMe!oI>7aWp`}V&_s+0AF>u*VEXGeeq-FieqBR
zEl;(*S*Nb?9sl8`wi`{N(7O`56L6WF$BQLraWhx4C0q3SI57IVsgNUWF5gP8bt8_1
zkvUlHRJgyyMT=0T&HPSDl9vsZJ!KCkXb&~_gZ}R>;XiAJ0Uh5D(r1(vKFPz3*}Qo~
zphHY{AE)4MC551WGre69BqY7+J6$PEkqn*vlopCnt^`cp4QwI2i1mx=nsYUiYKBUd
z_Z^7$^gHl^yknl&mn=_~)C5$$VCn2f_T7_sRf^6W3#=r9y`p-_q}HUn!TK17Egdt7
zMePwg$mn`HBCQAKK(tcvR(m*|F;-yrszDc-21f=Sg@z)OFXf?AjLf105>msTDv3Y#
zTn(R~AlO{PGfmH+PTF5oF?=6Fx;Rd?6#s`?C``hdm~Zl1T6NVf2Kkk6b~$}d4qk!=
zI7g__`W;4M|IZj{j33uP*fmBCkNdZ#WSM8~(_3C?IEi^X7$a*AFU#(-J;W^|sRihd
zNxoI2@+_3*Ys4t(xto6HHO0`$bDlIOTfTt>1OXuc-X57<9EF>b0@%D0NT~dOGcuz>
ze#<(*3Z8*jJvbzO_ap~R+}`(&Y879YMi06D9Ecf(qkZ1{+o42y5Tbs^C-grjyC-%_
zibe};@#EUu)gnM@J^(LTAu>i5hL$8a6eDd)#XC#K_y4#FJLy86!^a$6V3vzNE^xBd
z#sHRssN9XNC;MbxnGocmY+xVNJ1;=twp|18Z!O@BBMU$QfDi~Txok^?KuWTfcb|DL
zC8OKr@c$#CP!u6){uml9IUK+DVFpZN@d_j+$QjKDh%5l6_HH^NqHNR1r{&(jrnGyt
z?1!dAiI#I>2agW=FDiDTkG6=u$ok~b#m0t|-qB0Nl%EB2w=F9^C0SG*i%nS+U)S!n
zLI?GD@wCzu^w4~V9v?S!rW<$bokD$E_>l4XA8Lyar6?Bb2dDWqq9(6f7(V<1IWoz$
z*9cJd8(5UpMk)`o?PM9UrO;%|89>VE5Bh|OSd-gnl4ansE$atY@k}Qz;EQBaLZx
zAjt=IKnzW2RTY^XIzK4u@Eyz;Inm;~{N_CyIKG9u>LZ8A$;)r=R)OMM~vHn*8_EEenNl^VWuwPxn=)7UczfCkQz`%r4(F@RHOgLhW6}LOd6BqJ5d<50
ze(Tp#@;9*7?`+4GwPkJn6t(sII(sO)55%~8AZ06{GW^kO2tlK{wv=k}aaiKrt#@~y
zi2}A-(b1weS@(_+4IT%XqVU(DK3;B1CdQr1PQ%D7BiXeoSM7N;r-wNaZ`*mF|g>2f#U7uunMXt}2Bmxs^d}O!ooMdj-A|p(%_ei$e52EN4PYFASxNkt7
z`V7APS(kJ(^5U#^oZENI)A^74grWsMn4TVWzBFXxAqT6whh%ls+BNj|
zUQ?TOtfR9M6Z6DGaeFU4<70fc*j5pSnN=_;UiGPbx3JjUU$|z~$$wWJY>JlqeX8jge00A!)$Yv2JFZHp
z61yYBDAX6i!qOnBAKcg=D2QwjgsHU4ht_km;RxMbu5=ks=oL$;pys6X;aX62%28M)
zPe@_^S~;vIQb()8!CCw(V8;hbBAu`D*qh9(>G@9wME?)FO;lrk*u?Obx0@@goYS?G
z^T!XE+Sq;if2IQ&&7BZpm>A7~sT|jeSSY7kn(S`vxSLy3GxUjt>UaBxfaglW1i8}O
zZT$m7tX&P!iC-=gv0v2`uVq88p|&-K$Q?dz-Z{2#LcZ}+;6G4n>iCX@@eWxJGWW?7
z+&wFt*O3n9iQ8acyYohz*LV6a3376YULP4GwmeUS6*mLz={&Qkowet;^~r#@KF)EF
zfctH|i3xVBI89OT2TYQRLg(~7xDl0jDHN!>4)NE?QcPin|lrgioGThzXo&QI)^S;whZ?y$D`
z4+n)`(2$s<@OE!-u7J7f}d1
z{*1Sj%J6QJZDAUnFL*s~fjw?A!s
z(;G{2u4_V>%|YB7#z|G&PlX-@zl=r0aB5z+=XcsN@bk&m1t(5Xk~~Gy&R@v4H_-<<
z-_;BOgS{}R#dsde^yJORj1C!&9G^N8E@|g;3g({)OOIrlI}BtzW3|h|0%bJVo1yQh
zH`#{-O_tusja{LDC}I!U?ZbBb)tcY`-AR<9xIacUb-qLfJ-mmDaBS1V;X76xG9tQ0
zgoh);ee2!CK6CLnUzMc)N!ekeRhW!SICiXZ8Mj95R7r<|5EE@)mVt4>xG0fLX~OCb
z1N4XUHbTgE)EJj8-UyKiXn_i`m^cb|mtD!87@>=yZASo-87&Hu=%+hVZ1P^OD
z2^GI1{0_XdXZw*2=8bpQcU~o^ko$XKM%EH$1t(h-oY0I&(cgcvD1z>UnNq`D$}7n1
zO8^H|09Z;Uk!wvDuVbr3Qo=+7vZ*tyrs*xEup|skmaKC{5cU9cP&fN<@Ok6XINR}n
z4hgt!AQWo-2~pd#RMP-(BVe2YAW6kVaXa4yV!(EU!vL1A3WV5F9*-spn6T%7Zep|4
z=X$^cpIw?`%up
z&Rze3F5=ftoHA?lDx(h|tF}IgXi>>ShF;c4g~?C8-kN=CPU=K#@6&gT*>B;h!Vx6s
zERX9c-lFAu<|!iQUE#eI8v1d&Ah)zV+Yhkkh-J;Wv{zfy$aC`R3SQh)j
zirP@@0X}vST5e9owM+??T8&7!oVd8J;!YV>hIB!~me!^Z2I
zr5JE&NV{n*h0#gTFkK9*=DFtk#dJCjR`Acvz
zkX)y>Om|mlIB|Ps(wM*%V6)*)Ou}XB#pM4W%FS^^u*?<#qj;0VL#cukqsY#ABzP_
z6QwxJ@*;Myn+|j$zm&BRGBq#oo1f)^&$cId0dUBLEm~0I)L3ZLc_{}lD-itmvW)W!
z3dswlkYXax?s)$D$?mL1U71pqq3N^i=UE6BBkxF~9MeCL2PaFAX0eN{H!@!{888(r
zb?i{0vSJpa=vuPN@(8Prv#!ph
zkjmDaiMiH}pg2|+SuJig1~!gm=Y%~^S7+?phU|mN#^sr^iB4hG;CJ@QLEDTsu_y3|
zHizWjs)!D7b?DQgE{Q06jXZ}a?i-rz*F0;Aa=WS$%Pw;1qqdK;sTfyo?cN}cG?*im
z%8oY^#i{hRir;Omy}5K+%}`xxw*#Nyg&SdAwU7^DoKv4CV-HpIXpy)mQZS7u)3SI~
zX>BiTPWLdbH)l2v_${yr;O#z$@;S7vLcyDR=ql9dzfK&RzV>)8`n!sl9@Lh$o>Qe5
zRhgPC;BJ2M`{2h1ZNyP}VM{IlaNn0&j0ZZCPp3zZDW=P@gS8aTg0x6kz^2sv6
zlqx9=6hG#5yr1P;)B2rn^Q?=1!Er2gwH!1_9aZctM~_`bA~R(R3Z8D!>r-mhe&iTA
zI#};|mEJ0gU@ztty4n3)X6jjf&DGKN9Omkc{8w77?2q%Mr$!&S4KV2iDy3iCZl!Cg
zy?`dd%pm)bkSXS@WB5%b18*3RsFoNp=w#9fHKzEf=E|??&neSUR}QUw
zR{pr8F;I~>(Uj&)5ZD7sbkB1y*-@dG`)Pxvms@I`*bI57BV-`d-S7v^MH
z(|x|tH5rO|MbCyH7Fr{trH*=rZ3!0p8*YC<(}OVn8g`>qZ#Ho+(0yz+6+ql}hF}v*
ztb9v!6e@erAWQd}f3kIubdf*&&P|?7@O%8HFQ*l#{0MhXTgTdynri#ob0;@T(;o~R
z(AV!n=%-g%W^)DXa{clN%#A;45t*6=J*SB0CrdGGZ9N%b2D;Nwiv$pN#iB)56d6{`
zmKMWXrIQ2@SSSf+t)Z5ZIIu81`~#FifoCsydN@*{ecoC~CL(nPLc-|
z=MvqMT6o{I*lUK5#cCWYCS
zvp)V-f^WYr2C&qv>(kOauM!b}n8*+Q2Rbc4^|eEi0~=GK7~K@S{A|C_y`oT>gR%6^
z!lX$?=1X`5Uz;ErQCJ%{z%x8YD7;Bw#uh2uX-qU3>rJ|OaT#7sFuABA4TaDAK5%wU
z0ot5o)cAlOH>0XxNl{-Q6^_H~1$gmxRe_7aX%1erq5|9QaH!WlPOzjXCcq3Kw>k@H
zh6CRRwyl}e8o&v3a)!fpRS0msjYX15V`MRBVb7Lo(sWP)fEmO^)e#fWh04^qjV~vS
ztE|(Y=#Twqu&=3fh>dgRlbu+3$Kr9m#Y75-9besZd?I!oDWA~sR_))Gm9-8dZ|{U=B;ZR2o3TN)!tJ7|
z&{7f%f7$y7WQtdT5NyTjKS>&TgF}24fp@1s5BCYE;BmY1HQ*$~w&89U=
zWhu-oyVbtIwu=4m`G<&+pl7P(k^3R}ALMXDZlapsLaV6{pB(DBCPnHTt-IMxK#lk!
z3@kx&s&as81jsFCDXGZ(j8Ov100SHQpgEoPV}>+(5=Vua_?6HffUG9~FTDE847g8*
zO3IjmQgl?hRa325J4s2@eSTJuw&F0sI_BFUb5C8+xCRZr$nyr>^mf`3vg@`(&dSA)
zxqvmFvJU0eZmV_sM$7WXpYv5Cp$NQ!cKYS)!+Rt^vGkVGijajrznsd-v!lAM@1pr)
zfpdks`DwrP{j_=pE!oxmMQbO!o*mQ5^WW@M%ZT07g-=CXq;NEu<3kKFyB2U}GaMLp
z3jR6H5|q#y`K$!oPT9YC((MN?yb~N$MQ+VlAM7(T5z0(o6JT$|3Km>4T*$^c0-}8L
zi@AnoH+)nrNtEvq!U;+W&JaGuG4gxh{uo&d8r-hx?)Q%R`@Z<^Ylj{MptjJwG{W*)
zV?%)7;&p6tw0}eN!Q#b2mpuONWRmagc120XoNXwVeOuV)h+hMP46z_!NP>*ngSBwR
zucHOz-k@XbuD{BH4`GC%;}1`W2yN|@DsYzN!8TV`(wjbsWlrLCb2UhEI$cL>Wpj>d
z2gMNwY}I;m=$u|2H6!i{P84^@IzRj73q6U3HJ>Bq%pYozDX*IE8=~}k+c>>{OOMQW
zg*iEYqg!?66VV_0x$l%gwr00Cu%9ltkDDYGtbzgjIFPPieo*(7D6Z)bFQuS>!yQSv
znXYqRzH$8>!|^p&T`>}=RdrM-nd|z=vy3yK_cYJEybW10AOLU$Z~`K5yE^j=YXai0~^h?0v2vKMr_>$y0siSnNG4xLsgKSF&fN
zuijaL2E(5Z3Eyb`Le(z(ne#mS&Yx-`D10DP?D>i9QL%Ac9wViwQ3;p+tq=&K+cH!>
zNUK7nG)Cn&>{e!OD1OuqRz@j=5-xZ*F9p?qK0OF?S~=lpRocwjPz*IVNX#`@A*S`9
zk1<&L4!7)g-*ZOy6p}EZyNHc!L}`-g0B^htCx8A!XYIR@Z
zWpUa(5hOY#v#cx)&wYP5xyR|Q`U6p)7HyL6gwN&>?>%{KZeAaS>GzYCG`7Cao_LsX
zADhlOE%-nwG~oF9PVQ0iI#POz`252-gir}-J|_yyzFm|!Lf0r~ZgVoA!Ic6AzI+fF
ztkkJ$?)--8P&P*G?ICo_mTm&CV&JALxtF0&*jXZ5KOJz%z&*^qo_8Y-U7S!
zYmgXX9||)!)~fkzC?4!|**-J{qW2Ry053zkD^~#^F@PSFOw=u~K%qI2F-q`VfDS7P
zYZVU;{tM;z1aSBc9t@CT;Itl6Tdo;Dt%v*mR>RqS`VFJez(n&kwTZ`TJLo
z!)~!ih#(51wBFo#IxZs3Qpr>;Cgf?0N@U&p5HAPCoUG-|P0pRyCJm^cO%}O_dyT
zehpyam4O0UoI}gl6m$QIQ`65}{8BkQouGim7usq*0steGgs#98?KP#?gah&MwpazT
zI%20cN(@%`KR91Aco6zW(=rLh=)d!P?Z#s-FHks<(adqTwyptQ6($SS9L
z(ZccY@X-LE^jkp7E(CB<_y6ML#EVrqzH1s?B?Sw0jvTj^05A@Z&QBBOw%s_INDkKT
zm$2+yXn5Med_R^}kSng>d6&N4lj7v?J73x?i-g>VaSKkrjri6(kq+`YZY!lc3I8xc
zp)jMv7W@DXD?s-MusyfPDxU(MKLU8k63cMqMuDMfX=TMm+ff-^XqhlDss;U~*~^0`
zq}hAxP66cUnux#isY|Wjc8#qri|Ay{E(8HK^#F9l1=(89C_u9M&gj3p@wl}mz1Vhc
z0sENMJ?K#yU8`gt@PHF}dCwPVO_sh@`>5h8p)^g~v)t9`$8vvsV3)iD8|^E|P#9Oz
zylEAMcmRWcXk&K8k2RX%cSuZP&(>pW3K&}TJ|^