From 83ee52cc77ca8374d9e10bc79b97d540f1a31af6 Mon Sep 17 00:00:00 2001 From: Martin Wicke Date: Wed, 9 Mar 2016 10:02:48 -0800 Subject: [PATCH] added inception model --- .gitmodules | 3 + WORKSPACE | 37 + inception/BUILD | 178 + inception/README.md | 701 + inception/data/build_image_data.py | 430 + inception/data/build_imagenet_data.py | 702 + .../data/download_and_preprocess_flowers.sh | 96 + .../data/download_and_preprocess_imagenet.sh | 101 + inception/data/download_imagenet.sh | 100 + ...imagenet_2012_validation_synset_labels.txt | 50000 ++++++++++++++++ .../data/imagenet_lsvrc_2015_synsets.txt | 1000 + inception/data/imagenet_metadata.txt | 21842 +++++++ .../preprocess_imagenet_validation_data.py | 82 + inception/data/process_bounding_boxes.py | 252 + inception/dataset.py | 103 + inception/flowers_data.py | 52 + inception/flowers_eval.py | 40 + inception/flowers_train.py | 41 + inception/g3doc/inception_v3_architecture.png | Bin 0 -> 346842 bytes inception/image_processing.py | 479 + inception/imagenet_data.py | 59 + inception/imagenet_eval.py | 46 + inception/imagenet_train.py | 41 + inception/inception_eval.py | 171 + inception/inception_model.py | 160 + inception/inception_train.py | 351 + inception/slim/BUILD | 112 + inception/slim/README.md | 650 + inception/slim/inception_model.py | 329 + inception/slim/inception_test.py | 119 + inception/slim/losses.py | 110 + inception/slim/losses_test.py | 89 + inception/slim/ops.py | 418 + inception/slim/ops_test.py | 510 + inception/slim/scopes.py | 144 + inception/slim/scopes_test.py | 118 + inception/slim/slim.py | 24 + inception/slim/variables.py | 224 + inception/slim/variables_test.py | 200 + third_party | 1 + tools | 1 + 41 files changed, 80116 insertions(+) create mode 100644 .gitmodules create mode 100644 WORKSPACE create mode 100644 inception/BUILD create mode 100644 inception/README.md create mode 100644 inception/data/build_image_data.py create mode 100644 inception/data/build_imagenet_data.py create mode 
100755 inception/data/download_and_preprocess_flowers.sh create mode 100755 inception/data/download_and_preprocess_imagenet.sh create mode 100755 inception/data/download_imagenet.sh create mode 100644 inception/data/imagenet_2012_validation_synset_labels.txt create mode 100644 inception/data/imagenet_lsvrc_2015_synsets.txt create mode 100644 inception/data/imagenet_metadata.txt create mode 100755 inception/data/preprocess_imagenet_validation_data.py create mode 100755 inception/data/process_bounding_boxes.py create mode 100644 inception/dataset.py create mode 100644 inception/flowers_data.py create mode 100644 inception/flowers_eval.py create mode 100644 inception/flowers_train.py create mode 100644 inception/g3doc/inception_v3_architecture.png create mode 100644 inception/image_processing.py create mode 100644 inception/imagenet_data.py create mode 100644 inception/imagenet_eval.py create mode 100644 inception/imagenet_train.py create mode 100644 inception/inception_eval.py create mode 100644 inception/inception_model.py create mode 100644 inception/inception_train.py create mode 100644 inception/slim/BUILD create mode 100644 inception/slim/README.md create mode 100644 inception/slim/inception_model.py create mode 100644 inception/slim/inception_test.py create mode 100644 inception/slim/losses.py create mode 100644 inception/slim/losses_test.py create mode 100644 inception/slim/ops.py create mode 100644 inception/slim/ops_test.py create mode 100644 inception/slim/scopes.py create mode 100644 inception/slim/scopes_test.py create mode 100644 inception/slim/slim.py create mode 100644 inception/slim/variables.py create mode 100644 inception/slim/variables_test.py create mode 120000 third_party create mode 120000 tools diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..93ea8acd3 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "tensorflow"] + path = tensorflow + url = https://github.com/tensorflow/tensorflow.git diff --git 
a/WORKSPACE b/WORKSPACE new file mode 100644 index 000000000..930a66c45 --- /dev/null +++ b/WORKSPACE @@ -0,0 +1,37 @@ +local_repository( + name = "tf", + path = __workspace_dir__ + "/tensorflow", +) + +load('//tensorflow/tensorflow:workspace.bzl', 'tf_workspace') +tf_workspace("tensorflow/") +# grpc expects //external:protobuf_clib and //external:protobuf_compiler +# to point to the protobuf's compiler library. +bind( + name = "protobuf_clib", + actual = "@tf//google/protobuf:protoc_lib", +) + +bind( + name = "protobuf_compiler", + actual = "@tf//google/protobuf:protoc_lib", +) + +git_repository( + name = "grpc", + commit = "73979f4", + init_submodules = True, + remote = "https://github.com/grpc/grpc.git", +) + +# protobuf expects //external:grpc_cpp_plugin to point to grpc's +# C++ plugin code generator. +bind( + name = "grpc_cpp_plugin", + actual = "@grpc//:grpc_cpp_plugin", +) + +bind( + name = "grpc_lib", + actual = "@grpc//:grpc++_unsecure", +) diff --git a/inception/BUILD b/inception/BUILD new file mode 100644 index 000000000..f6c378291 --- /dev/null +++ b/inception/BUILD @@ -0,0 +1,178 @@ +# Description: +# Example TensorFlow models for ImageNet. 
+ +package(default_visibility = [":internal"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +package_group( + name = "internal", + packages = ["//inception/..."], +) + +py_library( + name = "dataset", + srcs = [ + "dataset.py", + ], + deps = [ + "@tf//tensorflow:tensorflow_py", + ], +) + +py_library( + name = "imagenet_data", + srcs = [ + "imagenet_data.py", + ], + deps = [ + ":dataset", + ], +) + +py_library( + name = "flowers_data", + srcs = [ + "flowers_data.py", + ], + deps = [ + ":dataset", + ], +) + +py_library( + name = "image_processing", + srcs = [ + "image_processing.py", + ], +) + +py_library( + name = "inception", + srcs = [ + "inception_model.py", + ], + deps = [ + "@tf//tensorflow:tensorflow_py", + ":dataset", + "//inception/slim", + ], +) + +py_binary( + name = "imagenet_eval", + srcs = [ + "imagenet_eval.py", + ], + deps = [ + ":imagenet_data", + ":inception_eval", + ], +) + +py_binary( + name = "flowers_eval", + srcs = [ + "flowers_eval.py", + ], + deps = [ + ":flowers_data", + ":inception_eval", + ], +) + +py_library( + name = "inception_eval", + srcs = [ + "inception_eval.py", + ], + deps = [ + "@tf//tensorflow:tensorflow_py", + ":image_processing", + ":inception", + ], +) + +py_binary( + name = "imagenet_train", + srcs = [ + "imagenet_train.py", + ], + deps = [ + ":imagenet_data", + ":inception_train", + ], +) + +py_binary( + name = "flowers_train", + srcs = [ + "flowers_train.py", + ], + deps = [ + ":flowers_data", + ":inception_train", + ], +) + +py_library( + name = "inception_train", + srcs = [ + "inception_train.py", + ], + deps = [ + "@tf//tensorflow:tensorflow_py", + ":image_processing", + ":inception", + ], +) + +py_binary( + name = "build_image_data", + srcs = ["data/build_image_data.py"], + deps = [ + "@tf//tensorflow:tensorflow_py", + ], +) + +sh_binary( + name = "download_and_preprocess_flowers", + srcs = ["data/download_and_preprocess_flowers.sh"], + data = [ + ":build_image_data", + ], +) + +sh_binary( + 
name = "download_and_preprocess_imagenet", + srcs = ["data/download_and_preprocess_imagenet.sh"], + data = [ + "data/download_imagenet.sh", + "data/imagenet_2012_validation_synset_labels.txt", + "data/imagenet_lsvrc_2015_synsets.txt", + "data/imagenet_metadata.txt", + "data/preprocess_imagenet_validation_data.py", + "data/process_bounding_boxes.py", + ":build_imagenet_data", + ], +) + +py_binary( + name = "build_imagenet_data", + srcs = ["data/build_imagenet_data.py"], + deps = [ + "@tf//tensorflow:tensorflow_py", + ], +) + +filegroup( + name = "srcs", + srcs = glob( + [ + "**/*.py", + "BUILD", + ], + ), +) diff --git a/inception/README.md b/inception/README.md new file mode 100644 index 000000000..082e902b5 --- /dev/null +++ b/inception/README.md @@ -0,0 +1,701 @@ +# Inception in TensorFlow +[TOC] + +[ImageNet](http://www.image-net.org/) is a common academic data set in machine +learning for training an image recognition system. Code in this directory +demonstrates how to use TensorFlow to train and evaluate +a type of convolutional neural network (CNN) on this academic data set. +In particular, we demonstrate how to train the Inception v3 architecture +as specified in: + +_Rethinking the Inception Architecture for Computer Vision_ + +Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, +Zbigniew Wojna + +http://arxiv.org/abs/1512.00567 + +This network achieves 21.2% top-1 and 5.6% top-5 error for single frame +evaluation with a computational cost of 5 billion multiply-adds per inference +and with using less than 25 million parameters. Below is a visualization +of the model architecture. + +
+![Inception-v3 Architecture](g3doc/inception_v3_architecture.png) +
+ +## Description of Code + +The code base provides three core binaries for: + +* Training an Inception v3 network from scratch across multiple GPUs and/or +multiple machines using the ImageNet 2012 Challenge training data set. +* Evaluating an Inception v3 network using the ImageNet 2012 Challenge +validation data set. +* Retraining an Inception v3 network on a novel task and back-propagating the +errors to fine tune the network weights. + +The training procedure employs synchronous stochastic gradient descent across +multiple GPUs. The user may specify the number of GPUs they wish to harness. +The synchronous training performs *batch-splitting* by dividing a given batch +across multiple GPUs. + +The training set up is nearly identical to the section [Training a Model +Using Multiple GPU Cards](https://www.tensorflow.org/tutorials/deep_cnn/index.html#training-a-model-using-multiple-gpu-cards) +where we have substituted the CIFAR-10 model architecture +with Inception v3. The primary differences with that setup are: + +* Calculate and update the batch-norm statistics during training so that they +may be substituted in during evaluation. +* Specify the model architecture using a (still experimental) higher +level language called TensorFlow-Slim. + +For more details about TensorFlow-Slim, please see the +[Slim README](slim/README.md). Please +note that this higher-level language is still *experimental* and the API may +change over time depending on usage and subsequent research. + +## Getting Started + +**NOTE** Before doing anything, we first need to build TensorFlow from source. +Please follow the instructions at +[Installing From Source](https://www.tensorflow.org/versions/r0.7/get_started/os_setup.html#installing-from-sources). + +Before you run the training script for the first time, you will need to +download and convert the ImageNet data to native TFRecord format. 
The TFRecord +format consists of a set of sharded files where each entry is a serialized +`tf.Example` proto. Each `tf.Example` proto contains the ImageNet image (JPEG +encoded) as well as metadata such as label and bounding box information. See +[`parse_example_proto`](image_processing.py) for details. + +We provide a single +[script](data/download_and_preprocess_imagenet.sh) +for downloading and converting ImageNet data to TFRecord format. Downloading +and preprocessing the data may take several hours (up to half a day) depending +on your network and computer speed. Please be patient. + +To begin, you will need to sign up for an account with +[ImageNet](http://image-net.org) to gain access to the data. Look for the +sign up page, create an account and request an access key to download the data. + +After you have `USERNAME` and `PASSWORD`, you are ready to run our script. +Make sure that your hard disk has at least 500 GB of free space for downloading +and storing the data. Here we select `DATA_DIR=$HOME/imagenet-data` as such a +location but feel free to edit accordingly. + +When you run the below script, please enter *USERNAME* and *PASSWORD* +when prompted. This will occur at the very beginning. Once these values are +entered, you will not need to interact with the script again. + +```shell +# location of where to place the ImageNet data +DATA_DIR=$HOME/imagenet-data + +# build the preprocessing script. +bazel build -c opt inception/download_and_preprocess_imagenet + +# run it +bazel-bin/inception/download_and_preprocess_imagenet "${DATA_DIR}" +``` + +The final line of the output script should read: + +```shell +2016-02-17 14:30:17.287989: Finished writing all 1281167 images in data set. +``` + +When the script finishes you will find 1024 and 128 training and validation +files in the `DATA_DIR`. The files will match the patterns +`train-?????-of-01024` and `validation-?????-of-00128`, respectively. 
+ +[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) +You are now ready to train or evaluate with the ImageNet data set. + +## How to Train from Scratch + +**WARNING** Training an Inception v3 network from scratch is a computationally +intensive task and depending on your compute setup may take several days or +even weeks. + +*Before proceeding* please read the [Convolutional Neural +Networks] (https://www.tensorflow.org/tutorials/deep_cnn/index.html) +tutorial in particular focus on +[Training a Model Using Multiple GPU Cards](https://www.tensorflow.org/tutorials/deep_cnn/index.html#training-a-model-using-multiple-gpu-cards) +. The model training method is nearly identical to that +described in the CIFAR-10 multi-GPU model training. Briefly, the model training + +* Places an individual model replica on each GPU. Split the batch +across the GPUs. +* Updates model parameters synchronously by waiting for all GPUs to finish +processing a batch of data. + +The training procedure is encapsulated by this diagram of how operations and +variables are placed on CPU and GPUs respecitvely. + +
+ +
+ +Each tower computes the gradients for a portion of the batch and the gradients +are combined and averaged across the multiple towers in order to provide a +single update of the Variables stored on the CPU. + +A crucial aspect of training a network of this size is *training speed* in terms +of wall-clock time. The training speed is dictated by many factors -- most +importantly the batch size and the learning rate schedule. Both of these +parameters are heavily coupled to the hardware set up. + +Generally speaking, a batch size is a difficult parameter to tune as it requires +balancing memory demands of the model, memory available on the GPU and speed +of computation. Generally speaking, employing larger batch sizes leads to +more efficient computation and potentially more efficient training steps. + +We have tested several hardware setups for training this model from scratch but +we emphasize that depending your hardware set up, you may need to adapt the +batch size and learning rate schedule. + +Please see the comments in `inception_train.py` for a few selected learning rate +plans based on some selected hardware setups. + +To train this model, you simply need to specify the following: + +```shell +# Build the training binary to run on a GPU. If you do not have a GPU, +# then exclude '--config=cuda' +bazel build -c opt --config=cuda inception/imagenet_train + +# run it +bazel-bin/inception/imagenet_train.py --num_gpus=1 --batch_size=32 --train_dir=/tmp/imagenet_train --data_dir=/tmp/imagenet_data +``` + +The model reads in the ImageNet training data from `--data_dir`. If you followed +the instructions in [Getting Started](#getting-started), then set +`--data_dir="${DATA_DIR}"`. The script assumes that there exists a set of +sharded TFRecord files containing the ImageNet data. 
If you have not created +TFRecord files, please refer to [Getting Started](#getting-started) + +Here is the output of the above command line when running on a Tesla K40c: + +```shell +2016-03-07 12:24:59.922898: step 0, loss = 13.11 (5.3 examples/sec; 6.064 sec/batch) +2016-03-07 12:25:55.206783: step 10, loss = 13.71 (9.4 examples/sec; 3.394 sec/batch) +2016-03-07 12:26:28.905231: step 20, loss = 14.81 (9.5 examples/sec; 3.380 sec/batch) +2016-03-07 12:27:02.699719: step 30, loss = 14.45 (9.5 examples/sec; 3.378 sec/batch) +2016-03-07 12:27:36.515699: step 40, loss = 13.98 (9.5 examples/sec; 3.376 sec/batch) +2016-03-07 12:28:10.220956: step 50, loss = 13.92 (9.6 examples/sec; 3.327 sec/batch) +2016-03-07 12:28:43.658223: step 60, loss = 13.28 (9.6 examples/sec; 3.350 sec/batch) +... +``` + +This example highlights several important points: + +* A log entry is printed every 10 step and the line includes the +total loss (starts around 13.0-14.0) and the speed of processing in terms +of throughput (examples / sec) and batch speed (sec/batch). + +* The first step in training is always slow. The primary reason for the slow +speed is that during the first step of training, the preprocessing queue must +first fill up the several thousand example images in order to reach its minimum +capacity before training starts. + +The number of GPU devices is specified by `--num_gpus` (which defaults to 1). +Specifying `--num_gpus` greater then 1 splits the batch evenly split across +the GPU cards. + +```shell +# Build the training binary to run on a GPU. If you do not have a GPU, +# then exclude '--config=cuda' +bazel build -c opt --config=cuda inception/imagenet_train + +# run it +bazel-bin/inception/imagenet_train --num_gpus=2 --batch_size=64 --train_dir=/tmp/imagenet_train +``` + +This model splits the batch of 64 images across 2 GPUs and calculates +the average gradient by waiting for both GPUs to finish calculating the +gradients from their respective data (See diagram above). 
Generally speaking, +using larger numbers of GPUs leads to higher throughput as well as the +opportunity to use larger batch sizes. In turn, larger batch sizes imply +better estimates of the gradient enabling the usage of higher learning rates. +In summary, using more GPUs results in simply faster training speed. + +Note that selecting a batch size is a difficult parameter to tune as it requires +balancing memory demands of the model, memory available on the GPU and speed +of computation. Generally speaking, employing larger batch sizes leads to +more efficient computation and potentially more efficient training steps. + +Note that there is considerable noise in the loss function on individual steps +in the previous log. Because of this noise, it is difficult to discern how well +a model is learning. The solution to the last problem is to launch TensorBoard +pointing to the directory containing the events log. + +```shell +tensorboard --logdir=/tmp/imagenet_train +``` + +TensorBoard has access to the many Summaries produced by the model that +describe multitudes of statistics tracking the model behavior and the quality +of the learned model. In particular, TensorBoard tracks a exponentially smoothed +version of the loss. In practice, it is far easier to judge how well a model +learns by monitoring the smoothed version of the loss. + +## How to Evaluate + +Evaluating an Inception v3 model on the ImageNet 2012 validation data set +requires running a separate binary. + +The evaluation procedure is nearly identical to [Evaluating a Model] +(https://www.tensorflow.org/tutorials/deep_cnn/index.html#evaluating-a-model) +described in the [Convolutional Neural Network](https://www.tensorflow.org/tutorials/deep_cnn/index.html) +tutorial. + +**WARNING** Be careful not to run the evaluation and training binary on the +same GPU or else you might run out of memory. 
Consider running the evaluation on +a separate GPU if available or suspending the training binary while running +the evaluation on the same GPU. + +Briefly, one can evaluate the model by running: + +```shell +# Build the training binary to run on a GPU. If you do not have a GPU, +# then exclude '--config=cuda' +bazel build -c opt --config=cuda inception/imagenet_eval + +# run it +bazel-bin/inception/imagenet_eval --checkpoint_dir=/tmp/imagenet_train --eval_dir=/tmp/imagenet_eval +``` + +Note that we point ``--checkpoint_dir`` to the location of the checkpoints +saved by `inception_train.py` above. Running the above command results in the +following output: + +```shell +2016-02-17 22:32:50.391206: precision @ 1 = 0.735 +... +``` + +The script calculates the precision @ 1 over the entire validation data +periodically. The precision @ 1 measures the how often the highest scoring +prediction from the model matched the ImageNet label -- in this case, 73.5%. +If you wish to run the eval just once and not periodically, append the +`--run_once` option. + +Much like the training script, `imagenet_eval.py` also +exports summaries that may be visualized in TensorBoard. These summaries +calculate additional statistics on the predictions (e.g. recall @ 5) as well +as monitor the statistics of the model activations and weights during +evaluation. + +## How to Fine-Tune a Pre-Trained Model on a New Task + +### Getting Started +Much like training the ImageNet model we must first convert a new data set to +the sharded TFRecord format which each entry is a serialized `tf.Example` proto. + +We have provided a script demonstrating how to do this for small data set of +of a few thousand flower images spread across 5 labels: + +```shell +daisy, dandelion, roses, sunflowers, tulips +``` +There is a single automated script that downloads the data set and converts +it to the TFRecord format. 
Much like the ImageNet data set, each record in the +TFRecord format is a serialized `tf.Example` proto whose entries include +a JPEG-encoded string and an integer label. Please see +[`parse_example_proto`](image_processing.py) for details. + +The script just takes a few minutes to run depending your network connection +speed for downloading and processing the images. Your hard disk requires 200MB +of free storage. Here we select `DATA_DIR=$HOME/flowers-data` as such a +location but feel free to edit accordingly. + +```shell +# location of where to place the flowers data +FLOWERS_DATA_DIR=$HOME/flowers-data + +# build the preprocessing script. +bazel build -c opt inception/download_and_preprocess_flowers + +# run it +bazel-bin/inception/download_and_preprocess_flowers "${FLOWERS_DATA_DIR}$" +``` + +If the script runs successfully, the final line of the terminal output should +look like: + +```shell +2016-02-24 20:42:25.067551: Finished writing all 3170 images in data set. +``` + +When the script finishes you will find 2 shards for the training and validation +files in the `DATA_DIR`. The files will match the patterns +`train-????-of-00001` and `validation-?????-of-00001`, respectively. + +**NOTE** If you wish to prepare a custom image data set for transfer learning, +you will need to invoke [`build_image_data.py`](data/build_image_data.py) +on your custom data set. +Please see the associated options and assumptions behind this script by reading +the comments section of [`build_image_data.py`](data/build_image_data.py). + +The second piece you will need is a trained Inception v3 image model. 
You have +the option of either training one yourself (See +[How to Train from Scratch](#how-to-train-from-scratch) for details) or you can +download a pre-trained model like so: + +```shell +# location of where to place the Inception v3 model +DATA_DIR=$HOME/inception-v3-model +cd ${DATA_DIR} + +# download the Inception v3 model +curl -O http://download.tensorflow.org/models/image/imagenet/inception-v3-2016-03-01.tar.gz +tar xzf inception-v3-2016-03-01.tar.gz + +# this will create a directory called inception-v3 which contains the following files. +> ls inception-v3 +README.txt +checkpoint +model.ckpt-157585 +``` + +[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) +You are now ready to fine-tune your pre-trained Inception v3 model +with the flower data set. + +### How to Retrain a Trained Model on the Flowers Data + +We are now ready to fine-tune a pre-trained Inception-v3 model on +the flowers data set. This requires two distinct changes to our training +procedure: + +1. Build the exact same model as previously except we change the number +of labels in the final classification layer. + +2. Restore all weights from the pre-trained Inception-v3 except for the +final classification layer; this will get randomly initialized instead. + + + +We can perform these two operations by specifying two flags: +`--pretrained_model_checkpoint_path` and `--fine_tune`. +The first flag is a string that points to the path of a pre-trained Inception-v3 +model. If this flag is specified, it will load the entire model from the +checkpoint before the script begins training. + +The second flag `--fine_tune` is a boolean that indicates whether the last +classification layer should be randomly initialized or restored. +You may set this flag to false +if you wish to continue training a pre-trained model from a checkpoint. If you +set this flag to true, you can train a new classification layer from scratch. 
+ +In order to understand how `--fine_tune` works, please see the discussion +on `Variables` in the TensorFlow-Slim [`README.md`](slim/README.md). + +Putting this all together you can retrain a pre-trained Inception-v3 model +on the flowers data set with the following command. + +```shell +# Build the training binary to run on a GPU. If you do not have a GPU, +# then exclude '--config=cuda' +bazel build -c opt --config=cuda inception/flowers_train + +# Path to the downloaded Inception-v3 model. +MODEL_PATH="${INCEPTION_MODEL_DIR}/model.ckpt-157585" + +# Directory where the flowers data resides. +FLOWERS_DATA_DIR=/tmp/flowers-data/ + +# Directory where to save the checkpoint and events files. +TRAIN_DIR=/tmp/flowers_train/ + +# Run the fine-tuning on the flowers data set starting from the pre-trained +# Imagenet-v3 model. +bazel-bin/inception/flowers_train \ + --train_dir="${TRAIN_DIR}" \ + --data_dir="${FLOWERS_DATA_DIR}" \ + --pretrained_model_checkpoint_path="${MODEL_PATH}" \ + --fine_tune=True \ + --initial_learning_rate=0.001 \ + --input_queue_memory_factor=1 +``` + +We have added a few extra options to the training procedure. + +* Fine-tuning a model a separate data set requires significantly lowering the +initial learning rate. We set the initial learning rate to 0.001. +* The flowers data set is quite small so we shrink the size of the shuffling +queue of examples. See [Adjusting Memory Demands](#adjusting-memory-demands) +for more details. + +The training script will only reports the loss. To evaluate the quality of the +fine-tuned model, you will need to run `flowers_eval`: + + +```shell +# Build the training binary to run on a GPU. If you do not have a GPU, +# then exclude '--config=cuda' +bazel build -c opt --config=cuda inception/flowers_eval + +# Directory where we saved the fine-tuned checkpoint and events files. +TRAIN_DIR=/tmp/flowers_train/ + +# Directory where the flowers data resides. 
+FLOWERS_DATA_DIR=/tmp/flowers-data/ + +# Directory where to save the evaluation events files. +EVAL_DIR=/tmp/flowers_eval/ + +# Evaluate the fine-tuned model on a hold-out of the flower data set. +bazel-bin/inception/flowers_eval \ + --eval_dir="${EVAL_DIR}" \ + --data_dir="${FLOWERS_DATA_DIR}" \ + --subset=validation \ + --num_examples=500 \ + --checkpoint_dir="${TRAIN_DIR}" \ + --input_queue_memory_factor=1 \ + --run_once + +We find that the evaluation arrives at roughly 93.4% precision@1 after the +model has been running for 2000 steps. + +```shell +Succesfully loaded model from /tmp/flowers/model.ckpt-1999 at step=1999. +2016-03-01 16:52:51.761219: starting evaluation on (validation). +2016-03-01 16:53:05.450419: [20 batches out of 20] (36.5 examples/sec; 0.684sec/batch) +2016-03-01 16:53:05.450471: precision @ 1 = 0.9340 recall @ 5 = 0.9960 [500 examples] +``` + + +## How to Construct a New Dataset for Retraining + +One can use the existing scripts supplied with this model to build a new +dataset for training or fine-tuning. The main script to employ is +[`build_image_data.py`](./build_image_data.py). Briefly, +this script takes a structured +directory of images and converts it to a sharded `TFRecord` that can be read +by the Inception model. + +In particular, you will need to create a directory of training images +that reside within `$TRAIN_DIR` and `$VALIDATION_DIR` arranged as such: + +```shell + $TRAIN_DIR/dog/image0.jpeg + $TRAIN_DIR/dog/image1.jpg + $TRAIN_DIR/dog/image2.png + ... + $TRAIN_DIR/cat/weird-image.jpeg + $TRAIN_DIR/cat/my-image.jpeg + $TRAIN_DIR/cat/my-image.JPG + ... + $VALIDATION_DIR/dog/imageA.jpeg + $VALIDATION_DIR/dog/imageB.jpg + $VALIDATION_DIR/dog/imageC.png + ... + $VALIDATION_DIR/cat/weird-image.PNG + $VALIDATION_DIR/cat/that-image.jpg + $VALIDATION_DIR/cat/cat.JPG + ... 
+``` +Each sub-directory in `$TRAIN_DIR` and `$VALIDATION_DIR` corresponds to a +unique label for the images that reside within that sub-directory. The images +may be JPEG or PNG images. We do not support other images types currently. + +Once the data is arranged in this directory structure, we can run +`build_image_data.py` on the data to generate the sharded `TFRecord` dataset. +Each entry of the `TFRecord` is a serialized `tf.Example` protocol buffer. +A complete list of information contained in the `tf.Example` is described +in the comments of `build_image_data.py`. + +To run `build_image_data.py`, you can run the following command line: + +```shell +# location to where to save the TFRecord data. +OUTPUT_DIRECTORY=$HOME/my-custom-data/ + +# build the preprocessing script. +bazel build -c opt inception/build_image_data + +# convert the data. +bazel-bin/inception/build_image_data \ + --train_directory="${TRAIN_DIR}" \ + --validation_directory="${VALIDATION_DIR}" \ + --output_directory="${OUTPUT_DIRECTORY}" \ + --labels_file="${LABELS_FILE}" \ + --train_shards=128 \ + --validation_shards=24 \ + --num_threads=8 +``` +where the `$OUTPUT_DIRECTORY` is the location of the sharded `TFRecords`. The +`$LABELS_FILE` will be a text file that is outputted by the script that +provides a list of all of the labels. For instance, in the case flowers data +set, the `$LABELS_FILE` contained the following data: + +```shell +daisy +dandelion +roses +sunflowers +tulips +``` + +Note that each row of each label corresponds with the entry in the final +classifier in the model. That is, the `daisy` corresponds to the classifier +for entry `1`; `dandelion` is entry `2`, etc. We skip label `0` as a +background class. + +After running this script produces files that look like the following: + +```shell + $TRAIN_DIR/train-00000-of-00024 + $TRAIN_DIR/train-00001-of-00024 + ... 
+ $TRAIN_DIR/train-00023-of-00024 + +and + + $VALIDATION_DIR/validation-00000-of-00008 + $VALIDATION_DIR/validation-00001-of-00008 + ... + $VALIDATION_DIR/validation-00007-of-00008 +``` +where 24 and 8 are the number of shards specified for each +dataset, respectively. Generally speaking, we aim for selecting the number +of shards such that roughly 1024 images reside in each shard. One this +data set is built you are ready to train or fine-tune an Inception model +on this data set. + +## Practical Considerations for Training a Model + +The model architecture and training procedure is heavily dependent on the +hardware used to train the model. If you wish to train or fine-tune this +model on your machine **you will need to adjust and empirically determine +a good set of training hyper-parameters for your setup**. What follows are +some general considerations for novices. + +### Finding Good Hyperparameters + +Roughly 5-10 hyper-parameters govern the speed at which a network is trained. +In addition to `--batch_size` and `--num_gpus`, there are several constants +defined in [inception_train.py](./inception_train.py) which dictate the +learning schedule. + +```shell +RMSPROP_DECAY = 0.9 # Decay term for RMSProp. +MOMENTUM = 0.9 # Momentum in RMSProp. +RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp. +INITIAL_LEARNING_RATE = 0.1 # Initial learning rate. +NUM_EPOCHS_PER_DECAY = 30.0 # Epochs after which learning rate decays. +LEARNING_RATE_DECAY_FACTOR = 0.16 # Learning rate decay factor. +``` + +There are many papers tha discuss the various tricks and trade-offs associated +with training a model with stochastic gradient descent. 
For those new to the
+field, some great references are:
+
+* Y Bengio, [Practical recommendations for gradient-based training of deep architectures](http://arxiv.org/abs/1206.5533)
+* I Goodfellow, Y Bengio and A Courville, [Deep Learning](http://www.deeplearningbook.org/)
+
+What follows is a summary of some general advice for identifying appropriate
+model hyper-parameters in the context of this particular
+model training setup. Namely,
+this library provides *synchronous* updates to model parameters based on
+batch-splitting the model across multiple GPUs.
+
+* Higher learning rates lead to faster training. Too high a learning rate
+leads to instability and will cause model parameters to diverge to infinity
+or NaN.
+
+* Larger batch sizes lead to higher quality estimates of the gradient and
+permit training the model with higher learning rates.
+
+* Often the GPU memory is a bottleneck that prevents employing larger batch
+sizes. Employing more GPUs allows one to use larger batch sizes because
+this model splits the batch across the GPUs.
+
+**NOTE** If one wishes to train this model with *asynchronous* gradient updates,
+one will need to substantially alter this model and new considerations need to
+be factored into hyperparameter tuning.
+See [Large Scale Distributed Deep Networks](http://research.google.com/archive/large_deep_networks_nips2012.html)
+for a discussion in this domain.
+
+### Adjusting Memory Demands
+
+Training this model has large memory demands in terms of the CPU and GPU. Let's
+discuss each item in turn.
+
+GPU memory is relatively small compared to CPU memory. Two items dictate the
+amount of GPU memory employed -- model architecture and batch size. Assuming
+that you keep the model architecture fixed, the sole parameter governing the
+GPU demand is the batch size. A good rule of thumb is to try to employ as large
+a batch size as will fit on the GPU.
+
+If you run out of GPU memory, either lower the `--batch_size` or employ more
+GPUs on your desktop. The model performs batch-splitting across GPUs, thus N
+GPUs can handle N times the batch size of 1 GPU.
+
+The model requires a large amount of CPU memory as well. We have tuned the model
+to employ about ~40GB of CPU memory. Thus, having access to 64 or 128 GB of
+CPU memory would be ideal.
+
+If that is not possible, you can tune down the memory demands of the model
+via lowering `--input_queue_memory_factor`. Images are preprocessed
+asynchronously with respect to the main training across
+`--num_preprocess_threads` threads. The preprocessed images are stored in a
+shuffling queue in which each GPU performs a dequeue operation in order
+to receive a `batch_size` worth of images.
+
+In order to guarantee good shuffling across the data, we maintain a large
+shuffling queue of 1024 x `input_queue_memory_factor` images. For the current
+model architecture, this corresponds to 16GB of CPU memory. You may lower
+`input_queue_memory_factor` in order to decrease the memory footprint. Keep
+in mind though that lowering this value drastically may result in a model
+with slightly lower predictive accuracy when training from scratch. Please
+see comments in [`image_processing.py`](./image_processing.py) for more details.
+
+## Troubleshooting
+
+#### The model runs out of CPU memory.
+
+In lieu of buying more CPU memory, an easy fix is to
+decrease `--input_queue_memory_factor`. See
+[Adjusting Memory Demands](#adjusting-memory-demands).
+
+
+#### The model runs out of GPU memory.
+
+The data is not able to fit on the GPU card. The simplest solution is to
+decrease the batch size of the model. Otherwise, you will need to think about
+a more sophisticated method for specifying the training which cuts up the model
+across multiple `session.run()` calls or partitions the model across multiple
+GPUs.
See [Using GPUs](https://www.tensorflow.org/versions/r0.7/how_tos/using_gpu/index.html) +and +[Adjusting Memory Demands](#adjusting-memory-demands) +for more information. + +#### The model training results in NaN's. + +The learning rate of the model is too high. Turn down your learning rate. + +#### I wish to train a model with a different image size. + +The simplest solution is to artificially resize your images to `299x299` +pixels. See +[Images](https://www.tensorflow.org/versions/r0.7/api_docs/python/image.html) +section for many resizing, cropping and padding methods. +Note that the entire model architecture is predicated on a `299x299` image, +thus if you wish to change the input image size, then you may need to redesign +the entire model architecture. + +#### What hardware specification are these hyper-parameters targeted for? + +We targeted a desktop with 128GB of CPU ram connected to 8 NVIDIA Tesla K40 +GPU cards but we have run this on desktops with 32GB of CPU ram and 1 NVIDIA +Tesla K40. You can get a sense of the various training configurations we +tested by reading the comments in [`inception_train.py`](./inception_train.py). + + + + + + diff --git a/inception/data/build_image_data.py b/inception/data/build_image_data.py new file mode 100644 index 000000000..d46f8bf47 --- /dev/null +++ b/inception/data/build_image_data.py @@ -0,0 +1,430 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Converts image data to TFRecords file format with Example protos.
+
+The image data set is expected to reside in JPEG files located in the
+following directory structure.
+
+  data_dir/label_0/image0.jpeg
+  data_dir/label_0/image1.jpg
+  ...
+  data_dir/label_1/weird-image.jpeg
+  data_dir/label_1/my-image.jpeg
+  ...
+
+where the sub-directory is the unique label associated with these images.
+
+This TensorFlow script converts the training and evaluation data into
+a sharded data set consisting of TFRecord files
+
+  train_directory/train-00000-of-01024
+  train_directory/train-00001-of-01024
+  ...
+  train_directory/train-00127-of-01024
+
+and
+
+  validation_directory/validation-00000-of-00128
+  validation_directory/validation-00001-of-00128
+  ...
+  validation_directory/validation-00127-of-00128
+
+where we have selected 1024 and 128 shards for each data set. Each record
+within the TFRecord file is a serialized Example proto. The Example proto
+contains the following fields:
+
+  image/encoded: string containing JPEG encoded image in RGB colorspace
+  image/height: integer, image height in pixels
+  image/width: integer, image width in pixels
+  image/colorspace: string, specifying the colorspace, always 'RGB'
+  image/channels: integer, specifying the number of channels, always 3
+  image/format: string, specifying the format, always 'JPEG'
+
+  image/filename: string containing the basename of the image file
+        e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
+  image/class/label: integer specifying the index in a classification layer.
+    The label ranges from [0, num_labels] where 0 is unused and left as
+    the background class.
+  image/class/text: string specifying the human-readable version of the label
+    e.g. 'dog'
+
+If your data set involves bounding boxes, please look at build_imagenet_data.py.
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from datetime import datetime +import os +import random +import sys +import threading + + +import numpy as np +import tensorflow as tf + +tf.app.flags.DEFINE_string('train_directory', '/tmp/', + 'Training data directory') +tf.app.flags.DEFINE_string('validation_directory', '/tmp/', + 'Validation data directory') +tf.app.flags.DEFINE_string('output_directory', '/tmp/', + 'Output data directory') + +tf.app.flags.DEFINE_integer('train_shards', 2, + 'Number of shards in training TFRecord files.') +tf.app.flags.DEFINE_integer('validation_shards', 2, + 'Number of shards in validation TFRecord files.') + +tf.app.flags.DEFINE_integer('num_threads', 2, + 'Number of threads to preprocess the images.') + +# The labels file contains a list of valid labels are held in this file. +# Assumes that the file contains entries as such: +# dog +# cat +# flower +# where each line corresponds to a label. We map each label contained in +# the file to an integer corresponding to the line number starting from 0. +tf.app.flags.DEFINE_string('labels_file', '', 'Labels file') + + +FLAGS = tf.app.flags.FLAGS + + +def _int64_feature(value): + """Wrapper for inserting int64 features into Example proto.""" + if not isinstance(value, list): + value = [value] + return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) + + +def _bytes_feature(value): + """Wrapper for inserting bytes features into Example proto.""" + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + +def _convert_to_example(filename, image_buffer, label, text, height, width): + """Build an Example proto for an example. + + Args: + filename: string, path to an image file, e.g., '/path/to/example.JPG' + image_buffer: string, JPEG encoding of RGB image + label: integer, identifier for the ground truth for the network + text: string, unique human-readable, e.g. 
'dog' + height: integer, image height in pixels + width: integer, image width in pixels + Returns: + Example proto + """ + + colorspace = 'RGB' + channels = 3 + image_format = 'JPEG' + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': _int64_feature(height), + 'image/width': _int64_feature(width), + 'image/colorspace': _bytes_feature(colorspace), + 'image/channels': _int64_feature(channels), + 'image/class/label': _int64_feature(label), + 'image/class/text': _bytes_feature(text), + 'image/format': _bytes_feature(image_format), + 'image/filename': _bytes_feature(os.path.basename(filename)), + 'image/encoded': _bytes_feature(image_buffer)})) + return example + + +class ImageCoder(object): + """Helper class that provides TensorFlow image coding utilities.""" + + def __init__(self): + # Create a single Session to run all image coding calls. + self._sess = tf.Session() + + # Initializes function that converts PNG to JPEG data. + self._png_data = tf.placeholder(dtype=tf.string) + image = tf.image.decode_png(self._png_data, channels=3) + self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) + + # Initializes function that decodes RGB JPEG data. + self._decode_jpeg_data = tf.placeholder(dtype=tf.string) + self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) + + def png_to_jpeg(self, image_data): + return self._sess.run(self._png_to_jpeg, + feed_dict={self._png_data: image_data}) + + def decode_jpeg(self, image_data): + image = self._sess.run(self._decode_jpeg, + feed_dict={self._decode_jpeg_data: image_data}) + assert len(image.shape) == 3 + assert image.shape[2] == 3 + return image + + +def _is_png(filename): + """Determine if a file contains a PNG format image. + + Args: + filename: string, path of the image file. + + Returns: + boolean indicating if the image is a PNG. + """ + return '.png' in filename + + +def _process_image(filename, coder): + """Process a single image file. 
+ + Args: + filename: string, path to an image file e.g., '/path/to/example.JPG'. + coder: instance of ImageCoder to provide TensorFlow image coding utils. + Returns: + image_buffer: string, JPEG encoding of RGB image. + height: integer, image height in pixels. + width: integer, image width in pixels. + """ + # Read the image file. + image_data = tf.gfile.FastGFile(filename, 'r').read() + + # Convert any PNG to JPEG's for consistency. + if _is_png(filename): + print('Converting PNG to JPEG for %s' % filename) + image_data = coder.png_to_jpeg(image_data) + + # Decode the RGB JPEG. + image = coder.decode_jpeg(image_data) + + # Check that image converted to RGB + assert len(image.shape) == 3 + height = image.shape[0] + width = image.shape[1] + assert image.shape[2] == 3 + + return image_data, height, width + + +def _process_image_files_batch(coder, thread_index, ranges, name, filenames, + texts, labels, num_shards): + """Processes and saves list of images as TFRecord in 1 thread. + + Args: + coder: instance of ImageCoder to provide TensorFlow image coding utils. + thread_index: integer, unique batch to run index is within [0, len(ranges)). + ranges: list of pairs of integers specifying ranges of each batches to + analyze in parallel. + name: string, unique identifier specifying the data set + filenames: list of strings; each string is a path to an image file + texts: list of strings; each string is human readable, e.g. 'dog' + labels: list of integer; each integer identifies the ground truth + num_shards: integer number of shards for this data set. + """ + # Each thread produces N shards where N = int(num_shards / num_threads). + # For instance, if num_shards = 128, and the num_threads = 2, then the first + # thread would produce shards [0, 64). 
+ num_threads = len(ranges) + assert not num_shards % num_threads + num_shards_per_batch = int(num_shards / num_threads) + + shard_ranges = np.linspace(ranges[thread_index][0], + ranges[thread_index][1], + num_shards_per_batch + 1).astype(int) + num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0] + + counter = 0 + for s in xrange(num_shards_per_batch): + # Generate a sharded version of the file name, e.g. 'train-00002-of-00010' + shard = thread_index * num_shards_per_batch + s + output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards) + output_file = os.path.join(FLAGS.output_directory, output_filename) + writer = tf.python_io.TFRecordWriter(output_file) + + shard_counter = 0 + files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) + for i in files_in_shard: + filename = filenames[i] + label = labels[i] + text = texts[i] + + image_buffer, height, width = _process_image(filename, coder) + + example = _convert_to_example(filename, image_buffer, label, + text, height, width) + writer.write(example.SerializeToString()) + shard_counter += 1 + counter += 1 + + if not counter % 1000: + print('%s [thread %d]: Processed %d of %d images in thread batch.' % + (datetime.now(), thread_index, counter, num_files_in_thread)) + sys.stdout.flush() + + print('%s [thread %d]: Wrote %d images to %s' % + (datetime.now(), thread_index, shard_counter, output_file)) + sys.stdout.flush() + shard_counter = 0 + print('%s [thread %d]: Wrote %d images to %d shards.' % + (datetime.now(), thread_index, counter, num_files_in_thread)) + sys.stdout.flush() + + +def _process_image_files(name, filenames, texts, labels, num_shards): + """Process and save list of images as TFRecord of Example protos. + + Args: + name: string, unique identifier specifying the data set + filenames: list of strings; each string is a path to an image file + texts: list of strings; each string is human readable, e.g. 
'dog' + labels: list of integer; each integer identifies the ground truth + num_shards: integer number of shards for this data set. + """ + assert len(filenames) == len(texts) + assert len(filenames) == len(labels) + + # Break all images into batches with a [ranges[i][0], ranges[i][1]]. + spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) + ranges = [] + threads = [] + for i in xrange(len(spacing) - 1): + ranges.append([spacing[i], spacing[i+1]]) + + # Launch a thread for each batch. + print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)) + sys.stdout.flush() + + # Create a mechanism for monitoring when all threads are finished. + coord = tf.train.Coordinator() + + # Create a generic TensorFlow-based utility for converting all image codings. + coder = ImageCoder() + + threads = [] + for thread_index in xrange(len(ranges)): + args = (coder, thread_index, ranges, name, filenames, + texts, labels, num_shards) + t = threading.Thread(target=_process_image_files_batch, args=args) + t.start() + threads.append(t) + + # Wait for all the threads to terminate. + coord.join(threads) + print('%s: Finished writing all %d images in data set.' % + (datetime.now(), len(filenames))) + sys.stdout.flush() + + +def _find_image_files(data_dir, labels_file): + """Build a list of all images files and labels in the data set. + + Args: + data_dir: string, path to the root directory of images. + + Assumes that the image data set resides in JPEG files located in + the following directory structure. + + data_dir/dog/another-image.JPEG + data_dir/dog/my-image.jpg + + where 'dog' is the label associated with these images. + + labels_file: string, path to the labels file. + + The list of valid labels are held in this file. Assumes that the file + contains entries as such: + dog + cat + flower + where each line corresponds to a label. 
We map each label contained in + the file to an integer starting with the integer 0 corresponding to the + label contained in the first line. + + Returns: + filenames: list of strings; each string is a path to an image file. + texts: list of strings; each string is the class, e.g. 'dog' + labels: list of integer; each integer identifies the ground truth. + """ + print('Determining list of input files and labels from %s.' % data_dir) + unique_labels = [l.strip() for l in tf.gfile.FastGFile( + labels_file, 'r').readlines()] + + labels = [] + filenames = [] + texts = [] + + # Leave label index 0 empty as a background class. + label_index = 1 + + # Construct the list of JPEG files and labels. + for text in unique_labels: + jpeg_file_path = '%s/%s/*' % (data_dir, text) + matching_files = tf.gfile.Glob(jpeg_file_path) + + labels.extend([label_index] * len(matching_files)) + texts.extend([text] * len(matching_files)) + filenames.extend(matching_files) + + if not label_index % 100: + print('Finished finding files in %d of %d classes.' % ( + label_index, len(labels))) + label_index += 1 + + # Shuffle the ordering of all image files in order to guarantee + # random ordering of the images with respect to label in the + # saved TFRecord files. Make the randomization repeatable. + shuffled_index = range(len(filenames)) + random.seed(12345) + random.shuffle(shuffled_index) + + filenames = [filenames[i] for i in shuffled_index] + texts = [texts[i] for i in shuffled_index] + labels = [labels[i] for i in shuffled_index] + + print('Found %d JPEG files across %d labels inside %s.' % + (len(filenames), len(unique_labels), data_dir)) + return filenames, texts, labels + + +def _process_dataset(name, directory, num_shards, labels_file): + """Process a complete data set and save it as a TFRecord. + + Args: + name: string, unique identifier specifying the data set. + directory: string, root path to the data set. + num_shards: integer number of shards for this data set. 
+ labels_file: string, path to the labels file. + """ + filenames, texts, labels = _find_image_files(directory, labels_file) + _process_image_files(name, filenames, texts, labels, num_shards) + + +def main(unused_argv): + assert not FLAGS.train_shards % FLAGS.num_threads, ( + 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards') + assert not FLAGS.validation_shards % FLAGS.num_threads, ( + 'Please make the FLAGS.num_threads commensurate with ' + 'FLAGS.validation_shards') + print('Saving results to %s' % FLAGS.output_directory) + + # Run it! + _process_dataset('validation', FLAGS.validation_directory, + FLAGS.validation_shards, FLAGS.labels_file) + _process_dataset('train', FLAGS.train_directory, + FLAGS.train_shards, FLAGS.labels_file) + + +if __name__ == '__main__': + tf.app.run() diff --git a/inception/data/build_imagenet_data.py b/inception/data/build_imagenet_data.py new file mode 100644 index 000000000..77c5a816a --- /dev/null +++ b/inception/data/build_imagenet_data.py @@ -0,0 +1,702 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Converts ImageNet data to TFRecords file format with Example protos. + +The raw ImageNet data set is expected to reside in JPEG files located in the +following directory structure. 
+ + data_dir/n01440764/ILSVRC2012_val_00000293.JPEG + data_dir/n01440764/ILSVRC2012_val_00000543.JPEG + ... + +where 'n01440764' is the unique synset label associated with +these images. + +The training data set consists of 1000 sub-directories (i.e. labels) +each containing 1200 JPEG images for a total of 1.2M JPEG images. + +The evaluation data set consists of 1000 sub-directories (i.e. labels) +each containing 50 JPEG images for a total of 50K JPEG images. + +This TensorFlow script converts the training and evaluation data into +a sharded data set consisting of 1024 and 128 TFRecord files, respectively. + + train_directory/train-00000-of-01024 + train_directory/train-00001-of-01024 + ... + train_directory/train-00127-of-01024 + +and + + validation_directory/validation-00000-of-00128 + validation_directory/validation-00001-of-00128 + ... + validation_directory/validation-00127-of-00128 + +Each validation TFRecord file contains ~390 records. Each training TFREcord +file contains ~1250 records. Each record within the TFRecord file is a +serialized Example proto. The Example proto contains the following fields: + + image/encoded: string containing JPEG encoded image in RGB colorspace + image/height: integer, image height in pixels + image/width: integer, image width in pixels + image/colorspace: string, specifying the colorspace, always 'RGB' + image/channels: integer, specifying the number of channels, always 3 + image/format: string, specifying the format, always'JPEG' + + image/filename: string containing the basename of the image file + e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG' + image/class/label: integer specifying the index in a classification layer. + The label ranges from [1, 1000] where 0 is not used. + image/class/synset: string specifying the unique ID of the label, + e.g. 'n01440764' + image/class/text: string specifying the human-readable version of the label + e.g. 
'red fox, Vulpes vulpes' + + image/object/bbox/xmin: list of integers specifying the 0+ human annotated + bounding boxes + image/object/bbox/xmax: list of integers specifying the 0+ human annotated + bounding boxes + image/object/bbox/ymin: list of integers specifying the 0+ human annotated + bounding boxes + image/object/bbox/ymax: list of integers specifying the 0+ human annotated + bounding boxes + image/object/bbox/label: integer specifying the index in a classification + layer. The label ranges from [1, 1000] where 0 is not used. Note this is + always identical to the image label. + +Note that the length of xmin is identical to the length of xmax, ymin and ymax +for each example. + +Running this script using 16 threads may take around ~2.5 hours on a HP Z420. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from datetime import datetime +import os +import random +import sys +import threading + + +import numpy as np +import tensorflow as tf + +tf.app.flags.DEFINE_string('train_directory', '/tmp/', + 'Training data directory') +tf.app.flags.DEFINE_string('validation_directory', '/tmp/', + 'Validation data directory') +tf.app.flags.DEFINE_string('output_directory', '/tmp/', + 'Output data directory') + +tf.app.flags.DEFINE_integer('train_shards', 1024, + 'Number of shards in training TFRecord files.') +tf.app.flags.DEFINE_integer('validation_shards', 128, + 'Number of shards in validation TFRecord files.') + +tf.app.flags.DEFINE_integer('num_threads', 8, + 'Number of threads to preprocess the images.') + +# The labels file contains a list of valid labels are held in this file. +# Assumes that the file contains entries as such: +# n01440764 +# n01443537 +# n01484850 +# where each line corresponds to a label expressed as a synset. We map +# each synset contained in the file to an integer (based on the alphabetical +# ordering). See below for details. 
+tf.app.flags.DEFINE_string('labels_file',
+                           'imagenet_lsvrc_2015_synsets.txt',
+                           'Labels file')
+
+# This file contains the mapping from synset to human-readable label.
+# Assumes each line of the file looks like:
+#
+#   n02119247    black fox
+#   n02119359    silver fox
+#   n02119477    red fox, Vulpes fulva
+#
+# where each line corresponds to a unique mapping. Note that each line is
+# formatted as <synset>\t<human readable label>.
+tf.app.flags.DEFINE_string('imagenet_metadata_file',
+                           'imagenet_metadata.txt',
+                           'ImageNet metadata file')
+
+# This file is the output of process_bounding_box.py
+# Assumes each line of the file looks like:
+#
+#   n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
+#
+# where each line corresponds to one bounding box annotation associated
+# with an image. Each line can be parsed as:
+#
+#   <filename>, <xmin>, <ymin>, <xmax>, <ymax>
+#
+# Note that there might exist multiple bounding box annotations associated
+# with an image file.
+tf.app.flags.DEFINE_string('bounding_box_file',
+                           './imagenet_2012_bounding_boxes.csv',
+                           'Bounding box file')
+
+FLAGS = tf.app.flags.FLAGS
+
+
+def _int64_feature(value):
+  """Wrapper for inserting int64 features into Example proto."""
+  if not isinstance(value, list):
+    value = [value]
+  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
+
+
+def _float_feature(value):
+  """Wrapper for inserting float features into Example proto."""
+  if not isinstance(value, list):
+    value = [value]
+  return tf.train.Feature(float_list=tf.train.FloatList(value=value))
+
+
+def _bytes_feature(value):
+  """Wrapper for inserting bytes features into Example proto."""
+  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
+
+
+def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
+                        height, width):
+  """Build an Example proto for an example.
+ + Args: + filename: string, path to an image file, e.g., '/path/to/example.JPG' + image_buffer: string, JPEG encoding of RGB image + label: integer, identifier for the ground truth for the network + synset: string, unique WordNet ID specifying the label, e.g., 'n02323233' + human: string, human-readable label, e.g., 'red fox, Vulpes vulpes' + bbox: list of bounding boxes; each box is a list of integers + specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to + the same label as the image label. + height: integer, image height in pixels + width: integer, image width in pixels + Returns: + Example proto + """ + xmin = [] + ymin = [] + xmax = [] + ymax = [] + for b in bbox: + assert len(b) == 4 + # pylint: disable=expression-not-assigned + [l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)] + # pylint: enable=expression-not-assigned + + colorspace = 'RGB' + channels = 3 + image_format = 'JPEG' + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': _int64_feature(height), + 'image/width': _int64_feature(width), + 'image/colorspace': _bytes_feature(colorspace), + 'image/channels': _int64_feature(channels), + 'image/class/label': _int64_feature(label), + 'image/class/synset': _bytes_feature(synset), + 'image/class/text': _bytes_feature(human), + 'image/object/bbox/xmin': _float_feature(xmin), + 'image/object/bbox/xmax': _float_feature(xmax), + 'image/object/bbox/ymin': _float_feature(ymin), + 'image/object/bbox/ymax': _float_feature(ymax), + 'image/object/bbox/label': _int64_feature([label] * len(xmin)), + 'image/format': _bytes_feature(image_format), + 'image/filename': _bytes_feature(os.path.basename(filename)), + 'image/encoded': _bytes_feature(image_buffer)})) + return example + + +class ImageCoder(object): + """Helper class that provides TensorFlow image coding utilities.""" + + def __init__(self): + # Create a single Session to run all image coding calls. 
+ self._sess = tf.Session() + + # Initializes function that converts PNG to JPEG data. + self._png_data = tf.placeholder(dtype=tf.string) + image = tf.image.decode_png(self._png_data, channels=3) + self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) + + # Initializes function that converts CMYK JPEG data to RGB JPEG data. + self._cmyk_data = tf.placeholder(dtype=tf.string) + image = tf.image.decode_jpeg(self._cmyk_data, channels=0) + self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) + + # Initializes function that decodes RGB JPEG data. + self._decode_jpeg_data = tf.placeholder(dtype=tf.string) + self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) + + def png_to_jpeg(self, image_data): + return self._sess.run(self._png_to_jpeg, + feed_dict={self._png_data: image_data}) + + def cmyk_to_rgb(self, image_data): + return self._sess.run(self._cmyk_to_rgb, + feed_dict={self._cmyk_data: image_data}) + + def decode_jpeg(self, image_data): + image = self._sess.run(self._decode_jpeg, + feed_dict={self._decode_jpeg_data: image_data}) + assert len(image.shape) == 3 + assert image.shape[2] == 3 + return image + + +def _is_png(filename): + """Determine if a file contains a PNG format image. + + Args: + filename: string, path of the image file. + + Returns: + boolean indicating if the image is a PNG. + """ + # File list from: + # https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU + return 'n02105855_2933.JPEG' in filename + + +def _is_cmyk(filename): + """Determine if file contains a CMYK JPEG format image. + + Args: + filename: string, path of the image file. + + Returns: + boolean indicating if the image is a JPEG encoded with CMYK color space. 
+ """ + # File list from: + # https://github.com/cytsai/ilsvrc-cmyk-image-list + blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG', + 'n02447366_23489.JPEG', 'n02492035_15739.JPEG', + 'n02747177_10752.JPEG', 'n03018349_4028.JPEG', + 'n03062245_4620.JPEG', 'n03347037_9675.JPEG', + 'n03467068_12171.JPEG', 'n03529860_11437.JPEG', + 'n03544143_17228.JPEG', 'n03633091_5218.JPEG', + 'n03710637_5125.JPEG', 'n03961711_5286.JPEG', + 'n04033995_2932.JPEG', 'n04258138_17003.JPEG', + 'n04264628_27969.JPEG', 'n04336792_7448.JPEG', + 'n04371774_5854.JPEG', 'n04596742_4225.JPEG', + 'n07583066_647.JPEG', 'n13037406_4650.JPEG'] + return filename.split('/')[-1] in blacklist + + +def _process_image(filename, coder): + """Process a single image file. + + Args: + filename: string, path to an image file e.g., '/path/to/example.JPG'. + coder: instance of ImageCoder to provide TensorFlow image coding utils. + Returns: + image_buffer: string, JPEG encoding of RGB image. + height: integer, image height in pixels. + width: integer, image width in pixels. + """ + # Read the image file. + image_data = tf.gfile.FastGFile(filename, 'r').read() + + # Clean the dirty data. + if _is_png(filename): + # 1 image is a PNG. + print('Converting PNG to JPEG for %s' % filename) + image_data = coder.png_to_jpeg(image_data) + elif _is_cmyk(filename): + # 22 JPEG images are in CMYK colorspace. + print('Converting CMYK to RGB for %s' % filename) + image_data = coder.cmyk_to_rgb(image_data) + + # Decode the RGB JPEG. + image = coder.decode_jpeg(image_data) + + # Check that image converted to RGB + assert len(image.shape) == 3 + height = image.shape[0] + width = image.shape[1] + assert image.shape[2] == 3 + + return image_data, height, width + + +def _process_image_files_batch(coder, thread_index, ranges, name, filenames, + synsets, labels, humans, bboxes, num_shards): + """Processes and saves list of images as TFRecord in 1 thread. 
+    thread_index: integer, index of the batch this thread processes; must lie
+      in [0, len(ranges)).
'train-00002-of-00010' + shard = thread_index * num_shards_per_batch + s + output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards) + output_file = os.path.join(FLAGS.output_directory, output_filename) + writer = tf.python_io.TFRecordWriter(output_file) + + shard_counter = 0 + files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) + for i in files_in_shard: + filename = filenames[i] + label = labels[i] + synset = synsets[i] + human = humans[i] + bbox = bboxes[i] + + image_buffer, height, width = _process_image(filename, coder) + + example = _convert_to_example(filename, image_buffer, label, + synset, human, bbox, + height, width) + writer.write(example.SerializeToString()) + shard_counter += 1 + counter += 1 + + if not counter % 1000: + print('%s [thread %d]: Processed %d of %d images in thread batch.' % + (datetime.now(), thread_index, counter, num_files_in_thread)) + sys.stdout.flush() + + print('%s [thread %d]: Wrote %d images to %s' % + (datetime.now(), thread_index, shard_counter, output_file)) + sys.stdout.flush() + shard_counter = 0 + print('%s [thread %d]: Wrote %d images to %d shards.' % + (datetime.now(), thread_index, counter, num_files_in_thread)) + sys.stdout.flush() + + +def _process_image_files(name, filenames, synsets, labels, humans, + bboxes, num_shards): + """Process and save list of images as TFRecord of Example protos. + + Args: + name: string, unique identifier specifying the data set + filenames: list of strings; each string is a path to an image file + synsets: list of strings; each string is a unique WordNet ID + labels: list of integer; each integer identifies the ground truth + humans: list of strings; each string is a human-readable label + bboxes: list of bounding boxes for each image. Note that each entry in this + list might contain from 0+ entries corresponding to the number of bounding + box annotations for the image. + num_shards: integer number of shards for this data set. 
+ """ + assert len(filenames) == len(synsets) + assert len(filenames) == len(labels) + assert len(filenames) == len(humans) + assert len(filenames) == len(bboxes) + + # Break all images into batches with a [ranges[i][0], ranges[i][1]]. + spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) + ranges = [] + threads = [] + for i in xrange(len(spacing) - 1): + ranges.append([spacing[i], spacing[i+1]]) + + # Launch a thread for each batch. + print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)) + sys.stdout.flush() + + # Create a mechanism for monitoring when all threads are finished. + coord = tf.train.Coordinator() + + # Create a generic TensorFlow-based utility for converting all image codings. + coder = ImageCoder() + + threads = [] + for thread_index in xrange(len(ranges)): + args = (coder, thread_index, ranges, name, filenames, + synsets, labels, humans, bboxes, num_shards) + t = threading.Thread(target=_process_image_files_batch, args=args) + t.start() + threads.append(t) + + # Wait for all the threads to terminate. + coord.join(threads) + print('%s: Finished writing all %d images in data set.' % + (datetime.now(), len(filenames))) + sys.stdout.flush() + + +def _find_image_files(data_dir, labels_file): + """Build a list of all images files and labels in the data set. + + Args: + data_dir: string, path to the root directory of images. + + Assumes that the ImageNet data set resides in JPEG files located in + the following directory structure. + + data_dir/n01440764/ILSVRC2012_val_00000293.JPEG + data_dir/n01440764/ILSVRC2012_val_00000543.JPEG + + where 'n01440764' is the unique synset label associated with these images. + + labels_file: string, path to the labels file. + + The list of valid labels are held in this file. Assumes that the file + contains entries as such: + n01440764 + n01443537 + n01484850 + where each line corresponds to a label expressed as a synset. 
We map + each synset contained in the file to an integer (based on the alphabetical + ordering) starting with the integer 1 corresponding to the synset + contained in the first line. + + The reason we start the integer labels at 1 is to reserve label 0 as an + unused background class. + + Returns: + filenames: list of strings; each string is a path to an image file. + synsets: list of strings; each string is a unique WordNet ID. + labels: list of integer; each integer identifies the ground truth. + """ + print('Determining list of input files and labels from %s.' % data_dir) + challenge_synsets = [l.strip() for l in + tf.gfile.FastGFile(labels_file, 'r').readlines()] + + labels = [] + filenames = [] + synsets = [] + + # Leave label index 0 empty as a background class. + label_index = 1 + + # Construct the list of JPEG files and labels. + for synset in challenge_synsets: + jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset) + matching_files = tf.gfile.Glob(jpeg_file_path) + + labels.extend([label_index] * len(matching_files)) + synsets.extend([synset] * len(matching_files)) + filenames.extend(matching_files) + + if not label_index % 100: + print('Finished finding files in %d of %d classes.' % ( + label_index, len(challenge_synsets))) + label_index += 1 + + # Shuffle the ordering of all image files in order to guarantee + # random ordering of the images with respect to label in the + # saved TFRecord files. Make the randomization repeatable. + shuffled_index = range(len(filenames)) + random.seed(12345) + random.shuffle(shuffled_index) + + filenames = [filenames[i] for i in shuffled_index] + synsets = [synsets[i] for i in shuffled_index] + labels = [labels[i] for i in shuffled_index] + + print('Found %d JPEG files across %d labels inside %s.' % + (len(filenames), len(challenge_synsets), data_dir)) + return filenames, synsets, labels + + +def _find_human_readable_labels(synsets, synset_to_human): + """Build a list of human-readable labels. 
+ + Args: + synsets: list of strings; each string is a unique WordNet ID. + synset_to_human: dict of synset to human labels, e.g., + 'n02119022' --> 'red fox, Vulpes vulpes' + + Returns: + List of human-readable strings corresponding to each synset. + """ + humans = [] + for s in synsets: + assert s in synset_to_human, ('Failed to find: %s' % s) + humans.append(synset_to_human[s]) + return humans + + +def _find_image_bounding_boxes(filenames, image_to_bboxes): + """Find the bounding boxes for a given image file. + + Args: + filenames: list of strings; each string is a path to an image file. + image_to_bboxes: dictionary mapping image file names to a list of + bounding boxes. This list contains 0+ bounding boxes. + Returns: + List of bounding boxes for each image. Note that each entry in this + list might contain from 0+ entries corresponding to the number of bounding + box annotations for the image. + """ + num_image_bbox = 0 + bboxes = [] + for f in filenames: + basename = os.path.basename(f) + if basename in image_to_bboxes: + bboxes.append(image_to_bboxes[basename]) + num_image_bbox += 1 + else: + bboxes.append([]) + print('Found %d images with bboxes out of %d images' % ( + num_image_bbox, len(filenames))) + return bboxes + + +def _process_dataset(name, directory, num_shards, synset_to_human, + image_to_bboxes): + """Process a complete data set and save it as a TFRecord. + + Args: + name: string, unique identifier specifying the data set. + directory: string, root path to the data set. + num_shards: integer number of shards for this data set. + synset_to_human: dict of synset to human labels, e.g., + 'n02119022' --> 'red fox, Vulpes vulpes' + image_to_bboxes: dictionary mapping image file names to a list of + bounding boxes. This list contains 0+ bounding boxes. 
+ """ + filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file) + humans = _find_human_readable_labels(synsets, synset_to_human) + bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes) + _process_image_files(name, filenames, synsets, labels, + humans, bboxes, num_shards) + + +def _build_synset_lookup(imagenet_metadata_file): + """Build lookup for synset to human-readable label. + + Args: + imagenet_metadata_file: string, path to file containing mapping from + synset to human-readable label. + + Assumes each line of the file looks like: + + n02119247 black fox + n02119359 silver fox + n02119477 red fox, Vulpes fulva + + where each line corresponds to a unique mapping. Note that each line is + formatted as \t. + + Returns: + Dictionary of synset to human labels, such as: + 'n02119022' --> 'red fox, Vulpes vulpes' + """ + lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines() + synset_to_human = {} + for l in lines: + if l: + parts = l.strip().split('\t') + assert len(parts) == 2 + synset = parts[0] + human = parts[1] + synset_to_human[synset] = human + return synset_to_human + + +def _build_bounding_box_lookup(bounding_box_file): + """Build a lookup from image file to bounding boxes. + + Args: + bounding_box_file: string, path to file with bounding boxes annotations. + + Assumes each line of the file looks like: + + n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940 + + where each line corresponds to one bounding box annotation associated + with an image. Each line can be parsed as: + + , , , , + + Note that there might exist mulitple bounding box annotations associated + with an image file. This file is the output of process_bounding_boxes.py. + + Returns: + Dictionary mapping image file names to a list of bounding boxes. This list + contains 0+ bounding boxes. 
+ """ + lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines() + images_to_bboxes = {} + num_bbox = 0 + num_image = 0 + for l in lines: + if l: + parts = l.split(',') + assert len(parts) == 5, ('Failed to parse: %s' % l) + filename = parts[0] + xmin = float(parts[1]) + ymin = float(parts[2]) + xmax = float(parts[3]) + ymax = float(parts[4]) + box = [xmin, ymin, xmax, ymax] + + if filename not in images_to_bboxes: + images_to_bboxes[filename] = [] + num_image += 1 + images_to_bboxes[filename].append(box) + num_bbox += 1 + + print('Successfully read %d bounding boxes ' + 'across %d images.' % (num_bbox, num_image)) + return images_to_bboxes + + +def main(unused_argv): + assert not FLAGS.train_shards % FLAGS.num_threads, ( + 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards') + assert not FLAGS.validation_shards % FLAGS.num_threads, ( + 'Please make the FLAGS.num_threads commensurate with ' + 'FLAGS.validation_shards') + print('Saving results to %s' % FLAGS.output_directory) + + # Build a map from synset to human-readable label. + synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file) + image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file) + + # Run it! + _process_dataset('validation', FLAGS.validation_directory, + FLAGS.validation_shards, synset_to_human, image_to_bboxes) + _process_dataset('train', FLAGS.train_directory, FLAGS.train_shards, + synset_to_human, image_to_bboxes) + + +if __name__ == '__main__': + tf.app.run() diff --git a/inception/data/download_and_preprocess_flowers.sh b/inception/data/download_and_preprocess_flowers.sh new file mode 100755 index 000000000..bdf15083f --- /dev/null +++ b/inception/data/download_and_preprocess_flowers.sh @@ -0,0 +1,96 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# Script to download and preprocess the flowers data set. This data set
+# provides a demonstration for how to perform fine-tuning (i.e. transfer
+# learning) from one model to a new data set.
+TRAIN_DIRECTORY="${SCRATCH_DIR}train/" +VALIDATION_DIRECTORY="${SCRATCH_DIR}validation/" + +# Expands the data into the flower_photos/ directory and rename it as the +# train directory. +tar xf flower_photos.tgz +rm -rf "${TRAIN_DIRECTORY}" "${VALIDATION_DIRECTORY}" +mv flower_photos "${TRAIN_DIRECTORY}" + +# Generate a list of 5 labels: daisy, dandelion, roses, sunflowers, tulips +LABELS_FILE="${SCRATCH_DIR}/labels.txt" +ls -1 "${TRAIN_DIRECTORY}" | grep -v 'LICENSE' | sed 's/\///' | sort > "${LABELS_FILE}" + +# Generate the validation data set. +while read LABEL; do + VALIDATION_DIR_FOR_LABEL="${VALIDATION_DIRECTORY}${LABEL}" + TRAIN_DIR_FOR_LABEL="${TRAIN_DIRECTORY}${LABEL}" + + # Move the first randomly selected 100 images to the validation set. + mkdir -p "${VALIDATION_DIR_FOR_LABEL}" + VALIDATION_IMAGES=$(ls -1 "${TRAIN_DIR_FOR_LABEL}" | shuf | head -100) + for IMAGE in ${VALIDATION_IMAGES}; do + mv -f "${TRAIN_DIRECTORY}${LABEL}/${IMAGE}" "${VALIDATION_DIR_FOR_LABEL}" + done +done < "${LABELS_FILE}" + +# Build the TFRecords version of the image data. +cd "${CURRENT_DIR}" +BUILD_SCRIPT="${WORK_DIR}/build_image_data" +OUTPUT_DIRECTORY="${DATA_DIR}" +"${BUILD_SCRIPT}" \ + --train_directory="${TRAIN_DIRECTORY}" \ + --validation_directory="${VALIDATION_DIRECTORY}" \ + --output_directory="${OUTPUT_DIRECTORY}" \ + --labels_file="${LABELS_FILE}" diff --git a/inception/data/download_and_preprocess_imagenet.sh b/inception/data/download_and_preprocess_imagenet.sh new file mode 100755 index 000000000..f222d63cf --- /dev/null +++ b/inception/data/download_and_preprocess_imagenet.sh @@ -0,0 +1,101 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download and preprocess ImageNet Challenge 2012 +# training and validation data set. +# +# The final output of this script are sharded TFRecord files containing +# serialized Example protocol buffers. See build_imagenet_data.py for +# details of how the Example protocol buffers contain the ImageNet data. +# +# The final output of this script appears as such: +# +# data_dir/train-00000-of-01024 +# data_dir/train-00001-of-01024 +# ... +# data_dir/train-00127-of-01024 +# +# and +# +# data_dir/validation-00000-of-00128 +# data_dir/validation-00001-of-00128 +# ... +# data_dir/validation-00127-of-00128 +# +# Note that this script may take several hours to run to completion. The +# conversion of the ImageNet data to TFRecords alone takes 2-3 hours depending +# on the speed of your machine. Please be patient. +# +# **IMPORTANT** +# To download the raw images, the user must create an account with image-net.org +# and generate a username and access_key. The latter two are required for +# downloading the raw images. +# +# usage: +# ./download_and_preprocess_imagenet.sh [data-dir] +set -e + +if [ -z "$1" ]; then + echo "usage download_and_preprocess_imagenet.sh [data dir]" + exit +fi + +# Create the output and temporary directories. +DATA_DIR="${1%/}" +SCRATCH_DIR="${DATA_DIR}/raw-data/" +mkdir -p "${DATA_DIR}" +mkdir -p "${SCRATCH_DIR}" +WORK_DIR="$0.runfiles/inception" + +# Download the ImageNet data. 
+LABELS_FILE="${WORK_DIR}/data/imagenet_lsvrc_2015_synsets.txt" +DOWNLOAD_SCRIPT="${WORK_DIR}/data/download_imagenet.sh" +"${DOWNLOAD_SCRIPT}" "${SCRATCH_DIR}" "${LABELS_FILE}" + +# Note the locations of the train and validation data. +TRAIN_DIRECTORY="${SCRATCH_DIR}train/" +VALIDATION_DIRECTORY="${SCRATCH_DIR}validation/" + +# Preprocess the validation data by moving the images into the appropriate +# sub-directory based on the label (synset) of the image. +echo "Organizing the validation data into sub-directories." +PREPROCESS_VAL_SCRIPT="${WORK_DIR}/data/preprocess_imagenet_validation_data.py" +VAL_LABELS_FILE="${WORK_DIR}/data/imagenet_2012_validation_synset_labels.txt" + +"${PREPROCESS_VAL_SCRIPT}" "${VALIDATION_DIRECTORY}" "${VAL_LABELS_FILE}" + +# Convert the XML files for bounding box annotations into a single CSV. +echo "Extracting bounding box information from XML." +BOUNDING_BOX_SCRIPT="${WORK_DIR}/data/process_bounding_boxes.py" +BOUNDING_BOX_FILE="${SCRATCH_DIR}/imagenet_2012_bounding_boxes.csv" +BOUNDING_BOX_DIR="${SCRATCH_DIR}bounding_boxes/" + +"${BOUNDING_BOX_SCRIPT}" "${BOUNDING_BOX_DIR}" "${LABELS_FILE}" \ + | sort >"${BOUNDING_BOX_FILE}" +echo "Finished downloading and preprocessing the ImageNet data." + +# Build the TFRecords version of the ImageNet data. +BUILD_SCRIPT="${WORK_DIR}/build_imagenet_data" +OUTPUT_DIRECTORY="${DATA_DIR}" +IMAGENET_METADATA_FILE="${WORK_DIR}/data/imagenet_metadata.txt" + +"${BUILD_SCRIPT}" \ + --train_directory="${TRAIN_DIRECTORY}" \ + --validation_directory="${VALIDATION_DIRECTORY}" \ + --output_directory="${OUTPUT_DIRECTORY}" \ + --imagenet_metadata_file="${IMAGENET_METADATA_FILE}" \ + --labels_file="${LABELS_FILE}" \ + --bounding_box_file="${BOUNDING_BOX_FILE}" diff --git a/inception/data/download_imagenet.sh b/inception/data/download_imagenet.sh new file mode 100755 index 000000000..966355469 --- /dev/null +++ b/inception/data/download_imagenet.sh @@ -0,0 +1,100 @@ +#!/bin/bash +# Copyright 2016 Google Inc. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download ImageNet Challenge 2012 training and validation data set. +# +# Downloads and decompresses raw images and bounding boxes. +# +# **IMPORTANT** +# To download the raw images, the user must create an account with image-net.org +# and generate a username and access_key. The latter two are required for +# downloading the raw images. +# +# usage: +# ./download_imagenet.sh [dirname] +set -e + +if [ "x$IMAGENET_ACCESS_KEY" == x -o "x$IMAGENET_USERNAME" == x ]; then + cat < ') + sys.exit(-1) + data_dir = sys.argv[1] + validation_labels_file = sys.argv[2] + + # Read in the 50000 synsets associated with the validation data set. + labels = [l.strip() for l in open(validation_labels_file).readlines()] + unique_labels = set(labels) + + # Make all sub-directories in the validation data dir. + for label in unique_labels: + labeled_data_dir = os.path.join(data_dir, label) + os.makedirs(labeled_data_dir) + + # Move all of the image to the appropriate sub-directory. 
+      print('Failed to find: %s' % original_filename)
+  <filename>, <xmin>, <ymin>, <xmax>, <ymax>
+
+The bounding box for <filename> contains two points (xmin, ymin) and
+> Skipped 0 XML files not in ImageNet Challenge. +> Skipped 0 bounding boxes not in ImageNet Challenge. +> Wrote 615299 bounding boxes from 544546 annotated images. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import glob +import os.path +import sys +import xml.etree.ElementTree as ET + + +class BoundingBox(object): + pass + + +def GetItem(name, root, index=0): + count = 0 + for item in root.iter(name): + if count == index: + return item.text + count += 1 + # Failed to find "index" occurrence of item. + return -1 + + +def GetInt(name, root, index=0): + return int(GetItem(name, root, index)) + + +def FindNumberBoundingBoxes(root): + index = 0 + while True: + if GetInt('xmin', root, index) == -1: + break + index += 1 + return index + + +def ProcessXMLAnnotation(xml_file): + """Process a single XML file containing a bounding box.""" + # pylint: disable=broad-except + try: + tree = ET.parse(xml_file) + except Exception: + print('Failed to parse: ' + xml_file, file=sys.stderr) + return None + # pylint: enable=broad-except + root = tree.getroot() + + num_boxes = FindNumberBoundingBoxes(root) + boxes = [] + + for index in xrange(num_boxes): + box = BoundingBox() + # Grab the 'index' annotation. + box.xmin = GetInt('xmin', root, index) + box.ymin = GetInt('ymin', root, index) + box.xmax = GetInt('xmax', root, index) + box.ymax = GetInt('ymax', root, index) + + box.width = GetInt('width', root) + box.height = GetInt('height', root) + box.filename = GetItem('filename', root) + '.JPEG' + box.label = GetItem('name', root) + + xmin = float(box.xmin) / float(box.width) + xmax = float(box.xmax) / float(box.width) + ymin = float(box.ymin) / float(box.height) + ymax = float(box.ymax) / float(box.height) + + # Some images contain bounding box annotations that + # extend outside of the supplied image. See, e.g. 
+ # n03127925/n03127925_147.xml + # Additionally, for some bounding boxes, the min > max + # or the box is entirely outside of the image. + min_x = min(xmin, xmax) + max_x = max(xmin, xmax) + box.xmin_scaled = min(max(min_x, 0.0), 1.0) + box.xmax_scaled = min(max(max_x, 0.0), 1.0) + + min_y = min(ymin, ymax) + max_y = max(ymin, ymax) + box.ymin_scaled = min(max(min_y, 0.0), 1.0) + box.ymax_scaled = min(max(max_y, 0.0), 1.0) + + boxes.append(box) + + return boxes + +if __name__ == '__main__': + if len(sys.argv) < 2 or len(sys.argv) > 3: + print('Invalid usage\n' + 'usage: process_bounding_boxes.py [synsets-file]', + file=sys.stderr) + sys.exit(-1) + + xml_files = glob.glob(sys.argv[1] + '/*/*.xml') + print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]), + file=sys.stderr) + + if len(sys.argv) == 3: + labels = set([l.strip() for l in open(sys.argv[2]).readlines()]) + print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]), + file=sys.stderr) + else: + labels = None + + skipped_boxes = 0 + skipped_files = 0 + saved_boxes = 0 + saved_files = 0 + for file_index, one_file in enumerate(xml_files): + # Example: <...>/n06470073/n00141669_6790.xml + label = os.path.basename(os.path.dirname(one_file)) + + # Determine if the annotation is from an ImageNet Challenge label. + if labels is not None and label not in labels: + skipped_files += 1 + continue + + bboxes = ProcessXMLAnnotation(one_file) + assert bboxes is not None, 'No bounding boxes found in ' + one_file + + found_box = False + for bbox in bboxes: + if labels is not None: + if bbox.label != label: + # Note: There is a slight bug in the bounding box annotation data. + # Many of the dog labels have the human label 'Scottish_deerhound' + # instead of the synset ID 'n02092002' in the bbox.label field. As a + # simple hack to overcome this issue, we only exclude bbox labels + # *which are synset ID's* that do not match original synset label for + # the XML file. 
+ if bbox.label in labels: + skipped_boxes += 1 + continue + + # Guard against improperly specified boxes. + if (bbox.xmin_scaled >= bbox.xmax_scaled or + bbox.ymin_scaled >= bbox.ymax_scaled): + skipped_boxes += 1 + continue + + # Note bbox.filename occasionally contains '%s' in the name. This is + # data set noise that is fixed by just using the basename of the XML file. + image_filename = os.path.splitext(os.path.basename(one_file))[0] + print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' % + (image_filename, + bbox.xmin_scaled, bbox.ymin_scaled, + bbox.xmax_scaled, bbox.ymax_scaled)) + + saved_boxes += 1 + found_box = True + if found_box: + saved_files += 1 + else: + skipped_files += 1 + + if not file_index % 5000: + print('--> processed %d of %d XML files.' % + (file_index + 1, len(xml_files)), + file=sys.stderr) + print('--> skipped %d boxes and %d XML files.' % + (skipped_boxes, skipped_files), file=sys.stderr) + + print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr) + print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files, + file=sys.stderr) + print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes, + file=sys.stderr) + print('Wrote %d bounding boxes from %d annotated images.' % + (saved_boxes, saved_files), + file=sys.stderr) + print('Finished.', file=sys.stderr) diff --git a/inception/dataset.py b/inception/dataset.py new file mode 100644 index 000000000..752c97e03 --- /dev/null +++ b/inception/dataset.py @@ -0,0 +1,103 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Small library that points to a data set. + +Methods of Data class: + data_files: Returns a python list of all (sharded) data set files. + num_examples_per_epoch: Returns the number of examples in the data set. + num_classes: Returns the number of classes in the data set. + reader: Return a reader for a single entry from the data set. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import ABCMeta +from abc import abstractmethod +import os + + +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +# Basic model parameters. +tf.app.flags.DEFINE_string('data_dir', '/tmp/mydata', + """Path to the processed data, i.e. 
""" + """TFRecord of Example protos.""") + + +class Dataset(object): + """A simple class for handling data sets.""" + __metaclass__ = ABCMeta + + def __init__(self, name, subset): + """Initialize dataset using a subset and the path to the data.""" + assert subset in self.available_subsets(), self.available_subsets() + self.name = name + self.subset = subset + + @abstractmethod + def num_classes(self): + """Returns the number of classes in the data set.""" + pass + # return 10 + + @abstractmethod + def num_examples_per_epoch(self): + """Returns the number of examples in the data subset.""" + pass + # if self.subset == 'train': + # return 10000 + # if self.subset == 'validation': + # return 1000 + + @abstractmethod + def download_message(self): + """Prints a download message for the Dataset.""" + pass + + def available_subsets(self): + """Returns the list of available subsets.""" + return ['train', 'validation'] + + def data_files(self): + """Returns a python list of all (sharded) data subset files. + + Returns: + python list of all (sharded) data set files. + Raises: + ValueError: if there are not data_files matching the subset. + """ + tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset) + data_files = tf.gfile.Glob(tf_record_pattern) + if not data_files: + print('No files found for dataset %s/%s at %s' % (self.name, + self.subset, + FLAGS.data_dir)) + + self.download_message() + exit(-1) + return data_files + + def reader(self): + """Return a reader for a single entry from the data set. + + See io_ops.py for details of Reader class. + + Returns: + Reader object that reads the data set. + """ + return tf.TFRecordReader() diff --git a/inception/flowers_data.py b/inception/flowers_data.py new file mode 100644 index 000000000..022b5234d --- /dev/null +++ b/inception/flowers_data.py @@ -0,0 +1,52 @@ +# Copyright 2016 Google Inc. All Rights Reserved. 
class FlowersData(Dataset):
  """Flowers data set.

  Describes the 5-class flowers corpus produced by
  download_and_preprocess_flowers.
  """

  def __init__(self, subset):
    """Create a FlowersData instance for the given subset."""
    super(FlowersData, self).__init__('Flowers', subset)

  def num_classes(self):
    """Returns the number of classes in the data set."""
    return 5

  def num_examples_per_epoch(self):
    """Returns the number of examples in the data subset."""
    # Fixed shard counts produced by the preprocessing script.
    counts = {'train': 3170, 'validation': 500}
    return counts.get(self.subset)

  def download_message(self):
    """Instruction to download and extract the tarball from Flowers website."""

    print('Failed to find any Flowers %s files' % self.subset)
    print('')
    print('If you have already downloaded and processed the data, then make '
          'sure to set --data_dir to point to the directory containing the '
          'location of the sharded TFRecords.\n')
    print('Please see README.md for instructions on how to build '
          'the flowers dataset using download_and_preprocess_flowers.\n')
def main(unused_argv=None):
  """Evaluate Inception on the flowers subset selected by FLAGS.subset."""
  flowers = FlowersData(subset=FLAGS.subset)
  # Fail fast when no sharded TFRecord files exist for this subset.
  assert flowers.data_files()
  # Start from an empty eval directory so stale summaries are not mixed
  # into this run's results.
  eval_dir = FLAGS.eval_dir
  if tf.gfile.Exists(eval_dir):
    tf.gfile.DeleteRecursively(eval_dir)
  tf.gfile.MakeDirs(eval_dir)
  inception_eval.evaluate(flowers)


if __name__ == '__main__':
  tf.app.run()
def main(_):
  """Train Inception on the flowers subset selected by FLAGS.subset."""
  flowers = FlowersData(subset=FLAGS.subset)
  # Fail fast when no sharded TFRecord files exist for this subset.
  assert flowers.data_files()
  # NOTE(review): the train directory is wiped on every launch, so any
  # checkpoints already stored there are discarded — confirm that training
  # from scratch is the intended behavior.
  train_dir = FLAGS.train_dir
  if tf.gfile.Exists(train_dir):
    tf.gfile.DeleteRecursively(train_dir)
  tf.gfile.MakeDirs(train_dir)
  inception_train.train(flowers)


if __name__ == '__main__':
  tf.app.run()
zto@vmB6KzN5k{Lr96?g&)e4}n{1eTb+?5C3DBx%E9cfc8W)N8GAxXseJ8@z$N#-Z) z8~<=QBZqz6W!DbzO5G?TV<)@?Y(~O>MF03J+xOSU&1QioSqgnu#qs+|z=RKG#TNuu za$lb|U@=bxAK!XC`@2L$tn4yud=3>lcj4M9_-<4K@0-f&68}oftR?wz8YO9#?&LG) zRs7)Dic!tey`5U{!Ay~in~YIHk!1wHEtSA&EPQiD?P<$Osk6W6!Lo|e`B!dCe1Q*c z80TGUYC(r$jj9$Nc=)OC$OX73v^q;Qy<=U?Ay;U!X45r=k;q~Bf(e2ach#oCG~3&5xMLQc{*7ay*Put@+Yh_J)DoX zNOT?nHX8bVF$p}co9eheXc!w>XNy*9wA2S8DPzYqq`|pD!sT;x7%$J)B6_z&x-LUAxTF|A9NP^M_ zZLC0;uUmDR`8(h`<^Zj(TYF4n$}1#m2x)VRY~U27A7+N@%!Y8b>J{Uo{dXR-~MlgJk0% zC<x&-P0!gGf)KKqLupV_-4)vEG_vzcKGhwW7ms6U_&} zE3~OdG96z)<5mKfpb}UDxB6;6V#Nc7wL>AgzrGjB$0q2#d$}NYniPHs9T%M;T5KlW zcLhzqFV~2i<0cw4oJR&ACM3z$MJh~;0md^K`?2>#hgr-G&+X7~gURy|SQtP1E&Ooj zk%crem|HjskKj@cS{HVZkLK8+z zw-FoCkHq?F#%0A2#zt{F#$Oah9;?$wV7YMM_`5=IzXQJ0%sSF>UDW#dM-pD5#MH2# z>V$~lF#^-h4Q!4eatTZ#D2XjB_az8&3G?v2OkB&}O33>vflBV}+^-?878)+KhS8GhxmUJR z2S_-^_Fa}lm6627y(6A56~m+snB-nHT^7ymC=Lhy5ZW}dBVcEI?rpG^BRt>Kr2OwGr!e7k2_%p~_dp%bU^z9D zWC1ORUE(=WzmayD!GppW3!-lA1p4UlGg6ZAhJlnygK_D!W68K>gd(h2Vy00`Ew&>v zwrTSUP*0>I!8*2&p$4G_XBUG}pu!%h5eX{;!OD?0oWZ1ZvF@U-GRc-O8WikycZ7)o zV4X_*n8kgM3X?8t9hdwZS4d`W#wEn)mCRnx7y8h)AsOE34?`I$*pbp>E+5fx-&tZw z&qazDlYbISWsHyot0dEYScqD7*O7XAc!9T*lBY0WpRMR>%4!8{*y*gmt1)i}hNK4Ic@wTNVuo|BSINUi(5nBFB9qh7qX}TGEabtbLP9$-I=<^* zgUKvxXwYuk+JF-ZNzFxvy`sx;7E0mhbh?cP)WeaLNT!{KP>~GdT?xs)aAFwdVx>rP z*^Z+2!Dgt#kC%{L#2ayH>BzJ+L@j0W*r$UQm|vn1-6xAetcbrJQe)jam-V&;ron{l zUu=@*kEe{GWu4Jom-8lHX$5ncrv8!U9ZY`waz<6B0+)xeET7Am<77j}ip=ad6wz0d zL|c1-abU%rEBWuDhG?FRIh+>b+kV`PVOY3yLi7h&#UC(Hg8gQM#{&7v)N;M0s`}Kn zoFTX6^`s+`OW$ND$S|bcteH~{(h}~@@umSe>V%gtKbb2YZ`zQ|iPLXoI7>i8*(rZw z{6H2^N1^awQVG~rLRNt{v~>btC2Kv)HxcRMc^{@J2CMqn1jq?CIf5NfEd2W1md? 
z_fGEZb>0;M0weBA2eoYlGHO(lG2c0YJd2zr#<95eUaa!(o*qZg7jsKe`B?+2cZ&yuEQ0M#L-0F86HI(emTiCo7X6N{PTQ9CdFhf4wfG`qu$V zq8mEhnTu|;bRZR} z;gSt+ziB8;1I?PDqa_mFW>*oa4%E_R|8Z-nb;A&5V_~-#xLHX;a*k zg#n&XSd$Td1x_Zhzi%N*a^{%5S>)r2bZD8+!Y_M0kO5@MNszr?++SnSBO`S>6@zGM( zRyFD6vfbYG(K^4d@N)5ftYW7h%;0@0T$sMTzDUlI-}6e&&cNqjU+5ga2cU_?P>xSK z=h7UT?NWhU!`v||eqzq|@UsVuhsX$Wx`EM8d|6lV1aAsMo(DdA_-G$u$ahNeuaR?n z!uI4iQkYp;S$TeXXrx}rua1Nm<3@F^q*q^LLYRzjdPe{VZP;p*HVl)LZwOJ})Ktvp zXaSKt&OoUQ7A(#_5ky0q#ZmgI9z0Lz8#%-B(kH z10s$gRfOp~%1sym>^B4i?4})l&yNr+O?BNLRIu~{Z&rQRJ8#y6#l_7?J#?O!1624* z4ec%ckt#`mGc%S(O#kl6gu=PvLc@Vb#y|axVRpgDNs8R}2XsI2nf4=MlF58}I?(GL zy?D3u&|(-(%;|4=Eka6(iMv4o0{?K+h(wm)91R{!gO6-xdLB;HwR#5NA_1* zF=h&%y*+6^ecqQBFJ6)dlH7sl#im`GJ)f9G(_QB6LDsg#OUh{vt^GgKd`cbknz!_V z0RzB@bHZ?i)MtT-`fb|$Dru76F;*l%(C{N;NH8-NosHh3TW52xu-|t(5O)r6P5ZjpqwPpFLk)MXE_YnF$oRhU7rp$u$UBpgvd00U2YKw^tanDQ*J{aS=%j(VV*`GHh~Q zGUm!{oG4Z}Dbkv%sn$qoHsN)%j=ZyjodGvu_xmM~>frmWMEiB#OCZIZdB zrzLd1AR;?VaoCLem;e_$E)X|;JeX?v82>$gx4xKIF=AB1eo#AH(6R62$J_4P0d}$L z1*@8l3l!DzXbjT(aXT^TeIwmZ*q2JH*$Uf?fU#ZQGns%otM2tF$LFC%Y@AwEBEZ~_ zy6HG>H$v?82wcuxRP64bF(Y_yN0Pk*; zTiKspl(8U%!B7j$*UaXMHB8G#DwbmGG{4IU{*cSOb&<0k*pU0LA)~2BnekkCutdu5 zg0&rgIcFky>PR!ll+#+e%mtpaphxm+Z`PR_ClDDJSxZ*7@l^SJ8pN?4(C{!vw@LkU zW*H~Y%8ZMDtDPHsRblqDfhbN9`#nfj)=x)AkFUG`dBO*jK5spQlXZf`oDy+s-SMZv zvZ#YZ%Ia`-j6+`rwO31^hw=h*I3d#Ni|L;cefHuX+Y0$_hM4Hahh?aY!g!F6vlnhX zc_G5oE7=ly6~c9J^_Rc<`ufy@FK|mY0l}AfAUAUY18O->8R}gE6L#SP+qxPYQ+oU! z9egFz&oR;1$!~ro_nxJ`F#1!XaFyh93KIgHtg_feB# z3#3P)MU9dnr&GfIOplHqf6kBb^(@AiL~KYyF7t(vXRNo*C6_9|a9Be|N|=u9**LN1 zfL}px?Dwk&qzEIX3e6k>QFyOEtlbb|pI|<3VE%|{NRGx(qI$sIGfM}vOxh;0pZc-D zP+{p3S;|mK9LP$@EXc+rtkUsqHKm9YU11Co zr(m-s^xzj*(K*TaQ~uH5ifw{rbfO(3)C(h2elUu+br;oUt!1u&Vd~p0iBYY|8q%6! 
zmr(*HT(vuB^UGIEb&EG#@-Y(X!}9v`^-ajj4SC>MZ)I8W`VLX@a4KiRc1P|SQL$Sp zQW}Vm$n9sMr#eiBN*WUh>|zQbl}4r?u3jotD>$jM79UIto#+XVmFLuGYoE)kRSm1; zNt+{UvCXfR&?KJ>?!JxhHb?5J#7j$ausIaIThB5JZmqX+-5XlqEHj45`O+T%Up<=|L@Oxg#j4iD?ty}b@41~I)-FQ$=BaZ%v1oRn_MCXSDF1qJl6e&)AnEm z5h@8tR9}g`*fhu<5^m_Jf68;FMS49HIn?w>$w!ZkQa7qLMt z4}ARk@~5cl<>{1MJXmBtKU1}dfqkW|TX0g&W@zO*ug~~kf%BE2@E7cZP&dUPl{UBW zEZLg&^rM(Oj$n`<46bLXs58$)cD*TiskpMi>=uAS>K^dR$puJeX+Sd&qTF0Rrh{W$ zpepRTN`033>+z>U$e7P>;QzXi|Kk7lkg9z|ArJe?nkdU3c)e&>6!Ls0{?H(P=p|&J zNz=v3WKDZe6|RV4p`>8kr-Ya2Uo~xLc+ix4`~zyt5KTLJO&|^~EEY8|{_RHfG!f7- z(Y6BVQyoL}Q|xNw(lEO6q)3|M#on+)M?A#r9OlufaHIPelNUHlR4I#8Wj3$ABJdR+7zIGa z3nO*Mrb}Gy?OMQT5CqI}Zq}^d+-v5?9*SmBHrHc#Tk0GMpv9WPao7~5mlG(?8m`u;S=n}>BRYDmVo$hpiO!36Ybg$aFHS9F)#6U{5^MDEvkUv?LP>|M9uX!76|S7B3)_wpce;UxL* zm@@Zwt)z*Q#x>$Z82$sHJs_I~hP*2xw!sbpiFdqlyW0azl}?A&#C5*t7!K*ULV zW+9tZ8bO?`-sTyIe4u9aHvSZ%dPmhl?q^(^m57uI*x(vOUg!$w#3`0wd(9VJqcJB5B7J^E&}k|cN-jsKu2*t2GSLDU-V@_e!UX8#l7G4rlxTDk>_R z^=-fSWB)sXlbeT1DWwUR^HTXeUR@^Sc=VNEY7IW0)gZsAweTTe(LVZpr$sl5ruH%d zH12{%GfUP%!7-rIw_dP2U}!Ts&X8F~5v(kOv?YSQKmo=JsZ^DTyiDgG!*NudDU*gc z5u*NohQyyaRa*=KNeH@aUUGOQ?k@L2t{aN5O<|iYiDZZYE5LLur;pfn1Ko{VKT--F zPTt>ybIbbH%^5jjWGy!af1LyT8bv@L0xnhMSe*uQ@HYf3cVgn(R4^EBG0Ys&bXX47 zZQYUrhP=T!KaVN~CGs^9_>5k2Dx7(s(_4(U&0em|LVhcmwLw(ek4S?LTP~)+^aPQi zK9VP&f(QAHDqmy?b@l^JK9d?klfd8f;vH_A%G85(%rOj-AIdN97fq>&Dsw^9klTLplhyW{6nV0o z@!H8bm*S&V4efov%Qc|L4~$%TR*7L2O?nBP(l1dmwuMk=+~))rZyVFT^l{#PgSF$& zqgwEa@QrYv%6BAnt-MWJwLP6CdAW!LxGqXN!fD7`zEpIJ#YZLUOIcEj}HH z0>~{NhbVvy+ve%l6o4{J#_hwO8MUoIh_{<$tC1bF%YkQLO`}q5*a##_ypI`==`f%~ zD`+-mi|RbN<34KRCrP+xL7r~X1yIXPO*R<{pT;rZUM<$!~e*# zDc%w!PL`PxfB7*x^1&)7q%;332zWTaRdAOP`*r4|$?)HC5SFL9Q2q!&eVGP_W(s?bhtu<~x zXPGWR!tO9wM5glZsoSo0-hjz2&r3;3s^%M1v)jIknyxs8aU2@irAZm0X%V|FLHyQd z%|_~@dIz>TYc>GA7!z3D4tGpkV-@19W-=?6_(c8K+7mjncUT@9t6eWWM&ECBAJzD533z`zW zA*o2lYw%IN-x_Np739k!rFU`X{z*P?TXGa5?hX7>AgKco6Z=Mz1(1bA9Y)iRa|W-V 
zUPThhfECqlHcF-%7ZsWR{H!0dd6_eigoVSI&VKQN#qrON~yeyZjA&gT^YIJ?5^e=>ln?ug3D%mk8)KQB%NXTp6T(sJ!}x$0FW0t=jRc>KtC9&~D0 zY7yHvApH_{n{UfYLR6YWCVJn8%3d3>xbk?E-q!UKpm;K2Gc;y}W+n+=I2*ZW?%@*% zN+dA-^qraei9tB{ex_)UGthY79z+>rgt^PCxAvJ|K_cfC0lKaA6@NfW1u9XXS$wQy z9Q^KaL@hXVadFYWtC@w0+i0ib(vR}FnAjG^J|<%cDf+K80C*str`0eIHxFIProdSA zSinJk`#FtX%{ChB+~RybVJma!NeyW^1w<4szY@+v+EMFEhuL!tRg1(!oVF^8=**!$(|u7yoQWMMYjyIhHhh@o{*LFs8$V#+F@Z4gfh%778^pRQRHZd7j{IGtv^_WEmhUY}pQRwmYtuoJdBje`MU| zs=GESSFm9jRb9;K{X0sakylh<7PnH(BDlA?y$1Q${I z7J}r10QQ!6!W7$ddhkI9T1Eal8G7FgB&tT#tJ)G{ok@fF<4K)lk`-n$66=d!s#Bxb z&02EzGvW;k=qF63fdY8cH+uwR`fpc@)|s7;idmH;JY5ZD4BTIdmieHezst^UR$@~Z zQ+^Ht;}&IC61>gUPhEAO*IRAld8onn!`fSSqXp)ir`uU6X1Lh=YI~k2S*Ti!7$&}f zc(sQ$-IE?N3(LqBDigX>mL(I`LmFY4#>5%$Ar>uTFS>dmEaw)W&3%-dncXd}lI;pr zz3*F)#N{XHXg}1xFAFIIlzOcF8AeHv1m;{r4(}`N+~#Rm!;@bhHOV0yTMzVH?+QXH z$t(5O;bqbgpf6)jiI|QU!{f}39FAe8KCaSCuE42gPZ`;WLCGPh3H4C7s;T*&*Zk|t zcPk`NmYPLnec5?DcJuvS&Bb8p zymQ{XHm#ZPewF2r6Cg#9ogPwH5X8-Tt&=>=i7K=sn;Jj)VWrj6{-Yl*Knk$9lKlg# z;Tz%oW@vYEoDvT9*HA~ZlMsIuyy{%W_}zrxLs0hCvZ*1f5G8`gEwKifi;45A}XE)N;OuFr~vRVu=>#csCm)r2ih<9RMG=4=3DQsZS zbNnWLm@@*9iUkt(rZ_b45@(ry@7HF90|j#u;qBBi=srhvTFy$J5AyQT4B|;!N#|k- zSN$S9IT}u(9!er7M?;vfO^YPfyhV6Zzj%}_+L2aB+_jofFHAx!jN?I-{uKO(|5P0H zLro}wP^s1mDR{?Tpt30uzC1f4bsiA_5n@mk*Nr30ZwiCXVZm2WPeT#rL>U@Si(*)~ z7X07>`k_*WJ%fb+l!gb^jh%7JvQ7$F>EdIPc+SS^%gV$M0#6N>f?3~lp_f|7i7Yod z%?F%z4(aA$P46~r;VXx(=3ZG3AxVj#ISrb7F+A@-cNX)p9E^Y}nTC9fFS{%!jvDnj z_7<6ZIZR5PYu?j4K_ZKwp_8}1+)~tGBP3k3Il@x`lRsJUqjCMsM z89U{AixzxLDLZS4??1@zdWF6FzZ{jJrbUza$AlfZF|5@V1EeJL27n}zjHz&MzCBZo z!VRME7Oig!lcY%3Zhb7X8#9lxGM?&NqoH8NY9owY- zs_r6zf%ELqAb>~L13#R!Clw^GNdQ*RE4&%zWzq@r#l0_{ZjxB@#iKi7te9}Y9CgUM zw#}!aem7mP(Q$)O$7vs!#5fkaLtg&MolV4+ecRh@w3_pojyb<00Zu|{GLN7Dg2{rR zjBIL-G7M^rffW)Tk=l?KQ30|og2Kn4yMo%^-VPxAA{E!6Sf5;Den=kMo7xlhH9jUI zTSsE(Em{o$gPsn}>^5y@W6I~;J~6Zn6+WDw`GAKHaQ~|8`5%F1Ya~#=o9$l_%+a8=Ray zz0+!0>vyzRL@InecVe=97vpzrP=jW*W_w z!2SC|vwN|S6kJNAY01z!`;sCrb_r1|c%sHh{I*j1r4%Sr1C%U<0`8V7ewI+X&cqF4 
zhpaYQ%bX9eL)qR}S*C&58BHuv9XTH-?VlR0dxJw_Y;xdxN|MCx?+KMKaBrRM6Kk;Xx4c|Jal z2K!{v;ugl!ZZ#^rgod&;SG(2fDR4PCH3eI{e4JU5?Al-)&(C=;?3AM(ZB(_}_j`ue zUjBBQfbxhaGIzB=*w+8xkI>*()Sf5vM)K~ z5QFX3B{w?WkfGV35zuks!IwoU7&ACD&E#udYEgRq`7`b8^>{Lz3IZJ>2oZAO!ythC}Dqb;&g zK{>z6X)#3`)d=zNL3oS3zJ`jbwBJ`^F>U|dG11>3DYRj1Y;4(g{3am2Y$Pk!x4-mws$yjnJKd_4Z5Hs(h&AiNpKQO_GX+illV`q+nMBz^rv>T=CxJ2iCW6wdu z&|Aw8&=1W;+H=F=gExOZzCw7*I5;76AUFWk%$fwU&dtV!2ZE4-Xt#cCavDpc zlVx-oc9Nh4lvrk0B*u;DCK{&EX}eV7%uiQCa^J?gDRcTBXB)j+LrEpvyD*-Gj{&6Z z`tx*7OPUj|V#2^Y_)KLw?Q0F>eq+02+cXO`XQR0T(Lq`oTuK>DW2Ac7sE+qIes#eM z;!jnmhDU-UPc#bvo1v$Nn<4>saMOZ&=N11;BUX#3UbRzs&ihxQ(c~UF%H+t&Z(IhS zq=nnvHeodI2Bp@j65k~lX2A+4v>!+N zroBMl_+dU*+1T;z^TTNkV{E?Qa_-KgYcR~#4PRb&@!ad?pAE*|<^8+?%vi%C7(Za6 zC#-`>$)UM2>c~~4s$Mtd-S9=M&L<>-MxkEEoZMr%hBby!3qc~he(1shY(J{sQO+MC z6rPq7b?RanlOqMc^!giRRd6CF;#{As2Ar%2$|#SHdruCiewlACaT*s7qfI{)89Uu= zpqAqJHFhBWsI`HS-{~Zi6T?`n_T6x&EXq4MQX|?yc0h2sZjt7Lf}^bmGZ{w7cZ4~! zmvl*;ojn*PVMxfBAFb2e13UzT83l?!cgBhrrUbD6&`CH zGM_;y5`c+R_p7nsG=DUZv)EQG5-4@)1~QYa_S9RsO4<#AKr zLxu>(tmI!7eYjgOCdZMfIUa=_>qcir$3PI|B$M;KP%;89T$C_-t;|c<{?<{fvG~H* zRuB4NYVHb?3*uRC$7-Bl@Nn}q9h)Fg63J$)))iZ){D}OR_i=BcD?P;HSyo&RdL{o{ ztAi9_>YI}uJJ$+vdcGl3j;^LMR(MmAf6@58K;KSRz}&Nh@8Iiou|1+^`hI6;{dpJC z8KI3KDbOcAixuP4FII;%99zBc4L@DJGY z_V|I>o;C@?X$8x*?9eoNhAuxxirNBEBKPvj)kML^u}SsR_(rgoOJh8HU#QG~#H!d} zFjN0~^wmR0tL|W7?$2T&n}129c6>0z&}}a61uyRe3yJBJVlEGCo#Q@_6N1|3%EX3;8%EH|dhyJwlBn;ra6|};Nyqw@w)K}|nl+vn1{7W9EYX7dCZzTyTDu_d_`}Z5 zb`M!++Ic4hIhLI34RAiv6}{QHMl|5sTpj~I4vb`Bk>O$znfIO>Qo~o!?o6s&a}KsdU|&*8#Imm&kM_r!1Zfuikh%7dbBo58xCtI!BcmpaHkL9)VYmU*tjVHT7So?LUh+RJ}Ppy9o-#m`m@Ilu`U;Yz+GH-e}RleeS;N`>~r1l8$^2{JzJJUV}g^^ZsbEzx}S7k;u z>(YN@HAyi1w=F0!1|s+*49Nb(if}G8xtyS425(*#y*w^Mx_)N0)LhoPo;-^lNS?c6 zrVp!Z_+>2pqI3P$==1u7)3-5wllp~E#naYG zQ6^WRU7bV7TibI76OXQ>Ru{-x8mBlMvFtguvm7kF6uWHy^IHIfyL<5X%$(+S3#%6^=NY8ND~evO6-fL?x~bUdiLrjKfp0Qng(O1Z=lHS_#dz(`s^? 
znwdwb$_!cXI${6lLV8P!0VP(P|7524BYO<3L^q9%kXL$*`AVhOuQD6Ev?B; z9ClWduJ3Sk=1NANNwEf3UqHA0q@1K`bF2hGzic#xYLoSLz5Q;5dA+G3IS1ytsar6Y zH~6RC-qH{Vd7-Dpm{C4ZkQ7nTV9ZH@w?qIKp3dzd?1BV6N%W;=g{Z|e~)4;U3VE{2PIQHzc%Hz@bpShb(Ij-f`u>1h!PCY7ZWmn+A$b2+&5A&?X<=E>?!P4^XRr3lp=M@E9D zPYegia)ND~g;Uf5UO2prRFmnqsq|w3Wr!H%hP;yA8mgYgQv)+HIJ2=$vmL z+Nbp0DQNcq(zPf!(l&vhA(PuEpcYG_NsMt$$0ncXTGdPCuX2S*_P7m;83-;|@4OrP zTC~!@ezZU_iR5EHC_S*ibB>gaA++lmd7+UX%pHfKV8fWXp{^W}2C4YkpxP!lEuTK> zQq(rqOdEyOdUw#lv_|9bH+!v;T(E?)XjU};4*b!tz~A%(pZXpb(WPRe0)=O@rS@k? z?7HE!vPrhnQbCR@4m6MDJ-kSp1#XvioL%^{$Mm+`%nsQM;SI`ypeRDyH<3w;??3KG zh)Cm*#;4p+O88918*!DspBXani(u+hra468b6Mj&$`0B5*fQc-n+JucgF{I{DGy@3bXAE~Tn z+*UV|0<1=Q^di_|f(BWWR7DK>6{ZEz(A`_tTn7d+V?WjD56AUNK|1ld4J&ZjVL80o z6zdIi&S=enYIQ?-(ps&YsxIxwwDo*+>&@OSzYuk8VQOCZ0EyY>P znnaFo!MKpC)b)A@Col_quHrX)39kwC9=g7xlao}FqfJn@I{QP^8mOte@&rz|(HTu8 z8_?VXLk)S7q32Brg+O)?}MFc(Z=YoOSP~}B^B3$bLaG8UC`c1z-hcH6Pd*0m*s^j z=h%m~e2JC)b2y?|A!9>7vW;W=7jTX*&_uw~U*O1fOk_6WfIG%6AY^&P$Df4gu0w6F($V(4Eaw8}qhSfs*9shk ztbVvc=7rlc;Gt!Qjbsr?<1L~uM#NYzc*J+zNa#U#gyT(zLa)WgMThS~S2?OBi(-Da z&&8resQoOj`Sma-3Gbso8<04lInfa09v|Ab^fciklc&kyg zJmnt0k47-X^aUP#xJC6jnF4$Tz{)v!%VONkE_5Vj@0|p!T5qFp+vHv-9Wo9Ho~%M!VrRYewsx6=mcSPM}ct-{bro z7@>jCoq>Sg8?AwLO4UUY2O=PcY;lX&kl#I96#`6I>~lhmkvD6iAt$qak)ATc+W~f< zAv!$`kyTsU6G5*Kr#;hr!{3-uWI6u68k$_V%^W|)KALtrRCn0;breeswBnpT6THul z^L<(UOrtV=tgzPoiEk3mFJlhLXO#WiMe8P_>cWoy6D$=S=jhwMEviYlfuBE-9*T`E z-x(vOknOef40iRki>s<15q))7VXZN4u4+qH(z+)70_~NAtvNkRN1T>Xf9O`GY&R1j zrH7x}haQ1N_|*sp_ILUavUCz@0Ncw^w`ij3?NG=C``KqlD@&3Z%{hx+c`eEZOR+VW z1UGv5Uo~4i2|;}~{GHd58}e^Kn2llyWEji`f#brJmzw6~Gmhw*+9;~$$IH890e>z} zA$98+@gN-8Dj8rFeq2(NQl#bYGy9SoL z@?EcAfAX?=lz(+`P<^4PMQFFOpnb!gS9MqSOW$GV7uVc5OccQ7a|sy{=aChFFu}f0 zJm{WLxXz`6+*vzH0&PDzmecSju$;vv5>~M|Z|rV0*wMBP%6(wd_Kn{7AMA-*y^5=` z0nhH|XEQ)vQ9;q68#KAw4p?eogFF6O6W|73U5hLdze{}CTpt=clWUk7;}rAoY^#Ek z+8#<5IZUiIuh}nr8A$3H+EcoWG4LYDg%xQg>VI}??G~Wol05&z-FU4gO|0K`f3g5H>zl;hsgOTUYd`PWAWwsSF;Awim?D+^2(mR-xmWba`NMil@_KWt6R84_|X3Zdb 
zc2f8KQC+i{Yh!$l8-?6^P;*hCWL2}a58@NQ>AfIqIr-ybH%~XJQ#?iSb@3_EBj`1U^_yPaJ z&PHGBrW3o!HYD?^_>%eJbpN-DQiSnEZ zY-Fm%BdleO#;ZXFq!qb3Hr1KN(w0{{s29@>s#KyHYYPEK;ycLk8{gJH&)}*W=z*KSFx;A^qkG^r~Ap4H@&X!*Cx6^P3^-Xj9~dIvIUF|yM;6Apuq1)m?g@iEW)C@Me>teBCzYp7X~)WIjrS0! zY>U<$9a0^B0*MulXCkYyady%oG2jA_xmN`Qfzi}QZq=K1n& z0JrGrkF-jDbU?U|%Sp9pd)w2_@nwUTw$J3$X?DTObjVY5&C_|e!%1h>qy_Z{r7z3x zf<4y%z_|k8x&7FT|MW@iD`=ILJYV6nwDjR#)>agG>r2?eIbP&{|<-~G#9{<7SD^zNgd#&;k;=|(l^ zf^g==4Q%z451q@Gug>M%x(LK6l>FLOZVb`-IDhQIlz@18mF-Xov>1a~VGjk25x z5ug46Uj*W!+L_t2ZT883{O&jZ<;?7XBjMi2|EZ`fdr@w-*t+Ut=BpD(@+E2^eY8=r zboRUd`Tse5|9$%o?ZFMFgCB4TUBb-mm6zsStmIuSi}R^cr1k&Frvjp=nV9su9D4f~{7$t`oHeb@ed|L%YO4?lkL$;Sqd-FM%8ltdUb z@>~*_D0%YQJDH}4iM5FA1S`l=Y!@yt zhM&3|Nen^a<-7<_;QH{t{P6p`c5i$9@yEkJ;q}ey7apC%;>kBRs*^VQ|KNZ45C8GWC!c)u(E)ah zPXXA+U|1h}h)0=NBOQWk_Z90VrfkQ&01M{RWMaL^EkodAEWKsgVMVtHfy*A7(;aE0 z@^LvX3Am2JF?jRU-+toAf!UqgneX$71-+A6gDV<&XJ7T6Qu6qlcdI#+)$DoP^MRzSa9drVj8GiWw(#11h z`0T^AVXr>wwfO?S-dyg-D24TAn;Sp3f1LgAKCu6Huf1FhW6Y?qReZ-5K_`-9(H(^8 z7B(gLr%&h+NY1|T^1j`3pE!J|!FTgbPM>rJp^C+zDfI0z%J9(L`}XYE_U@}Mrt^qM zyBKoVy!qYhTZrVQ?1~5lTyrAI4ISmU@ATe!<;4e%+{>WepdHXXN26*siAzBI?dHr} zgJ7D&#fKg^)LXuA@z|S5Tl}bsrSAAgt#tVY5V+KRWe6U64(ZKS?e*t>_0auy@7~er z5G|zM=#E0L=tYc79<=x#X~b*DhaW!L>n@yo?;S33oGeLMn@O(ISu*7SFu! 
z!mob+_Z}aPmg^0MaSub&Eo|WVUu!HZE}~fR;q#-9e*UdDUM`jwLzhgqo+Kz-Ix134 zJPmKuAXnX<4td<}nI^kwqq8SZpE~~VCm%$J;4Va00c|5ruhFp08#!Exmlw9p&m4W= z{#RdoKIRFC|s)lgE-Re`NA;#gIu2GD8Ms(fWu1S3}XSlBR zCghwI@mPhB8-Mwg=E^Lqh!R)B1@#}FeDdDIN51&@m)flvB9rlgrZYnC{ji~rN>ZKF~~beRZ9&FH}*DH`lTht)(%bk2Dq)- z(bZ?F9g)a_CNlBNO!!!^gkP$`vu!RUFS&Yw*SZ!f#DN3OxI$8L@-H6z=*fRPeCX~c zo_HJ&Ip87=&&occL$CDs^U}CbZY#RSEL-F}{a`{A>woKYFHp@o|jP?IjQjZ*(0kdgFR&p{WfZ5aZ;I zZv&T39|ZgeXLj<$@dJBz_ZF9O^a9p1UdgaEw>WAn_VFob^{{KBx&PpS6DL26x}>w4 z?n{&uCR}xbM+T?W1OUa~%58Y%UzTJS12XrzT;KNX-m$oFuHCFH$LYggohu+C7W0KC zj$8EhQNFfspFi~>SHk2nGA{K}fvb1x+?YTkJhM4mn=w#U+<;o|e{k%;!F_#V;1j^H z$xWwNB7j`G-K#gx_jwu`5{#lz4z|rS&YwM%qHuGvtmT=_df7YCwb`6sFM+tQTxm)l zI`f1T4_RkE{9yl{?PwPGGc6F{k2fpr4)0f%%VL?3d$?D%VjP|w+qPXeOAzXKfFX8f z#8ZziZk=u?fe}MT7QN-=^K&zNAeOoVquO#8zqM9($mOoXeW&bf3um*=Ec)@Vcd>Wz zblk$?#Tfb10m4h4vGQ@*nRz=KcL(m9&!f{f>-?rE{^2C5oj!ef&t1D~tp=fQ@jo2Y z8~vg^L^i6mmKk7V)Ivlew!!XQyB027ES4GH7@I{b%qRdKgJQ(Xg67&@VBUwn*H0j6 z#DRFQc%FBn_uL(^p%<4f@H|B?g2CqM?G7UG#olm{2eEeh(7xRl&z&rm&&Rh-E8V$} zE6c$L@4ofmeTQ20VXKDp&l|ZV`u>bcjpsNLWQW(Q3}dl$;l6wJzscL{m2H!ObN#vC zQs$j7RT3E2pL6HV5v+h#MA^8=pI>S`7mX`-T0)<56{di59yh}Ov;*a1XIDZhskpb+Ia6GajZur}YB|H_yC*;7wG-Cg3d z!WJ*@P^TlPF|TG*ZO11xC8t!{L+mByn28&BOgN&Vw#V}Id-6tc*EO+Uc9-@a^`ZjV zn>ZJiSCPKD9aAILvwO6TFm7}{#+!|bH(reaNRHfpKVhKhsGwG}jvncNcj4&X{iPgshJCx6%%AK|zS^2J*@jNsyl@mTsVQ<&*ahj#Vx`>E9kmO|vN{-`^s z^=Ic=U;e!>KJ}9)m%EENDDzcfibTBPwzaaakNp=raysN};r!>cH3`^@+M0H|(`ubA zVd*l$SB23yLQM3QxrcGt9+o3+bMd~kM(2{-SM+7Lx-Bg%M%R+zuX_ZPOycs&mG(}< zvxVA|>=43Nv$=5Y?2h>v0z}5(7;!&~2@}!(pzFZhEBYeC!Or=)<)wwV)g|*`*?3II zIr*o$@v0FAzYU+prWtoM#*jI&=uU8Mg@M}WEb$>>JKlFU>iFg1qQGEKn2URVn2Q%X zm^e;cO5>ExXL7RihS&FwE^yq3OKoI?kodRRym0>P_W2Iy5dn+i`j>j}0i81@%ocq? 
zTpburq#G_6EW#4eT@3ll>0=1+>_oT;j#(lnGGaTShF_0KwB#!S9U1hZ7 z7#TYH)6qpQhs)u{n0xu@(v?r4WVEzT;eQPJ>srmxK7OqdPp{)7M?KF(%n`Y-Tw1&^ zgF_M#j7EIj-r~bryu#V6MiAmAA4YSo^xMrkUwhJp;1Wit%|0&xH~^H1ugfGGxKSDk zlDA&?HukU+S2j!*eFffz)th|t-kh1`YomBK#<^Fo?b@F^(ufxL3>5m?I$y?Ih+viXZi5ni9$LCFmQCOOrCkH^E*Q_&)5X$D*rmoLsgCC32AgH#+%mvyNE6q<8V}ww>W42dAH)y~eBt8DFaGw+ zPdrYntWmGqrW4MY)gLleIN|rPJHO$78a@2*5zVjFsN0GVoVtwZ#7iB^I0L!-@`s4b zjh6hFKHi)+`IZ-9F{1xa=3Hm4-|aDq26fXuA-N7$6a`Sa(Al3dk+g%D(RgDXRs(S1*d;+vRa zabfwT7oY#?m!6oPo9lKLXFDCVS}ej@S<<~3e-Smt7JM*hvgHIJP;*Ei>jyWU1mcg= zG7u-5V3|0@`64F-;t+5fb;M|f>}#|rIiBVY?%#jp@V(DH`)l@`&6j@BsncxUe=cld z$5k?)7q`ofRW(d(2!W5b^M({}Are?aAYbsuSGhR+szZ=39#*>g(dQ=uQnNN-6y8Ss z^|QsWmZd*>MVnNWzk2nqxa}ReFsQ8@^EB|Pvz(C82}ymld~&8I z?szLxXV?U~F{!8sG{!)YwX(}sP(1yO@GM9J&ojDL3|K-JCr-}zA0yH0K{n=vPR2V~ znOvVq>}#qwxoQg1iF~>~RlQ_dP1g@uB*d-gjWdq+Aobbn%{XQHC|$%yh(W3&zQ-_} z1?UkDcxH%hY%(Rb!Ggh`vt!K(vc?>D9^}g#xQ0M(a>_;*I$)=Rp6eFRGW~W0asw~! zHW6!^+fV>62dxtC`6iD>oEoi8ysFF>yHx<=O2v7!>Y0d~k$GiWGrO{QdjFjSl0ki> zy~Z9it60Tdhf5E3q2EyQAj^$|c+n&g6uUSz@g9H?mKtaxM4>=C4quyyH4)dn@RDPA zJ9-gpoo+L$%3d`ySx;8vs^;HnmIO~A+aoC)>x;l42&~CKRosQrJ7U-dUpfsVzEq}H5U+0{ihj7H(IX&05@%cTg$#irqL;=nt)VTh z+G&Upxvcc>#Kkifr`a_|jC@WL1Sa`pVrgaY4}^muZoi=rF~gb5g){ow@yB;G{6&=E zI>Y4lka}@xtO}0)AYP4l>qH;}Hl<_IKqDqRQ=i}l*q7nN$03X(PI0@{9N@>p4RON6 zE4SO~OdS_LS?6kxn>-N&CT<*fq2feIo%6!1Ei#)?ufHvv7pv|{AY&1y9R0v%(c@#M z2A^n-mY(~?k3avZBim=1jRCj3esh3Qj1Ouz>=ie|IBPf)#}%j^mS=X*Q6%^*r!j{G zFXxB|L+RLxkzYA8xn1%el@fRl&}whzs?1pN^%`noqsJIQaqL9E@gXnNi$SZ+mc2(F zdFX@VzdLvCG!*`E~uQVqEDpZ_#`7-T3a=LCZr)f=WHpXJJdhI*D||+nM`Yxl z!V_IMB82k9R`hk^=0_P^#NuO>*b9A~RP^MoFHc{7*@~b%>GPH!WDwGvn9)ZG z*%$#i%m?`z&Ie;l#=2yzyvJib5P1vi=`%oBkteK?J}xU8JoK?HbhMb5fj>`ym!A%IJmji@`MBbj2Y{T; zG^%t684VtWdA3c*HPSG;@sM~P?dWfFVvNrRPJm=1o`mCW#dxZCT>&^w2mXokBF?3F z+8$5IH0+3lv3z2xX1(ulY=?*M3ZHBjP-Qda`wp`#=Qpz9mzY9sCq*N&r;{WuAlx%J z`uWr-Z-rFB*%6^M88HJR3>VYU?`W(N7j zJ!!s63{k5v<%BW8NoOxY`3xBpixV;q1@2q?62ca*L)^VHa?Zk5lTVenc5?A#v(?0t 
zNn`nny{J`AQ`=vxtRpPGFTy5h@LbK$aMYkXQySMi^;FBp=L6hl97+F(N|V40FbV1ICRC zM5x+H3&&NL%k!j>$Xp#`lgl@GI$z>)C2=LjE3+cM5j9Ube)JDxihsh0JSP*Qua6J$ z=)wYn4YJ>hAHKckfrH(%$MNGP+BC6j>6N!zca=!4MEzFv{h>;qjr%n&E-rlv0 zcLCw!dW$o&Etax5t_kF%6{wAKGcy+!y6yJ1vln``xxLX(D*u1>?)1s7<2n%ht9naq zSc!!@1(4K6t=4AQvTQE0Rvl6msv$&;BD5~3B^Ur4q( zdqNM6vDf10>373aIT$>6X#Z2+|A$Y0>X#@@M#Hvqb(tndZO@jt7_>FG?D5f15;e7! z;Be=cUw&CMn{jQxqu1Bw&o$=b=0G!Z7}q%5_s^fUO>HpWKmYcNM~>gQaOx!lotm|E z2A6w#X7+KrSsGYri*lkH7x7rt&B>{m{(!xHYA{@!p5f={QVK+9|X|)F{*p(dww6wg^A3Su&fuDc>d!PK&Cw2J_Nj)=z zk4!FXHRTMNQM25LTQZKF{nb}r)sT{dnYc0Y?UY?kfi+@!m&1A+yqkyub)-xIkv~IT z*ya$^!WirvQ5K?^X|Jd*sn63VI6C5CcQQWOyKFUCKglrV5V zMj7EMp|bY+vw~4mGSXZ{T=(i0`C0iTA_Q5>L@py6>60l*-a5XHzQRp|O&lI#+!~nx z!VVcr)0RVxaXKqEg0O!s2-wEXZhT0#X(yaFasJadk~ zP@>j#>xHCGJ`?*&kR~ybR+AIGLiwVp1&^5JORiAjtSB!H`U}gevtiK>CX9}Nq$#=z zDtH)ulPgj9Bj}95E=lUf%wTGKQ)^Fl_-Q`SY(KIKrPf>TFw)i=*fNzYcSPGmd&0IQKsL{uq$o(yp z>ZRDlCkyk(Dz``V7@nfR3n4y>$ct{!2-)c*Bvjc88t-qpXwm`NDJA^O6ipO^gcKJN z#e+IAteBjR3<{Y$zm4u=0cEh=oY0~#`Dv;a znlM#D_Y%p_WR@C702JP(A-KXk{$hkh{dwy|k|UKw5D(UjrFt{ntL%VDoJ!;ot)d^5 zRj{JeIdF`bc*>5XVR2m=aYwUYc_JFhg!7QT*sNR-rzY{_EU0s13yF&b0TZ1kfO#C8}C{k%P5TLy>yIKwLR zviOeikv-YbB1_RFBPo_^q8#fwekiJNEjgk_p#oMT-fjf%7H1;#(Wg?ZNvjx-3``+S zxr!RRLf}OwitdJ&!JTqUJFyw+1^H-^+Dt9VEE6~1x0tI!_oi}p!w$SU(Vd!FnVdQ| zzc@>6QIhqYST>Ck)E-u5$2BP}q;WKoGAuGUmYmw$N}Mf|d<1sF@45R!w#nKu9hJbu zxrX3ZhXeX607>wt#B>V8cT~v2{HrH^@$^p@&Y$f#_Z_^7KXt+02pH*@5kMx`Qc(r%(Un zl@}LX!DiX7-F@WapV<3>4+P(1IU#JsK#As2=#$UDwET2xM?KTFBwX_cWV6d8?{=Jz zi9ps8sH-lL82ryIo_z7?XMS}4{5uH4$bO(R`Qp$1-WmXve}1quyXTG%fAaG>W~Z}< zm<%UjU*^|is)X5nX7BrszyIe?|LmiWd~gsh;Vi|mE`XlafcQcTmhG~d{r8Xh`x_r~BcL ze{%YRA9RG!bf;U4)}c9X42h}3otu0dqrWlx?UY?jf!k){UCo~N0C3GAGsHw(+-Xaa zwX9;-3zwhp@*ozywKN?{0=*){65*$P+Q&pJmI0-x_+(-@m4r@Hrs&qH<7JV$g;!134F@hxY%nQt|jHrpe1zQ?iYNC`Tob{{s zd?YE5^sLl?H8UB4G-)!<86*i!FGDaeLQq_e;sHeObf{;s*VAwq$HHV8{IzToEX359 zR1_bC*q-KMTq(La1ts75DO9Mcb*u@Yw?Y%b{OUda3^5k}S2U9sk_bgdk?9YErKQyw 
zhx~Piu~|2aT%qh!<;$9xU#5Ea8+!Y=rv&TbvS}yNZ8F5ahlzYK)Y7gOR4XCqR|3n$ zB>(rYP0)cuYQ=QboGQqy$p&BI5r3nd2iaqIVu&AU`dT9*E_9!-{>(aBS z=hM&ldd#e5hAiRMd8Q~x3RJB9#HC8HW}c*3;c|$a>^8a&TZY}T;j-3cBOXlkI%fy= zKr<@i7nksIW&ueRlWjv32E;xGG72YR14J}~uvD2Zskf9rVi{nXqluq_JVFBOr%0xh zmlxPwF?!EOl48(oQUFaV=|sFrwpRI{bZZiahQR&F@|b-^`d!d=wD7Mx6|ooWiv?y% zL^#NWx=_-KjTq@mPhT-+1skI88j>RBeZrHB%V)e;+Zi+EmVIJSau9toBz+{-pBa%7 z%q^q8ZZ+-=Bg!#AQ2rPrNXM`l#>QvIP;?iE3*9b%w2-A-SS@W1Eda#!cw<}fA&7VJ zw)CR#M-?A|(x^k#%|zWc3cP(w5-N;C<(Gt}SPQPTqnkwM3fKMc_YI1NOhneOVPRP) z5W$16`I)W7>)6unj_UZ>={;IR6x*`hY-OSEr_R2MIvc$XR-+kzjPd@VL$6D=a99I$H8YRW0 zc3R0q>&&~~`^ML&27`k$U9HY{gT*t)4j% z+MR=@P(#+gv(KmLfC-|o&f?|cjf7A9I9pBN18|IjDq7Z#uV$m*E-Yv)#3O4`s=?k(VG3>hn+4GM}D?8Hxnnz zW@ct?EFag9`u30QQecxgM`l|j;^;xM`Z(z)kS_G;i>V= zvo5EfvI*Jua0Qa<{NlNZ_EeclaD)E6lUhNs{& z*+lZITk5$EYd@=*Y&{*;W-Y|VZsUI#C^2DXs*CP+l1T`i5Z_|@5SzJ8DuPGF$3OHr z3)DKKZzR)e5#B4h>6j6Jtnp8ZZ^jx-zi%gd`Cb6Wt7>)nOUvD^1-Nc>=hlQXG?fu9;vqTtiFaqFd!ac* zaUnrVCoO_900=oyd64krjs=+z zdN>k;S5YUbjTk#Z*;}26vlPysUuir4)L!;whXeMO=Y8r!<%e#`jKqN;SXjJ2jHX3~ z>!=PTpqYdZ=?I|mrzn$|$FsJp+T=3t?tel*fzKyGF>zLOb_)Km*Qb6F3dVA)KmU*a z@U3GH+_`^lYOr)}&+OjC;i8M)8F@Jj#3>=GgQIusoBHSne)O;3|HK!6Yr={^2+I=6 z6ilZ|Z(=MYYCrhNU%l(z_r3DsvkswNb~i((+b4W+(2T6xeelB%9KP#5Ka?<~>!teS zwNquH+?A8hJv&%DzkkmRw^X@|t-apVQmenxU7cH59W0#hwYv}8dHCP|^?(1$efPI! 
z=g4?PMFq`Jw5$-`rGq=W12Ii2wB)7`XnwJS@$19KqC=Km6(iKdpuhBkfBf6`9lvXC z?{t5-$PayKVadTR?cNl1zc)QMSXtV8@Zg6Ynf})w{@Z81@D((}EUHh@esm!Opz=GX zALFORKlq8y-*xZt7hXQG*k3v~98Py<=a+_N0k}FncksT$ci#2DaVNxtV?*57Z1b)n zf?pv$m-{b2_w2F@8229FN6bhgeRkHufFq0n+aAP4HJ$tJy!%K0_T;ZTaJ;o=Z}A4( zoC)aZg(p6;xjUqI$2-O572i(11{An$CcXwlyg9xD49q5wi`*+vM{1LF%b6~S0WGV0FB zx8GU$;SWzc(7!uVT$5w3gy1+HLBudaPSIwnwv<{F@Pq_R6|(u6(B5j#rVvM*LYNV8 z>L8s1wp0;HKb2yBe%_O^?vB)q^9-IhMh#bHCl}hM-af4u$H=Agi~WVgQ0rHh7gy&d&QF{80Mv#4Mka-c)yHDyHk**?m}`4`QITWl!C( z!ca=O{U|4CB4jqz(A+|KBaMRG;ge-XWiLsk0jT_ybL0GJc@1qs6 zUQa_Z*33@NIoPo`Gi@90$n|vtdlK3W$+u!cWmyor$kHUvIu0T`;ps`7;u>kX)L#f* zQUzBgrk2mmy!(rN-QmGIXX0$>;5g?~HzRfWMZ2I56o=IKmhSiCC!ZYjULH*BAwwN+ zJLO-u*J;nSCws0&(+3$)Xbve2k}Jx@U!gK7+FTLxHRV>0G$~W*K;kVfom)6R|L&=` z&%W`}%3yhRc5Z&K=+ubt0gai}NU4Y)mSi}1cRTK6d;Ld0TK?&C!-;NORky+&XLWX> zzxd9pdmlKAgRvOL@3Hu%+S{3)rk-s1xX%Z9xU~3_A3nJ{b9n#E)c(2XkyE|d9PzNM zf|n7XtV`MDKM|z1Oct>tM~>_~z~Ok46DMk4tF<`KFZ-?|hnI&7y`FO+7CYVEpyhDd zvJC_Mn2dYf1KoWuKlMV(A{*~xYd3myA$D9IQYO-hGNab#R$IN}f#aO$59V{~JJ8j%Nd`Gb#qB&si=G_&!KwSS#g;N%CVPi$-`ohM?j&VtgkQi zUpn#ZUHcCxZE11AaU|w}#0bmu0C?=mQ!WEGD`%EoIraR@cYO3?&Jn5zT`Yow_;}k) zd@=gm6zR9k#5V<-ZayGWjZ3&`kE}?v4TI>}GjG3h`t-Sl0o`x^?7k`PBE9a+Y}cs3 z$U%_xx_joPXFC0Np8c1XtXvyzN+C>7Y$j!qIt(N3UnRa`15`}M_wGG>=;+k2ZMoYN zPt^vhG+cU`en@%eopHyAjHuG5`1#`IRsz4f!#?QBDQQ~#oZ<}uS{ zTr)BiZ{lT3p{p&$+;PW&*_F9rt7jXVfs)yo+p%fK`E%zM7f#O4FU~J64EoE=$11{c zOgvaZ&L4n^x#)m{3B35Gy7JD_Y8RGP+i*o+3T57mPA?E}P@Ik;asT0c-AM|-G~b`z zboY+E2ijA;_T{guyRGwa6!@~^Zz&W=yHFg2b;6iMIBhye-ddU2H+%5V zfhioEm|dNW^U4N;GsFIZNuY1CcItx8h)r6HrlR*@ppLHTgER?El(pue84V?f&H6 zyN`Wn|Dl6>_U;Q`pd#ONL*%VZqSW~wd^DSo7EWYo@Z^=YM|Oo?-tB65WQay6e(;Vt zcSs*Oy1zFyJJD^=v`#pKYx^WF zQXtSAs0{*XHG9K_Gh}&gI14u!h%;^p-TLj3{Uh!5hN!kS8#cD&s=B!f#uFVp69At=0`PH zI(zOkrZAhFXBIoNu&6h7COT7{{d0S=8v=k3NS&cV?vq3b;Qj^z2)xA%vj4BH3?y4B%%99%eO*}##SivTDcKMtp6FV<8% zY47+bNX(%PNVoa8uJsb6#HJ;gH{X2II7p!Nx~=_`=RJG7?G80@zIz{TwcSESU*~)1*uSO!_@{5RR?pJXS(=>IVbW;OXttc?cbwQ;hoY<%FL7_ 
zgfByAG_DTp^yY@keO%=!?Ck!PSe$dXCrRtQ#?hmp4whmc)xhBC)0iD_EzW{g7SEnt z?JReC)BO%Np~2*I+X}oJgWI!l0+fZp-h^4ml=ic_G&Ez0McYeR5Q%8gJ9~7gF6%0g z-+tUA6u1W}MI<#lszL~iL0FLQ*861lC zZoae^_uhLwQ_&>LZb5OD6oYuBp2>hIfs0CCfBkh7!#f;I1s&1Q z+$YuQ)S%c>_-(bi?bWH1cENXx6JKeag)!k!98Qons1;5-;}c(|=cW(ce}Aj9&#tDJ z$46E}keg48Wfzn%(Dls#&z=R3@bjPl93L4Oj~+dG&pr2;x{GfHQmOuydEiU(QP(g( z$3Z>u?YG~?yEMx__~3)$f|+?-iV!*ro3Fn^>`Fx6Uu0eCzcm|NTiG0LSjT|L(i*)jP%Y2R9XRs7_Z@LNv=HM|{?mzKpeluh@FL$dc-AouUbBDhx#zTb!s_<86J%>_ zDNWWO6k+w+Yp>y>coNE!d|*(>aR;X|6brsnxltZ-;xyZdL5we~8{`a^8L5J?L?kRA zg{Q$vjG0(b;9U>CuQjmbNP@p-8hca{b-P)Bx2vR~Uzo?OM z0k_0oSSO(peN;*gzVXHzjOj>3dB8#J%WyHG@Nb)FEe;2-y~S$G|I9*J?1|^YAGcWL&34q_i;jYK3OJm2@e_hBV1i(M~!YYZtxJO=4w=S%GPn`-Sl z^+xafaG4h&dk4cJ6NanKtO7XdfU5?|hmqewNlwL`oV<(fH+w%rMlpaWso0Vnon-S_ z(f+b&k?nw?l;R-G!INm!uI|78{yXoyliB*#?0TCvSS|d;7hl9arhIA@_I>b!AH+L1 z-G(%r=*++V;xlJo^li5{jX^QV3JF=Phaw{{MRy)71+&6poT4-(Edg2%CN3Au@OgAI zVXV9H@L*MP1!c}7ipg11acb84Km6fAYo^oc7b!V;etz-Iw=6lF{onule;XVD_Wt+3 zpE&iQkWFh-ymnh6XjAlM+hY;a4tEg9AXkh``L|4nCin;U&7d^?-t?^F;Fc$>R<5+C zx$0X^(}z}_#|SK5RCZ`A^&LJiWp*9j9<2G%UyE)NDoZQ{@dPIu!`vKnSICO*P4$bfRm4<%}Bw=I8JWZ zw^srmL&tYmR&NhmF3H&~4%hbZ1+$v>Z*tzS8MEvuB4Kws<5j z(Y^ZTmwO>5R?nY3cPO}3*>wS?8cUMmV>H*I=6Px=HN)-5%|e0OX5yQLQ#T&Y?CRiH zC$*&;Yy_`J1loyNIxUk;$|ULoANYU?RZ0gr$0`5~B5Z|$Rk#Tgbmr=Q@)0h|g{3m1 z-sGU~2D`{+J*;cYD*C^;Jr@KgP=;|YD}gmqC%3YI7;jh#1Ul34qK#+)*cnHNgeuZo?olu;Uzcl4urwzAW!YOZ zh!ZMCA-EC0XuSeX`AM5pG&q?Ro__jiCQ<69_IB4@cjYTkC2N|KJ+2aO|dNVurfD@4jID%?u@BUZ<|_a&$zIBd3Amw_9t~F zR(O=?rTT1aMJw6$>Rwy-bUapcZ>2?tbV5kh2p%Eh-U3O`4HHxdn)@;8ksCkw!4Jra z2TGt`zSc*-wQ``SjzWd2?IOg2&O*C@Vpv=C}~gIl8~o_p@a=U#Z$WaId;W1slg z#}6O9vpuo+4CsiQ~lot(S7)%)1W zr%B`IpZ&$Z{oB8(QNLgi5%GB?U4m49?n2UmO-R-!cz$Mc(h@S_X-1*_yz&K5DPZ%_K-SbC3`VmX05yvFi+kBOHK`NVi?cJLJ zf77HT3)bNR!HNJoEcUU$A_(K1ap&QOA7;EuI(Q1D5S;~~LuWA16j5awt(+ivNSXHZ zVu#CjtZA7o$JxYXA;oYKawIqyU0{XNVgw>36c1(*Z!So_3RYATr812PtWo&}7m*!T#jz{Aza?ceK0qs!v;3ogC<#x||y*CMIHN;J0Qg5<4W%w)mwr1nmXe 
zrGw`&pv5@laxF^5q}|!Q-d^6sAfe{ThjBPaS+(A5xS^~bJ9doF@Cw%OrM}b_DpEt4i5*8p< zjmb%ezBn5nnLDd-@zTVK+Y1a^_R5?mJ15+3;52C#tys#z9~_L1S%PAuC{f{&n3TsO z0X&%rYl!^>nC3wDV=tyV&@M6q1oK;wO`G4v()06O^;`#PcE*Ze1(oV<8B(i8C4(T< z-KtD|TOuI=F=I%ln_w1XA`U}RwB{J_La8WziqWC`Gt(NZI-In@;t;lBrHl(?T1z@w zk-t-MpB2$ML9Sx6Z)r3RT3uaQX)o%qs}uIElcNQOmAToex%>CG?r%|YNPe=E7FU%|q%TX`Ni@97VjWKandMHHU>o4oF;g?z(5IF?KN{#_?DZ1~Z zYq3?f5`1J!aWiv_riuyH_juhcF7J(*ohCTP!MM}mw7%GGJ0fEVAbFC>)>I;v4Jc_p z$>-MjZ@=Az6u3^Ea2H~J8>B0xfI*r7t>YgpE#o1*_eIhh@dZic`dO~*$ zZU&#q7`m>Ecsp(b&3N)R-hlzNE5X6WDnzYhXE6oBK`5BIA3uJax|!J$1<0E1h({?m zPk^A7*~B;F`Si9q@k5-ao_fltEFD2U^{G$Mv&DtW#5jpwGZ#kHUyVrg&1NVamS%C; zq4p`T#nR25Jy-^Q^PFG&;up+kX6zsS@P~8C2ib~3BCf4kEx7F3c11=gS7v2b3b0rM z!%utp=TATT?9&#LXk<8No9qW|tTAg>iQX5tkp0Q!Y}3@lhb7v}B%N z|NPJYtYMS7x^1@b;g;x8YfI4VtG%?#6d0d;~)Px zW2^RAdkO@SAR-0d6yT`bq;NAnLb(h#8_^HW7XGezC28w{0q_Zih%d!?=nL#@FLMC$yE6E~yYa_mNyd-lY)QR%*G(+IAfLezw1Rlri@)`)Z!u5U zkb$!)8|%}DX}1y)N;NmZBgF_nv&Ut#^b;SoVTg$2TW#3LE5CvGH-GatEsCI$ka=n! z+zFg9Gv@(gu5QxzlEUK*o7IXnfJ`QOPk>mT;PAoJs2kEKKo0CgL56IrR-qC#DK`-x zlK{ArT=T4)^-7#gjcZ_YK0x8StJhu{s>xr7Wl8@V?Ugaa<5{<%eNs}y$^a!n{ z91#CJ6prwcEB|7q5X+dwsu(L}k!6iW z5}D)kc-2&2BJFZQLQB3MJ9f-O-O4sw8rjqCF_Ww>ht0*WT$(Pdp|2ugtZK@{ zPuw$CR?YRwX_4`HGVwwSk!xJ0$S4XnA15zzYz8wQG+MG5ku9Zv`Imo*o0ynr=2Lzj z<{T-(-2G253*bzYE8-#ora*^gqs5x5vpxOtFaL7WukG>?+_dvtU_)?I2b2qEW4(Fv zaDiKevmyvOAp%Ey%n1WgW6twefAv>>e&#cu@d|V0%&w~Wu@P_42rpXMjeC)1 zFU(4?VF2Mk9=|$*e|7TZmtXkt&wgsv{Yzi^66KgPPOv*=S`ahHl62BTF4%}=xHxUS z@r?_BM9|IB9ZEVq=3oBhUl`*){pnAmq)=&CBR7sho7W6@FS7k-w*_{}WU~n9FcY(V z=fpVWKrN{GZB5)f64YMPpb03hRfP-D12p&|w~$<`CeY*;xi|wx)yTG0fD3OgreOmO z?y#0Jd4J*)pCE_5@+GO2EoWURSAg2OFK^J#Y+#-#uRQS-%ElcWB&dJ?_kSN~nIWHk zLQ|-06%YD{AWHgc8FCda)cz6(mbYG%14Ax!lMzEB$EM+u?DQ32fKfjOYI4xJGDXPx z77kK`I5y&7GI3K5))IDYBzXk9>~7fylNXtS4%#Q{u%U57&_yRsbO{Pc}ftshG787|vLZ5wHwj_=`f}c$eUOB$mTZ^#N zZ(`Vzf*VSZc79(ressh-X|_1(E7+6E!G+v3V@uzsg1TGD^e1}12qHj7+PGjV~k&w zztOkITNANR*DvG@#*H$RvniS%Vdgr`u=G$PrtdcikOFNg=Wt=)w{&X7!i*B{C@T1s 
zxX78UF-!4N{$|Q1rC;A9S}QJGO`R~pXhF9VXDlW@9rfUd7sY0WjWgNDG8Pdu<*TW2 zw|xsE=);^iGM*-;Qm!YQ>4S1Lm1>&tx&L$>zS%Ktva1#!;{+tT{^A~{5`u$;o7WY? z>qSl2;bWTnle5>W=G*n`Bn57piFcBk@9nbKh|`|}nno-l&(9zJ@P`}#DJYLV`e>aF zt&<2F1?~o!xKSO9J-s87BFReDmOx*LJr^aINzgs$CB=5EgpkWu8k(S{&OyHv%w0Jq zur~hMul*ViHGz}{EjD460cy5L!2@gH=(C_`;@71|P0w!~) zD&whCg|?}K*2Eeq1j35=uyAL@J2Ql8*=j0{$>`BkAtnzcjHVN#U-2hq7mAH(kI+;Q zV;C+W6QRazMB}7AfBoxUXGJo7P1Hu3R-@G2A5El^5s7BPERGH^V&7+;;1$Kh&t@Sp z2vy#Si(CHa9kyZ5TVh7Wkts4h@sp+*Eb-y8 zL&Qdj<*!Hd3;iq@EyT1TT7aLCq0(8L-~Dg>)^D-t{LlaSKY9GwMNvD^);&53xl^=bEqCzObu^*|-2VLNaho(@;pG*v!^= zf)Jbnm=kPIKS>gHM!}NX-~HX+QHuDbrg*9CRx$9BzoOYFc(+G2LSPG!Wk)j(vW#0< z)NU)I$(f`KWT08olThP+vk7ZPG&cs0GhLhqq4+pAcE)E|?rFbxKe zFb&D)qQR+d`0nr|_V3+yPpQRJcpc?~zfEDcJy34wSJ`eMqA{R@5J+9$Kshmu+FqT> zM=T3n0q|o!VvFM=iU{h2jhI|r8FH;ti;NfanNn-v5o7DxE2eKe47YWR@TTSh!&csCRX}aP%r$D|ZMu_0wAj;1TExuNgR`ShSvgLqf7i9E%FvV>m6c2_7BFz|8{hZ_FKIYt zhEzV*6;cb|ossNN`ji$ZFem-vKmOy#Kl-tU9)4ISaMZf{#!Z(J_^?opqS>CK-6D(b zH^Qz-=hCXFrQ=s|<7Z^7N5Pj2IUh5n)a4mL^bQs}Z39WOa8%uGq^nKW&;vGb@_n;_ zr&WCQt6$9q0PLh~%`97HuI=h#c#UaX-;Zfnb~i+O9(RXgt7^qbZxK>h)t*u#`S#M! 
zCa4LdC;n>T`m!CwjK#-~ABQHyYvx++CGn~`cS$5s$QH;rWo;66jHkSMm+1Am&aHn*X=mvC&TGl9ni}`Qu3x#jU+RPCEl#uYGECO);^hSiX@aUtD zrbC>aNmeUm^P%TPWSfQW!Z?6Im1yE%LYPWm-9TphPJJaqFlkJsjqUc*IED}lOMhL7 z88JMrqwppfJ%WTH(X>WwR=E75%<)#i1-1mZKpG4cNl>%lVIr^0-~HX+WkR*0W-Thc z`%}M7YHU{5E=|)uVVB6(2vZ2n7Jq5Ex%Vh3Q>lRnK1^h5^WKOd4n*R}LnBEY9K^o5 zggj!vsOzlZh&+|i!JYV0aict2cgMsCY{|POY*I{2*oZBkGZy2Z6}TKha-@J4US|o- z4|j7Mv^n{LGB_$1D^=4PuWxfhHZdUc~sjZOoh1ez||k!P<|{SKlkiYZ=QT%a_QZpJ9$su z4J6z8sSI$%rZ zR%|CYpJFbJ=6>eznbR||-gdn=@I#sz98J_2T$sP}Y(D6u?v?2KX-c02N%|z>-ujs- zmqH=hgHVjcsZa)|i?P@%9$_wLi1*_bFv?%PiN}V}$WYz^5+tGt75H-wP`M)mmQgY7 z%j1~9gVJol+nfzt+_!*QO<*A0bTG$BFtghg#*L<+_+oPFmz-{DsRSw!$w`b7)c zeJ=p4L)GnUc3`j($4-T#cd`rh9$$EyMv&b})x=-#X4;^{ z8+WLl@RFWSw1sKPfkY?hP=fS)HBQ*a^(SJ4;9oV0_)R5_Nq z8t*UU+#*&O9UU`K5Rn<2p3t=W0gqUUwRv`&oGB#{GEc_0D1W!TN;I`SJNg+r?0c}M z7?9c1MG~XrrbZY%E7Xvl;j%Cq@evcF#{>tnFT}?>Q;;`0OX1f@*cHyj!^;vcdDcHF zFp;qY5rEZaM)H;PCbEV<{`liOZBwdj zicWs(m9MhEO|z{@jE;A~!dSXwIhG(g=1BAf`;wE9SglbPiW$Gml<_fEB{dsZ1+0FP zDHy*`!A(o@JU;3#5lLUNXR=Xh;Gl~#<=k1FMl~zt}>W~T=1$6_L+!KW#|fm$sjaHH?k2EpcIgi z?%@4MJIz1LAF(mb+u~}__*!C2C^D4H4ad|q`L_I#)-*s0sW_&B;-kcuPR8wydGXud z{x+j3N4(4i7oX%)=C!D8S>-NxNz1gpm(y2RB(e~%tQ5h`im42l4VqhHic^KhqnowX z;_7Aj7bck4B3d|`{-@ar3LnQzUHk1OznX0<;~FHefvO!QcR-7S$BrE{XJB^8V?e## zq_|z5q^&|zz$pd@b5}aFo#_?uWQ|UWamhPGkiTZQXr$15P4ARqs-}}Qx@sJ1^;lL0HR50R z5GO*2nGxS+YtM)$l6q=3|Lwp1w}1GDe~^<&D@mPgCO_OIPgsN26W^<6AEO(9EB6yM zh-A)$JlmDHbm5B785^MxHTn*sj!cTN$K}G=j`?<6@+kB8=cWIGzxHU53rYW+39|@T z9kF%VX#3b>k5RQNcc6Bq7+o5gYn%hXrhNMEv17+<5r8ZXRwjT%c(#FSzA7D~ z0c$W8kIEntjbAUizz7>?B{qVjwbEF>! 
z@i=MyU)+J6kd4#pjEi_xMJr!viZ00`h2$Y!y-lpQz-xVFH~M#Naz+775$|jP%n#Zyl`)198NWOe7AN4oU!l?83sTbVuAFy~6E+^zW$0cs43j-?75udl+xnIrkQ zJ@V*zt<^S0(s6=K4wqhDKJoku_NGkt;!f#1?>ySoB~~5DJ?f;9?Y+pzz^m0_=%@Ro z{(&OsriL*gRd{8>?yC_1oPXDOirsgqlE!vFV=Y0{3>tJ`9paIxLb18m*s69GGV0 z3UItY)cj~{DQ;?{&G;1*eIaXt3cNsRe4a){BqP~3lUeexv6`rJ4&qvJp`e>B#x>@| zi4!KSG|^;@7Fx8aU|&q`=#?6Fg9|7iNUWU^(l`*kC;mkXFP%3RKjP%pDvzWq)`tiv zn`hEi+|-Msy=}RC?U8z&Sd(Fw1i0I|V(&25Ff&F{D^ZXRLKr2E84uEx5F{%he_qKG zq5KP~ZXTlXZ}v9CSAMbC+YMy_71=9a#BoZld}elgigj}m+sK;?+AdNXs=vMor%s(> z-Ed|oA4WApHn|2JbPyzFJmG2*K|C`gtu6pwTlpxdNp`)!&t{5DVX!Ex)A7M*4Qw<> zMeo=mR0siCvsECy1zWxnPt-icj~zS4>Cbu$@4x1^wUFHzNpvc}BATDmLY;-GL9!7i zQ!*Pgg+xTo$}e1&C`D+ZS?ShHTpQ2)8<`U6vDIRIkhAp4VEWon?ULoyI!N+o!k_#~ zM5Ozf@)i~x1vp0h*`NKHv6Zv`+6s2{ui7j@iR_dgn~+!-Ic6qz;0x2l0oH4{*niT2Jd$srpw0ZTOssR;UNU70uPC&`d7 zZv%iP7Fo!1&pl_ZO*gaF`1s?G+g*ge!Q$d3Qmfd7bf~ahO$1)7;mPZ(DQ6qs8tGCt zZsXf#V@!wQP5D@+J+Y0WR`DL@qxNjl3e$>X#rk&Srl7!RoVY1qy$$5`p+JsUIc8^# z;)ZR(Oz(4_`<$7N4lmTPO}q))x}RQcOexCAg^$MKb=X=aP^gJ3tDV*$jzV>7Q_SdE z?M~Uup`=k=Zh-Ad`(F-c?#`yCAOulrb53ww{!>2VG!Nr*vfHXm^fK^a8MxwxSZi7AueMjroJoWRcbtO-0`t^(e za$R*vsDzelp@UgXS?#Bjtu#*58+}nDBh-R5Aqis_TktvVuhS#P<;U_>=2U5jqINmH zGfj=De->cW;@k08BT?)VflM>uU8H12pV=gS4fi%k35vshx zudy}cVjZi3v=P+ABA=BGnhL4)=YRg^ZVbzBv))ndwYc38sZiBEs0p!au#;MoF!ab! 
znU#?yH!DqQ9%zZz^o>~Zwl9grMjymq#Xb2|gju)-5-{MPQu1?eOYYboI=ZLOi}Bly zRg{;bZ<}u`kWKJ1p0AP!7W1@Tr%#yk`#k$pvu9OWH4DGt@yg1?h|y?5+8g87_)lkQpZoHDO}TKA6M|XX4U$JOwh%3O+s#qyI%uj6WGE{cJX$RI_A|5Bm*dPDloMT z*A$U;bZ9cSnN1v#H>0y2+vzWbrH?o*C+c(bz}C3l!1+it_5{qJGguxb`%1|%-mRldQ3sBHm2Q zSG(a^+pq=eTrLJ)HXd(zq4H{~6uwyg9Z9dXs9mMTIK^&F(lPr+qC@^7Jx0v+ z@hdOC%+SwF%&G1RUvLEcK_0{-XiFPtmS0P2ChiDoJ5osU5s=yCW3q4=e9Wrxk(Wbl z!#e`c?Lw|61vVSvuV?+Y>)L%3NIB&xr0^24=a-7?crS;wr9pvAl)hc?*EhV;P)pCC zkr1YXzPWmDhP$h+xIMbfa5*HDKwu?QMF(A_<*W>2UI>z0zS}NHYrbouR;wfL{@@S( zz@=enDF%R70;tw}ygPniPyz-*G8*03xSi3K_pt=silFzlj^#T5I6maG&C>d0uRp|~ zWg4DZ6t1dLo2tX~|sG%Y^E*iO6>r|16p4g2V z3FKx~-^esjC3xUxFEirWjeZRQ8rivGYJx|0Z4ZaBJzR~WE8!OvPwwss9WBQ^bcMv_x%EGpxOVwuPk z>#T{-fBy41D{DeNh7c}Q%PdiyV#r0M(_dbJSN0=h)}gfu(+gd9vdBe0nK8qN-0lpQ z29urkvP};cm(M^F0BI6JizU+%<|ww1)Y~ysin$Q4i8~v?GW7;bB|ujB5?%E77k}{= z9BNp!bKC*pPRgsC44_@x9#6AeL&;>f<7|+EpVTr479)q|)b=~`)vWyxq!4a$#8 zyxPCnV!_}fU$*`ypL|k#PFb4tNpfZUYAf66@4<#sN)=%swmkXhqmPOUJ6%P+AMcrGGV>5RfWhn3dxu_T_1|LDI&cdFi6BAV*R?n#XWu zWomY=zvSn7(X=dH0R&rQ1;gWyKhBMsPqA!C zF<>BJi!31 z1f}Ua=|6w;M}Oq3a8fkea7u$M%DL9BDuhYQ6!=?LnxBAQ#1nrD zi6-E}K*<4umS|DRs)l{~rKs^p7n=Dc=(cs$*Z-|Xq8VsxS{gKiUza_6_|{jI!q*h| zT8l`6ddceYlgdY^UEt&S^WXdKcikL=kM`?&=@rEGcj_S3S`r^pbq6 zB=zleT~E(#O^V z%?xs32y|K}6{IyezqFKNOsYz>4QhFu>0hRQU@!$Wh4wQnj1-Hpwzt0YtN2}ooifNf z$BCLOxk-u6og+k2=_+ctR-a-J*ix+H6bmB*F)cH#R4MnR!yZD6Ca(t0Lsc=rh#O|iD)W;DXhhtloHt5hYUy46@zplfod zaZnWBu0%Ya(Ta9u$QB4Z`Z0b{ADeA*Cqc%darYby#3nnvDHh;vZ(6@&cj466c&{e4w6G{s=6&U#7J%AeHQu2BaRnkmw%#Yo9#UT6QoK@$w@H?rY|PsN zJ|$!VgGL$gbhsX^k+BSfiOZ(NlY51ROjpIus~@ejyPetWN9oApc*U@wloqQfs2A@@ z78YADmOE4BzAOv=jZJME>jI||0CVivF`dGlmI-abpZOE`jo)626u_uD2OwO=R^pwc zwSOh=*{g(2#CHUSuvj2;aq`f?||c84rl@^G-kIE7r2mX-!{b9+1O zUicPIxDCFx^PI@(3EP~`Rf5+DGy%X($$W!>!`A8GcQLYv>kVPoYxkW%}a$ZM~?X1lku z)Gd<(Tj}}Sfoe7>B36&5re~=HNxGEh;=Oifb-1i)@kKT6NbfHVCMJ6eOPFX`Ao}JQ zw&)kJPPx8+Trq#MxmJ9gEP2m8_n=1Bl|9iMSanivEFTjXkbpVC7DqSf&YB=M44O6F zs~rhiL;f3=R+K177LS)#COchj#q;xvc*Z|`cG_4*)uRW>W>JUVzvX~(d25tXPLCTG 
zDDFOOP99P)F`)%{&3~B@)Gn6d zR;L%&ctj@}4rknvP(oy9s27({s*BZmBh&M=0c=7aI40Ixw_7=USv!8of;bcmnqfk0i(5G6E@e3bc zc5Iz=(F#q#eI2tkflnME)18{=%!*MqW~QcP7z`{ewv5CR%OeTZ>rF2#EQL=HUDH<= zc4S{RC>P0?0xn-B9Wup}x@I>JyScaMux%n-q~;w<2C9Ieo{khWP36)B+T^clr2Cl@ zo*k>zL@fNl!*bG{nq62}cKrevlpm9T)uEvyOu&wt#^P08kJF1w%bo5_tIM{)Tj{b%pn;mU*B z`7c!YPLHLF90&skRZk-k2|W)B`%6>Z(a+hLJvyijz$1gOf|-O@J;~t0LE`Eh&Uo?9Pz#^YY9t)IHGiJZ$ z#%%L3)|5H0V~t!tGRern>KDr=-G#+|+urSVNVZrMxo8dBe*rc$fxCb##vRMFE)WYC z3I%qM4%>%v?Uz86;F*$^>m>(F90Y2hG48w>S^Md>f9WPgkL7bW=<7g}-CnM0h4?DV z&+V|Q#CRhhsZqxC{OtMp=~;J35?M+Jp|y2?xgz*dk5X5Dm95&TDUN1lH_t&?VrIbT zD=PdtWX`5VkTPMol7VTa?!&#Nt&aJg0&(`-Lc7hf&<;@;gO26LiNR7|qhiSt;Q~3; ziMz=gM+b&KF>Sa2;sK7XKk@)o^cY1$(5&=l;mK{nakT7$VafjIuXxjLJK5T zetV~E*iSVKsMi3_@mLnZP#~*2Lym*3IKU?ru)$zSYeF&LkzlOXc5N^cFR?#P&jQN7 zaVa|{-UF6K(B0H-BmZ;HJ?E5LB$ASZk}GK_QoC*A)9VxkLvkmXm5?|m&A>-{Ptql* zp4;PN*yYPAnh`Zorr<@J84Y4I^Dy=1r7wMZHRFz!X?g{F7X@Q9*!iP-hLJN?1hH6* z{@_C@qTzCCYKCn&Rsx1ARF_a}u7s-rPa{+wNppseanSm4F3%v*_UO#!=v6 zIj>#%Hu#tf#YcvIe0<`GC-`H<@KhWSD!Z>8%-6bP?1PinkNW(cMuVuntICm zL}w?5F=mqq#x0Ze_JKop^@pv+rC=*axDl*6JbJif!0DRZX-yNIg{76nh2?#F578}$ zgHS`)QDWN@yetQ@BFQ@uSSL+5b@D6u?svbdEhP~=cOf5vB+$PZ`b}?WT{>X4x^;xw z-p`AXH|!hj?j47YFo!vJ!h8=H>U9F$%v5N8***Fg-0qy4pFeQN5!3sCL5XIpFGsl`Lq9d$H zxSSsl&ybl@ScywS2Uox`kexVjLP`?ka>~5Ud4QU*v2K{@F{=?5646jdU@}s%+X)C` zgLBtf<7)ekgGbJuS#a)VoGlTnmNELUGdeTG`ien^{hv;ur5`?WxBP`sR>=bJoq#0D z$VLlI9XWZIi57}z4swUvGX$JvSjd7v-h5P9WlN6$rndju$bBj~^?yxez^^W5zn2YPyCPx+&F0bvOn^eC( z3ChVzO*-Q`pcP8;+{t6f!Km}POUreDR@P*5N|5AX@?cP-IaEbx-G^8Evye3l$*4|8 z5*|5v_rg-2711lOp&ortPh9RVrAG)!&cB}(vfY`YY0waZh+!!n^QcjBmVcGcWEMy) z{yCAKLggeq9k7xvTeuYAD(l+z=h4(lTS}-v!kqp5^UvGLnRBk>qx8&qr4@yPh8iX& z_v}01RA@sNvLJ|Z)F_0n%>*GC|6)%J@8b#F>4tAHcmBd$F?%`>9=zlH{DPs75t(iV z9$jN)*e7Of8njCXsac4u#?4IgRu29)SPBRd_p-oGz-p6LmvPX@l2?q}KxQ?e&wh7F z%`>x&VGbg^^wLXT|N7UBl3)Dd7x(O)#UBXD0PCiP)~xF$if5;TNU~3pC}Z+PF@SY_ zhs9OruZh<>a+!P#M6~I#pn#pT`yJ}|@WT&&@rz%Wo0~Db13r7}dw`GG2cc|49yRjt 
z9J)jYPHqwlsR3SV-rX+$8d2be4TskVpWDS=B?S!jRiPAdU4ZMYX_-2F3VMbr5!o&| zB3>pWhJ_p%%0Ypa9Lq)46Pj1)WW~gNWoY~$ra|(?+oxVV zdGheFhb(mS{t69i=R(T3#9G{tAmmS!F?VwAgfV{SD%{DEl8LBf*fUSC?KjoROji^3 zB~M`lubrwp#*N^p>nv7}-goTune%7P#KC77@R9w>zqrZ7!K(dm5P$mF=Rf$7kH;Zq z-RZJO5DIIW&n__vjf>slZ?;th8mMH0b<)9;t)G6JHWySvnW%U71yFH(=9I^u3)t60 z=h%RE?mV2?cVO?{eLQv-&F*?#b54si!=*kx>V1G|_qzN=e)#V{dGMk4hs#&#fw5Wf z>N&TT5$aN-w5YTg4zP@ayeqKzS7zSS@bbC#>+AXp0JTGc1 z6og1;Uv8mfobtT9didVs@18zqJuIxo(Y?Z3r(=_~w+MnG{bfhxKL66o4?g%%nQuos zRt1@LNz4-@Fk1-ZCFk_pOMY@KG9Xm4HN}x}hatvfUR_|5sOO3#_jeG0VnK|Rq%o-` z@44^3H&4AYADdJxX4%3LCQg^5I(5RNm+7k0n>Oy8eErQM?>mN5tRF$`^)OwU^6S4S zzcq<$6)MdPnc|r#Xu`;7*{_PSP0P7ai3uke#fauA9tVbi#Lq-ZJh8}Z700WG7O}#3 z(V84AEPeQqk3aw7D;(-4i6_g1TucXH#V9>4mXn<|aq85&&p-F#zQaf3Z8)M9E6KPK zTs=J5tyHSfR@@~*nJ947ufCQI-t+3K+wl@YdQ42%>druX?AS5oS1fkCu4aK&Y+~uL z)w%zHW6wSRd{T_UkVi|~WPyN#@Y>_O*fT8Nb?*bMUihL?(DcEgDsC9eniX7OTy;p{ zBi&6LJQyl&jhT}*-chkoCb=xb6&An4*%LyAW@{I5)EG^b=8(+{sMp2vVgumZf&B+5 zSM+(dePbU1)*r-CAt<)A)VE!OP4Zv=<;jnJ;*+S0b*Oyp$cHkjPTnC4k5tQ|G>0Tq zvN#*t4zuJ6i%a_WDBBWRS!^RQ@@}*o1hdHPHMX*bY&Dc?opHeQ3?~3`vJ7&CP0=CmJbL%JbIt^qTpaXSim}sJIo0h!0c-;8 z+Fs9>UwZYP`|g#kNr_Px-EzNR0iaY_zEr+d3lAv(06+jqL_t(CaWM5j`=4HY_0?0S zPI)Q4S@;c!UychZz>+u>7tD2F{bP?k=2$I>GN9qy>>M;P7Wjs(QVS7$uo^#JFD&76B za1dUC2sxSk&W!nDkjsg8o-*KLh>{{?E|&jx`fjGc8ri*@0lf{|^`SuO!FqM}aL#&m z!5HsTOn3srE7`W&?G60(Ujrwb4S9+zw>p-r$(EJL*~9mI?w7y#(;xr)sW)C*bt_YU zadF{16O}E8=gyy5m|uGB)z?qId-iL8_+Q;b(`xrXw&Th|3cCiBfqN3cm`77F#`}p} z%{;p&ZZI>IGIZInOhEy>ki#$_lgmnX6g zP}G?F5l3Qjb|@TnX%irJYDe(EzP-+1$_w@zW2 z2}6HrVPXE9zHfp-`(agl@r9Qkee}^ew=`%+V}0G{F{>E=+_ai)B&s*{Y;?$ootV6d znG|bjXjS7kHiBP(WMoGuUW8yyK_Mbs1h!E+3$RYWLK*#Ndhh3-_{uZS{NlAY-dvbJ z&yCJ0c!G#-Rl?%yufOT=&`F037Fon^GOD=vGz@H2Y9*XN)y6n3DLG-wzLU?3K)Ykf zSxb^dH>7qmTXRDYPa`8}L6{jTI!9&0Nxi9?WCqH}Z)H|hzoHO^c zSII7o>)!8w|DSERu{pyjF*a)0 zvSB@yqy;C2bA_JSGk3>B59>Fo&VO0JC7&C6lN80~XhNSizF4YJ&c>XVEPSUjB+-;c zPXN;86q}z=CoJL6qQCN$uhuVwKc!;6V#B+hlbJXfJPEuMcd$`4L8iDU;gr!gO9P3Rx=OV=;Ffs)KsjRTrnT7 
z1HxBc1-_t?mWd-V?|b^`t;?t+h5mKpyO>T17CWo@Ww)vcmRiw!qFvU%7ON z*EdJP&neGjk68h<6%iua+sbGWDLVySsG-`1tF*%E-)hb7`Ms}x_4U_Yc=yfMd6lfL z^z)ja_M}Tq=GvX<&;9DJxAq+hTUgv=Qp_v~%19f%TDV}E*io{z*xG+4?`qA&d z_QnfmPM$QQTUqVf_34yc{RmB56#2Pd`?c2IgQXJ%?JtRIf+t(jDq&;G=BetO2XmDq zc!l|r1zvrItP4E$uTVyhk?rGgCn+XF*0O{0%4!3#6D=3jkj-xO<>lF9$G@`YE7l&I z20k@8xv;wEx+R++;xeDk+@T|f?|S%G*zN-uR_0DPsP0r_PfQcdd?s14EqP)#4|+on zD&f=)lA>AZRmOk*^PjWv>5_>~c1DP-Yg9V|e9Xy;29Rtlzb0nxzyI;M-+blO7fziz zNse*7H{dzLAZ}Kj*|&G^{)a#PDK3uWmHjO3-t4Ghm5+Di0dbsyvPhzz9W4oheR=c* z4rVr--I02Cun=czxq|_CiHnl6Dehnu7cDBz8k_v^7rwCc^0P0#^c*uWjbdSCVK^K( z5OsNS1s82RpStIM$4#(Uu^wen(M`hWU*sU^rBqDJ0x)NtI zs)0wyR+KG`y~EJ+*kg}L_eG+RN=`Py&5GG}+B!OyJEeOwU;fYk>7D0Ky!Psg4x5;m zbT7rw!7y<%*Tlr!o&yH%-n|dTwX0(uDdh?cW0wms%Ai0ZUL#bOZ-4vS_uhN2m7Anq zw4V}^l9yZh&gIJnmK=0*hT-^kfA@D2Y(25olib`MA46|akRzcF9{jEU@?U=N-S4QV zXPt$w?u6A2&B!Pich(VJU-`;!xAq$8;5|1K%Lok)Mt9yBx5enEDPsfMvRVm2YrqmUgB=iFmxSqBW`GhzV$*2&b+jM2 z|A7-HPMn-y`0QsteX(kj*ZCGH5aD7uuSNPc_{b1}k5<1RxV#ceP(pTOr8#(qT{R8_ zznMey?$RJl?O;c>qkq0}e(;&keClFoB_BPt0iRrFK3)u}+v(e&z|Gxuw}J5OPqwCj z{+u1#==Ic7Pq|%vWBwS;N~n>Ic`lw31793nTKJz>Py zeG1QY%-(G34JCz>{0!}#` z0Pid&DOpOEpZpy)$dXp%VG)UuZ3G41oQN^jPwPi}^`pCwe)jH`c&LN zB_t#!Mb4FG-SD=;){L*is1|ZrkwVgDe;Ey9LF}?t>=@0?VgK6q9DCrNU%h0R-8!`& z4~Uoi4A*F|<#)NcxQ;ae~+wVKQ$(Sad=$uUu4 zzP&2OZCd??#F0PV5*3Wa5n2f{TguYH4L4lotJth>FIn2P$bdC^ zUp6yM%TffEU5sBBOWy)8{UK)wt?th1s#M)qg`^%GbPn^keq-KOtS!`WkEl^i9wjqqC-LzBn`-8w?6yf z8*iSp0%w)$fd?LN!)n|k7%r%pC(DfQ^7Gb;?Ks2L)g`UKQgtkVF`KST@`G?aNM~|5 zIhYu(_MEprTwR#xdE2((VoqP8Nv(DVtCOHnv}3|u7ui`Ewyn_=1}(d%7dKbqSS^=H zI$|rVnKF+y*2FUB!0EchzRLw&&?*+acfxqwpS;3R){$sr%AnI8xFW-BX=!P;JJnzA zyF#|5Em&3PEI7u5d)LhDzQNM@>6y9T_|;E*`+ML2!ILYW{H4#tYJ{BJ}b{Wq^1VzZoAxC--k1AK!fI^?&;3f3ieDhAgiv#lhVZ zqnmHgr%149d4;)`31iS+9ke^`R@^tbto8IfCMV|y!>O5R+vwu_83#FO;Zt$)TWeu) zc4q(C;p*(vzQ?}siNE>B?>+g0Azieb_l1q4lSs1<+VJU%(vn;I_aC$tg^!ia*OHmH z%iduMjI^j7F7H zv8-~+R!x2f>CT@_R4?uAc9*R5$0@<#eQuE8d0o?QRHXJtOvEW0GOQupC}6fxuc?bB 
zXKQeZk!EY$Mrl|2iRw59fx#)j**&kcg!gKGja<{pCmfAC#DZ18WD{Ctd-KecYfT4` zWEC07W}o2MoB=i_h6Gr)n)zBowfoCYLK#*BE=Z2%*n&6&$aTO-gE6>dv#G!^GCI?? z@(#3`Rsw$=K-&FPpavkV|CyE>dMcrAAmq414(NAaV_UI8$yfab!gEtpZCTWc2t`X@ z)A(stH@uMp03^=Fu@Uh2@#ER`c959G<~1^kDOLk^Ai&|W)_%v~!-tL>Idt^MfrAI9 z9bk1C@&~jABZptR6xueIJB}W;#q#jcqx%jV?6ETEE}QXSarIabQ&mPOjh?_rSI0pq z$mNNf*I9p1U326Gq@UR(J%xOG>YY>0ny@Tk2?wZ*lIWkpWNYHYNg)Ehm52`=yzi_J znA+0cT8_Pv!+zYOKmw5G4g?nz4luRHj}q2R0@(q(F8^_D-u&C$_EKx{?e5CCsn&dN z`AmEH%yef-;^gvqc4im9296AWZWD!#R+@gk?7YQ@td?n3PCaO4RV0q{a-35+WLXPw zY9tEui>dq3C!RRJF#nx@_QWIkPg?`Md z!NrU7@iLoSZM_*84EZ1#%dUGJxJUF5>-LF9mc^ywyNaWDPV*S z3Wy+J_QA?2)VuzCthL{cTs;M@hZ=hIP~I-#dQyO(*V~ zjT??aa?rtZv?%j|8xGIq1E8_pbI(15KiBO1D*Hlhlb2uAmFBGD&ey*7H9XbG*U7Xi zE%3dUzY+vHNQS@t+rMQGnwy&=%2Hlc8FC$PPhcuYY6KN{oOJZ)qmPOcR-PHI13WiN z6*=f2%kksKEl)C0F|uHsl!`)=sx|RiL{zFSeJyLxrvu7O2pN5H%6Hv>xfTdsqcW3j z)|jmVTG_HE=^9mZnZ@E!?fV*-VE`$ip0reNGPH)XCVL#n!i1S<4lDuJNQ=o^-&${F%J-mbKCIFaPo{zx1Uq zIi^4c0e`dAd2U*r13;wblI)nmoOB%r%?ovOz8NlX2wmzgxjwMpAMoUrQ_1HS%iR{` z(n!|DIItau%M)MNKRE`QQaFEPvfF>><)6Rx%5%$$^Y)eaUZq_(&Mlnp&FwpI~3#qA#9!G(}0({nMMm~(rbr;tP$(-R_Wkf-bz#8B?$rhI5ZXRt0Hp!7 z3B?x`ilKcY(_G zxP;>tgf1ckGg9j|wh91%ikTi;4U~4b5twCF4f|mrCJYO?EE=tZSyoZYO$=|d|9AWL z?aU1nX@jYExp3Z=^oojhgfJ@xS4b`U3EBahrCML%q*AR&A5>~lBTF^xUy-w1jMRdT z3RPc$SzU#L8N@r7fzP2J7*lSygj0g@)4u6z1F5dPMkQ~)Q6 z94H7iaox3V+LI0JSt&?$s?xCLt(wEY09d6JK}5!Av}M93Xg8!G7MX)@l$r}sV>qD^ z3ie#2Tg^z@s5~sqKk4zjsXg>S2HC>qGm2mwvdhbi3tS{_+ICCU&En1nh3FoZt|*wD{cyHoHM zRg1aMc#leZF~sJIr84y;bw7+!p_lafh_e$Ut_221HX9XY=E@#H2GRy&vD!Vga1>L# zP{~7qT%JR;_xi_g{PxuGPe!8pwEM($ZtSX=bAtRET6JLkUo?B!qn!gDb5 z#bQAQ2EgHT5;=)P;!cc=juXiMlV^fIDn?Alr#InCj7RF+TqTHdEE{B)jDv7wVjN>| zLez}F|KEQ8b6|M)op%I|6hpJFEnM$?pbis+o_Hx~268Q+BX&>Wv|G^;qSHwsH+1A$ z97YNufl$#DhzeJ$#bT%@LWf7}?-+EFM%D)WM(V^-J~uN99rLxiQyd-hf=J>@n2u%c zkdf6L&L|;#1;*R>vv*%T_SyUEM*a08&c?A|%i6*{5VY9Xy1uw3=bZoQ$g98n?<5o7 zaB7uPPb8ECHhNjtX}I~ZiMm_&BASdf7)EpAT1{3G;yUNY=Ar^rgNU5QQ5qj}V8EIj 
zuk!Ps|C|QAW9X=Q!a^hro{kG)7{2Km14A=$*QmQ|Gcsu5PJ-dg_IZ_VcWr~0-!Nn= zne@&Bv9VAz8LAm4z!mBych&HgU-q(dv`;d1_@M~Q*6I()SJgsz0K#RLewStd8xY8@ z8{;_z$vD52yTy{ySJB&VG*B;9P#5Da55Ry&CleUn>nEiwF!R$gUcaHb^UGJXfD=5Y z*!HL)H5FsG1$avuIvvp)aCI4&9Oy}VVLvuRkk+JL;1~2oGF{2*w>E3)y)Tw1e6BG+?md6TO%mvH{;lS>fe?A-VIbgE}hV6;$K{ z7Di=w&?R&=G+LOsu%&j#h&^YNaxV%QI(*W6{Pq z$a_RC^u-0ZSST(s#Sn|3*j>d?gTId)3-0-a;z$tYuFjUrn2HGw91`KdSZ?toW)9&7 z_z{;z#xGr-#F^LkBa5iEmJ05L(d*@tAHH3=dTHy%^#B$``SG6;FV7Z0*k;ZLS6pWnW9Q!dEHMhYXPC=41o*PSSil)`Yl znA^R5i=S1Gef+i)UxOeXmSs0>;Dbyo3#jIM_wL+%wNimJ#>qJgS!eJ4^5e^aG&1$#O@#xXRq@$&+tEq_6aHS&;txv;$R}%(A`V6lj2HG*e zyoHd&j2Y-CHydEu(|!}55q63ed&jE6@Q-%Rz;Ct8#m9_3Ar9EMh=4(KQFgoQ^iLN& zsgX~8s(1AkYjJ{2me3P4#In8pG?><@30UibwJ@2X{@SW7v+-uYfLR#szz2&KaDw*@ zn{90eyp7NT74|#gV8BaUw-J&qss?xh>3~V{f>q?$U{K0TZ~gl9h)^yB7gZ0Y-Y!sd zTe5%<<#y1*GZ)G?+gDjn0v^Scz8?&D)I~)Kk`8uwz~L&@ergs_tvdeiEA|v=RySaA zixo<{kww4u6@Z%-t~AoxIl&S^N-}^{;%b&*XKY8EVg`)yz{Wd*3lU7VJ;8vVtxVtn zc4D#3^4NeOSzXnsfP&$%%%XsSwAEavR`}h(B7|UFRj+Pr0``< z5h%)DfGXnw`xlJ|rm-NKB6~XT08}awNX_ua$Gf@oJi3 z4#ojBhzbo(CvyTXDxJG9b#Zd~iD#a5B5Zl{gVd=ju>e`0yLj%zmW^xkIjo&w=7&V8 zaHE2sFH|@=PnHcZMK&IKVAtiZzjCs&I4}Di#($wSD+JIX3^(oENjTe!GqbT9)Wcla zarvuY)W&}5_;dAaF0D_^Ri1hNMW-|(89ld})F?R&bDWmU3e6XvAE&5=3pBUgOuRT9 zl@)0S11J{%Md{cqN$IY4n0C+O-~agV2{7kH#X5#z7HP1|a`2-^#0U;1%zb@f^5W(4 z{-N~-S#s{w$!%LUQ_V=R1Ylg4dvm!`5`*uEr3T^) z@7=TO()qLEP|PuRoPj3>9<)iKhBR1hxcB~DjY8?-Y`I3_AVqum z+{@MKQ!l*W6h<-}kJF5`8AHcV>8P^?ss;?N51_ii(z^tN@6=~?`z>HZIO_$K9w-J7&7XV=8$E|VzoYULJ0x(UWFI>EM(Q3Wg z3wm48ywa$PaR-bk0+p;5ne!5VV}B|_>>WA4DOGS954_}VWk|nKWLd(}NGaI9vIRVv z%qH$PRI8$Vl?mLz4*-KPD5w%l;kQT|0Nx-Ugi|6?9g&VOcVhQcMp6l&aIOm64h%@e z7zh>ogjb_%_0GD>(18_sZw^#NdB@;=~Cs20?El-Vh6u zlasjV5F8v_0SRiVN{j)bj(Ls!Te@1)3FHQ->XnRRvhgEcI&T*#s7Q0!2Y#|G11Z{G z+Q6iiatoqTm2(RN;GwfeQSkoz@3Ya3JqO?i%OHTkbqc)Z;-$?s??DbKgC~hMjI9rk zZ^uu?V>l>IZ2a?|{nc#EJ$U5H6Q|D~KK|90r@ub(#i=h&o%`zCrK4Y-nN8iNpZg;x zEIOHABN)VWlU-X4lza8-D0B*?d@7bgrxvG#xow0_xn9Ms)#FGy9A+_VyYd*dxB@EB*q{0S_dY*% 
zcWsNPzkR;6E(M0F@Jz?MnFoDifdo!2o!j&oqE%Boyp26wafQu=H4>(Lw-I{ap>`&F z_{OR>xr*qetj@FA;6PEWM5qQgclnanm&WjIorOLOstubM!Svt(&DnFtMyEyKQ zpU8@|<*^QUOwCvfVBFNiH?4(SM2wa&4HBn;7U9B4EC;9+Vk-Xb^FKKGAH0C`ww!z*a5%ZiwmG=L9oOx)arcAj$bcBxYF& z7U5x_uGY$o4zvqP08YfUdX4d^PFw^+=K_y}!tknb8WG+$4EO%v*}ZeKL?o^!HG=Sv z3*RZCVF+4EqmIuJY|Lje;wD#$?=!nA19r%P!H@gfSjm6(AE7Ce`m*?bzv5nvP{)>(3Y#P@& zxbEebUwQV~9~4UgDpdhy&W}4z5no?1<3_&6se#5iOeGKvPSDzj7lQ^yEi5L)NtTQ9 zSsdfD?nNPM0duvO;3FoI3<)S1l!nBSX0<{6;aOhvtgpYm;isyp%%Up#e0Z6}jW){}Ov_Cl1;Ew|gB+qZAmafb$i zF)o{?uT7>FE~=vzm;~zvu}V}@sf^+)pG>No&z<@T-W^-GC6Hx)#vL}^Vk5RN3l#@w zUROw<6pVz$6EKv5?GH*#3*qp=A`MjFy^F@h0ToKMb|J>sfsqmu58lM{6!;;f7Qp~q z4RzplR!63$rtqd;BykTvDNES9eSm@}@C?2R7n+X7 zreF8~gk1WbqBP_6?5$4=Gj{jJLaPtlZSC0B=XvzoPuE^pVPKWO^vuJV=ELVW!G<#wnx1iOIW;eK?;_pmNA@_pUQK?i9x>ZtncG z%HI7?xog(>>o++Q>z%ORct9Hw9RtVafa(a8fr~U?$?Unlb?fHMTfTMR;}5kXP(C;T zIjGRsAKL)$<0CavJ9MN{mF-j}cy}{AWDg(y6gu(-4M&LC3dpIT3MR~TEBGjx?~tE= z2spx&lBpffFadUaCpVgfqtW_p#q~SxfAD*so}3ECw-z?s=M*u7_sy~ ziwIau!=-5~IQU46`qq1qqnkha=pz;(jJJT$ku)ra42+JqBEj%|*BH1#c643JFypGj z0DT*n>B9&hm2^Dw5(9_ba=7XmbsGT&b=dXr>XOA+Gp_1{x!)jJI#Q90LuR)-VX*pL z?`kg}yeaBzL$IfFC@??q1Ldxrf(JT?P1C;ztwkxwOphpL{j2M4Dj6aw(r6S9bUILn zebZ8H;llu$4+Ys(KtKJ#x+tsUAnV^zg zC#ZbVuS*uV!UWKf66l5i#(4(Z+e1sQu<*M(yLL40AdQx?MuL%Dsb*g)1_nb)Dn(+q z?jw zo;VdVV&M3>YQm9ca*Q5zVKls%I)!;pCOEP(U@q|+V-_wwwCXgzWLyT4_Jg? 
zWQ3-@g(qR;^*M^8?ia_B8iXxw|MsJouT@T+yHbnYMv4n?h$D6_j^*4tCng1EWeCX~ zLh&b7W#RIv{OGqjC_`@|kSE$tzpE$^Vh{+y05?Yy9 z$h8Ffb3_zl!b-e*;NzVSJyd0NBUi%BSpHm(g#krBW@jA2K?tiOFqolZjXS|rSZ`y& zhGbT|fRv;ee6LhchxZj?pd|rTSj;f{s>J|u&}t#f6Yyl*uz@A4W)D|+Z4<28R`wlm z5QT_j?+MO~psO9LeXqMUtWptz7)5hvCSDTf5QvLp+%0=Em|?3@CrT}0kOP~t4fQft zVGo^xw`41ni z9KKWxSkizrdX^$k<4&+QnKYWfB6cu%qf%4uce)ov<0PWUOaat;$WJo zbusX%BkEu(y_;&GGeW_6@ofjtKnMpTSzdyIfMI_Ql7(n23h}(Zj&miBy2fX=Dcfzv&L50RtJwXFu}|_zuwtSI)V&0I zZkWSJ>xIQ9o_hA3cMnvtC3>7c=GPj80?u=?9Z@I5ZfGts9yT8`gudG#Y5>x|`4ZRE z%*i#fLdKtyqY5!E<1H#yWB&X!j`Kl?Susn}LO^WVFvJAhaB>l5N5WwQ@4xq9X?)#% zJHOpLc{U3k)F{&W^uV_TE zL7(ZL3fgM6{Hs)hj#{1m(ibBB;nje8jR`@j1nPW0H*iB>{%4bY{I_Jg5gcZ3+`S_A|AngK3S zny93Duw=48sgT*#_ylib!ceh5v~)rlu?V(&Dr02TKSZs6tD|KE-Uxl2V;+ z_@rOEuIrnS5v0LpNUc++PFdUzpy67Vy{i+P{v1H+PkoqQM;v}0-%5msktOM6q#&+Vk9E5cF84k^(28xX=`4jOy~s{ zVJ0S2E7LK6;SJrhTd34Thpon#dc{yE{Y=z=b&9;NaMxLpTWL3a`O&r za_UAHg=;~GeLqOjDh}P`1ly1rhwl#$d^kBh_3-15B}7Zs!+W5k$!u5!76&5nHYdwX z>4>`(7Jo$}?IKVK$qabI+v>xBksqrM>>&36^>dQ6BVW_Rz&tt8ukF(JG%%) zBWenJZ-GO@a-BhNfCbZkJV+W>o zu*XZ<*nS^ClL?lEf56a1U|Fbcrx8jQhAQlg^VEiA6}Xbb=zdB;+W{>vt)$p*NM7U^ z%cZ)LOcAKW?RWFF7Q`mvT4gD3KKbOM?c2AZ$pZ=OWafiR(?l#R>td*A8s-=&sx$Oi-}fD;h23tJ#^^(_uuCf;<2$2 z_Mrh7mt+E9jF@86Z2g%s<$yiBmpKHDRGAa{VO)(LdE^l~ARR&bpcbO)=(6zEMPf@w zs#e5O$e~X^A>QuVb!!N`+e(1Dv68YQYJzt2XDq7AtU%FgivV~@CL_QH+FN4 zYw^|l9=PwGZJYn;rC(O#nomsU%pJ>*d5Nzs!$M8sONcQZvR-E^gI`_~n1TS-;=b(? 
zvY6n-@`tTY3hHP|!J95Qf=1n$qFC)~Lm`dUqIZVa zVSva^5DvN&W0tq|4L|Y6QJ*0FG?=kX>*9{F0HY60Q1Ciw%P@^%0PJz`Ouz?qD&d(o z7L)d&uNb3hr62-9<>z4P?Ny_6!UNurBMFpVb^%(U6l8sieIzQuc&S~W z-)*Ei?RN}$tgca={_JerUvlk_#!3 z$1D~9)nET5F$b^`(r{!T?InDzM6fk~&0>uYtV&J;wm4bin!|G;Umznv-U(SCfSAE( zz#cQuiyAE*+35D*gAei(fPoNN>$8Hl^1D~hLfn3n)Dg40o7NNfrI^c8^4@*>o_PE_ zzxvfn*b@lTF*`FCMTNE+l_lEm!X@}@%esP)l7GZc98Y5G#o}EK0fdSsoU!Xi5p)zM zVl3d8^=UN?JaYIj5%z!jAAXiA6|1QnmVgU0QA-SGNL`eG!Xm}8R6tQ?1YAf*lo^hs zK62!UlwdR*2*Z-&n|#Q=x` zjeNo_NEJ;)=S@@gEew1*>+5%ocUk$0K<-2dM~<;%qU6)TXfQ(n+=7u2k9 zq_nit2jbgeXYB2e*CC4NdP+e{I}2NfCG;Yhn38V%>!j3qS?2xhq=c2GvA9C@0*zXn zzFcY1OXb&E7*yRg-gkC#xB_lSk=#$~K>ZN^&J_b*%jCD?0#ML6(1{+vcV z7vYub#e@#PY}ZIP-?M4|laIgk#%t^@jdGZ(Wh!dJ6H9~Co7E*8LE<|xUZKPf&_8iB z#=IyNM(Pgu^RW%fd1Y~s#TW-O*LMTo!BN%8a)o@kI``^pzrQkd<+91)y3e9rs{fw2F|L50xBScHYbV!<_k z<`vJWpQL_WJbzhBz^mPyA_)5sO_~OIe%^H@;Bg_J=NyvPe*e2G7cTtp#TUg3UdqXe zQJ#|riH__E4=}lsJ2q>I7nU(+hC?9Wr)n%7&=GW1sufU#Yrmx*T|f~vyx&F) z49&5^dY8Al%p^7Y%J(MJ#)xN2{T0g09yv^X=@zo<4xtMEV>Ei*-0L`!8P%D{~ zKq$DlGY@wic7pR;kp_mx&dymhwh$cx?aw|vCD;nC+*v7;euMej+;?S+6D9a(r#t2))G z63KgD;6BtT+cCDtvvvVI=)okb@Zv|D%|3`ncG7LbNNIA&rV z1|!F)2q6H{Y#&Gc!>@Mx_U+8Y_=GkEUt#FPg4Ry!W#PGHRn+MK#$xeo#jc1wkuvc@ z#g55%j4e*At-jBvd$(?W=9yUpXe1qkLG%qOEIGPM&C5`ir9{c>2S64<0&_>t5ne`+I^2lxU5@;o~7Q|YMq!4dn|v13X;aH!UH{WQF*}jVjU7MFNsHl88AR-#On9YVZ<46*w*%G$LM9U5A1g zsSs!45uMf8%i%Ptrg|hCpAnD5Lt-IRgrmcOtt^Qy*}{$!AXq^ilRY;7q88}T(sAzY z6GA2iIq1kt4?}GzsCL*{Y1kqU{>&Jq2aLYXtI%cUXjJly&b`Bjsl=%3*Gsnnf zG733xZX=mnb+Ss6?sD!gr>#ap1xjSiC9DzKW0%>|X6m#P_D&)Lr> zW|%rxrxK;<@@rER8Uh%l0p#c_UkJhc2a@p>-Zv-;I2Zw613D)CV$4bN1PPN5oIWfB z5Htx9Yh{P-LdrTvC&x}OCFRi18vM}#^G+7gWvfutKul!iD1}D!W2%Gtb5aF#7v_O& zC^ZmP2`D=xa9V=$!Y5PLvYM=PRN7gZQi8^v&=Z*~WqV6XOCVb;Z;+zwd{hw%JesB* zuw}Jw@QJ|{Fk8uwk19ObL#fhEDJlXpNG<#hXsNAK)7YQOLV_7cBnLbU6CxvA$8b1| z%qzq4Ml!&G{6KIp=pa7>h6OVw$y*s?WG@H|9LvDrl9&k+PJ;A_t-{Y<*^E&msL)9u z84;3n12>y?7_hv&5nMQWPzl2j{vRmRb>Vh~JkokN=rnsJMH{uAK0?=_r`}v%Er|r| zQ4iD 
zH~ca5gI)q03f>sjgL;r77Yra7(E%7N&>2L^nNk_2z;*MPktKHgJi|xI88Q^p`is)b zC_o~`gy={rbO!!K5M}}bngaxKL-EZqFpX-F%el{EY%3fPS;TgvQ&8BzD5!v=0nW|M z!9x600G89B#nRGe=uLU>Sx5fI(j6E+M2moerrR)@LrA)*yPh{ON&!=J)0=W&95M;n zNCWn{3w%c93`E(WE1?E3yY}eha?uS7bWaJT2uY#-G((r+A;zha^ggd9JrSPqijF(N zphceE!etu2XfD59$$P{&qH6+CnC zl5vN{QC%Y9r`Fh>xZ~Q8S0Dha`rv{~et?Gtngir(y`o2m3PHcC2q9cFF^G(cAeb~r zB`sq=d*bVP>=Mu&RUG{xgb=)rrK3jhTR{jw#7(0)7y=u8gWdGPEp%Z7BfJ3vL0jY} zrNSN516)z+jX-xk@Po*n_(KNrYk8OAiY}m|HXhmM&!1Owt>TtDC3c0(#URlFV4EAk z002M$NklvHmYE~ORL?m9Z5s;d9xYEjCExp+Dy)9erIdtfgN@dRTN)$L4Gcj0GYJzR9 zBS${lw{H*K5CEaCz-R~zxCUgUK(jg5|B>PBh6{Op!*3B**VFi}$jn3x7NTk3i7koi zVvH941xbzC`gP;a|Kazc<14Seyko~UNI9`)B97IkwR-$cb!1;&I2kSf07|^{Kh?Rpw?4T3I7t!HsA~ zEYFUPuOGW-2jiF92&f1!U})+Q<+XhE>(j^UX^g-U(I$IeGB)NAk*81^8{N2tDWdB! z)8MZ+7vq@@GZ-0J_d`ljBXYAhrFT#Y&ItH2caU%yNyK6zGd<>AguG`8(U>}aW_EfC zB4YXAC$wO=_x#-0#Kwtr8=MF`H9ANfFi$6z6T=?H`m{WC?s#0AuEj!kN$!On^Za^N z94oBbvfVGPby*Qeg+OVRKwufn$}^Rz%a^Av)!dXLS)E$UB(Lt|{Ll-F(Z;Pi91jx@ zR$`frG!@k*FIu(LK6Kh-B^h2h*!r>ek!@fUq-rXqYOS%i^$H^RpDA=wu1?Kdxpa{} zh;2A=lZY^4jYN7-Vco`U4>-Yy%T_j{6M$IQSJq#GM)l(9$xD~&4bD0!XZ|#nu6m4e z5J2}0>$kZ1HB1ySfI$J286me0=vVurO{qf9{_NF5d$Bko(fw`7y(OY8wyzKEFvE?^nR{Cu zd`^MXqJ2%L$GxgTmjRidF%Y;JvE*Rk)Tzy!J$)LDn;3A62p+B#D6Ij?aIkLe`ohFU zHyZa@2$N1C^4hB-?rMx6QaL)*ms!(rx~r*j7DpaKkO0o0?c2Lm^oo~ zvm#QRy>RXrEZ&GYa0LGwtW?Oh$P1#e(&*^MtxkZaxsD1g=_FE1pIdJ_Mh7_5;hVdT zJ8*u-xmT3XQcwdEQz zoc6*6z9z=U#x~yzR(%eGX?QtPOeBb`IPuKIb0-@F{7Y(#J5Y)&xLiS(jcX^Ob(@&o z1YyZ_Wa}$4sL)KAo$~DE^A~5XVc|5>9wx`4k<@cxer$AI>08?zzZ7I46K7P8UR|IL zF@mL!A%K-;faNFk_Nx5F%h7R1$p<5$#)!?+m}Q%c$3G&i zML;3klDTu|7*!KCG5bD#Wl3DmhYZ@jTNv52b%$R~VODcYdXPQ^9cwXccH*EOUeSU%AlAa`bxb!C>E|Z0%eDTzk3o`VG#6}zOqL}T| zSvIkDWaEYjr!a<*8flMCC^H54Mx?rtCFEJorC74`LVjZCw*_>8Lz}nr2k@zVu=6a# zl)ap6`tdlg_4Y}{wv4dLn@86G+nuc_ecO_}45jt|GIo#DH z{5I0j4#7OoF1830>X+I5(W9RcgMtNsJ$oN4jg0b@0TD`*MhqQUmW!fV64z>phjGIg z;b~4379b)QGjA?(CAHdx^H&caK1!BH9(i>0mQC2BQH~7ntOSwGyl@v(gLc473>M79 zup$8Tpy*){1IK_2=fmq8hyg}5l|*!Ax-`QZQj%H9a#5lFhLTIhCcQ+iaiOz``ye_T 
zvoHcFUAyqk8^0efvj0UwB6%)KI;tUZnKjGo^2x)~<@5(X{jd1^$fBA|6PROX&fMt_ zUw;i{v{;O0;!2b&u-q8>td+${=1!jb;>znU|KY#;OD8wZ1{X#+(HoswI(g#2d#@IA zE-9TX&%#?6inmlOwT^|?|zQ9 zUyO1%kjP{~TuA0Ve&gTj>0FMbY7~31+{ij<2p71))yWHoK6>T3AO9z>xXHr{ug)Bv z?=`1(<iKXdXEIKNQN^~PZAF>@T; zPI$T8c{UWJYv7WAb&TbADWPT0x?V-!p75+h8=!avqlO@53G#dV-3CnLv=bDw_v)-%t&7;V_fjGcvUv_+H(XYSHhpMG%R+=(^g zBaKExcWz|Rq#gyOzz@%VarV$Vul(V2f8uS}7C5|NWuXqMI6?i>-~DSftzhfP`*KEZ zjN7=lo#XWrT)g`AXCJ)&gXjO)Teq2*Tmf!+z$Db_O@G7z7VSh4s6C=aij>AYAqoXHP_(&=Qi!tmv9^Qzx`sA5+ z-~N56RK%{q-V=OX#H4_gh(XE8&VD)5aPr^#$zL-T1mX~k;<@Cch|Mi8R{qGD@k$BRH8w?iV#+9=lz4uyFWY}S?1j7p3 znG4hx&mg42FF&8t~W#xjD zOvtI7ndh=H+Qf3uH%MDkg(}_4^dpUlA!+JTH{`&oQUXQ}8kjvvEY+RfYCP!h& zUdsB^*-G^z@2MaEM@SdeLvdGRuM+FTGp9d%^OZ24u)PI-WBV^AFp-6rp&IU`6LVK* ztIz+3|B4&3Sywii3i`(6srTM`yI3f2j11?xM1_3J;waf%)Rf@t(Swo=^y(9H4--4^a} zF;Tkin6-`Tc{6(YOOMolTG@wm(Lyg@ORIA@B4+#JDhsS_X=jAC~?Y@ z$3Oe<`C6A&+LHzw zQf5rPe*NoTGoPjQ0hkwQ2y}#k+63rG1CikkhN0VZS=>@T&tRLu<;f@a&&`#;JbwJu zSAU<&7q@J=_nr-#HjHBxX%H*|`GPI2ge|+n}DwxDf%#ns)eevbl z11HBfZhP#p$2M))$f(K{1k(I2m5v2I)Y=^w;V4y-fBZim*u8Vp+I8rn6A>N+E;^}L zs_JsKdzFw+u6y8>Up@Z9j~#zZ%*$9a<7@x?i@$y3;T>zoidow5e8OBrctAultHd~- zY}vH#qeGv6_WOU^_sok<&~(9AY1><(oN}11V`RjWu^;oUo5Bf78Ri*HSwipOfGu@F zC`QVd*d58s|Kso1uN&RD-)OvWS(fdGlUMh;P3Q;5Rf{{R2@;*bBzDQ$u5!aC)dSN`YU zZk#A=+p6CYdq4aO_B`^r#c>8bI$3COqj}Zo z4NQ`hIWs%g08)Q;nt5gEs0%}XublynrUzOjOh^2|YyVc6zI6YNUHM#?HEQgP<6H`i zYK^!Gp5uDDZcX7|{`qhJ>}UVQ8NW{qC>Rdv>G|V_PJVvivB&nYX^10>Nr^vQT1z22 zi!MwXH%#PT`sLsK>0kV3XT$D{C}~)Fedow)zn+;qyK~Pj1OP6?;3i>58;eHR&hSlY zxZ|b#Z+`jTfBe(GcE&f0ulfq8wh-}XRG1*?5iDpc5qnEY%NzP&f3iNbef#!>@T}4| z7;wR%qOrG4P>g9r3yZxTRKX@Y7NOX2hZz=QW25%}{lowLohKeFmGCXXOe~gSCUjK9 zDIsQ!bL8mfXFh%HUmkqwk4;1)x<9$f|NM{t$3y$Jjjt&p1J~B zO6rYjdgyn*dFTf}aWF57kWBDd{^c+J;X98$FuJY~CuNq<7+Y(&z=aWJ6#nknxb5J< z&p&?cUmpFzi;i2w1zBS&X{)Lm?x4BER$ z;w3)Oe!)Fj0hVG&CxT0Sv1>E`^ozfH^0B?6BYB*HM+i^Ec*N=8Z0Z%^<&|fGQ>QN- zee2b4Km9{lV#fP8o_74&|NOiE_Q;<5$2UsgAPIy-$*3LFVxkoJ8zzo@dFGSf|LY^q 
z{m6WRA!<$%|LXt!`@P@Zvv$om_zc$Kb;ZOn5)6wpjMj~flIz&pzuW!H^TH>xnB5yQ zv62ko&pMOEB$LAds1Q_V!`h1Z&m5<-1hG2)jPRuoCTCB)|MtuKzx#NY3*y>rv}Ro$ zA2z(WLz$We6NSwi)_w57+vCMxbk7UoB#PS+YQp97#||IZeg6*ZxmfMlHY+n{*h8$u zvQ2;?qdRrYYd|&*VYyCA^-j z8;{=n?LYm=pZ-U0?M~?#^qVlaaQNM?4!{4zqk9NZio;?dUyNDbaZB}hE)4y#%-yiI z@an(*;)NgnMf9zQOou`(tbcvt$eAw=KKAefbZ~S`VMy^@CfY2=JB9d=qi_7?pZ~{? z{s3-3k-z+}oK5%FYh^zqehA!P$S7LoiZ1qK&+ZO#85+3feMjf!uiq&pi6* zqir9DkITRSY_G^fSE1{HOzcww5T05olP~m4z{LUf$tRyQePf(IE)KHer#7L7wJa=3`yh10Sj<8R8}EradOSbt#&f}zZy~Umq$O;fGL~gXijOeE0-L3n ze2dM6Q}RlsKYR9%GR!G#29{-4#4|@y;rz>RYy|!?Aia4~C7IHZWeN0B=m`8bPd$8l zXJcT+gX*2#ydiW;VnAh(I>FJamrTD{0wz=ZiJgz=IuaBI6OD1sO27Qzt%r8qw`PQG zoh<$r>PbG0WhWpz|Bz!CgxS=)XYa0Bd8#~lMj}Es*d@o{jQF3Ans$Q!ET~S%>nP~*{jQpw7 zC&t!}t>3h^RK zg3?kfi|h=lOpld_20FKXqPS)(cmDXN1hB+a-B@ajp#jx~y*3ns+SA^U&YqQOIL9&0 znFV$Qs_&$Pc^E|LfO-O9*F@A-bC;P%69K4+{H1f3&dpxCyl>AAKV$o6#mj1qitOY~ zYE?W2&@D()$h!~hxaX^5pPIbVksGtuj(qyzLwoPXmK=DEkOMiAxmpE(WSNGG66wZ; zqWAE_JC7dugwF1wE@Qx%nYwu4)C0S>h@H8Sf=PNGE8ZB0`J=HMd|NE!c5d7D#b<}b zs7yc^iB!=ujT5DW?!f97b%D&4N;W~}%`asb+xLk!gLcbHE`4b~-8}!D_f|2caM23- z078{EjEp^{4sp`4Cgd>d!T!hJKd@)l10#hZyC#|GFuu{tU_~aA5=H{I z)iirx*RI;!bUJxO3=K%FMtb(sPjT$R`b1zH*=s3!x0uhwQzH!tiW4}wdp3TnkydM0 z&art#f~huUzxw>6-8=8Y9?e3g??g$B7)f}YlwM%TKmRJfb zvk5NDn5jLt3p^|Kz$fDl)}jH-jTJ~`Y)irJ>QzZ)gjIXf*-DA%!B$xR^5~&GySJ4H z5S7J|-yqkFz8(ijN(Bcl*x!M<&M6G&rlLR&SKqP>gJ( zTF=L@88_;C_U*koeXV-wqFM56hj~TMicF=Tb`6TN4=XBqsmw4oQLU+3YwWSSg2(Ib z#|J)q_`z@Iav^?d*emFKa0@$8jK1kLq`$aNKKjs?$3Bw{WG0LO)PL{oH@4k-Pd@ZG zmkD~g4L|bpA%Q8F(?TI4E4tDC{SO}e=NL0qhX~&9(Fo(=m3>t^k}sj zYKg|O+>4f4*~K&2@tEny!L9QE6BL{3AQni3S5<+Zmu&d6Lkws~K$4g=tb2X;XZu)Psj3)XX>xbJkOq(>8710(cazxWzw|ER@_^R8tzuQw;Qk%V0+! z)ltO?(XwQA71t6<(ai`P2OV)Vv0`D#>^)C_k^{Zk=?H{in|OeP`r+c|nl)ojJofmX z{^Tcr^2dL)d*`l^Qi&7run)`5X)`FuF%Qhp%>{(azbFX{| zf`J8b-rEaKa_T#F>^O1agaBZ~U2kg`mNozkbTh0D0IXpZ{l)-tgetV(B8?yPs=7ZOAfn-%dy#hW-^%#}v+7cVkh$D2Sl5uQ7H64L_; zCYDP-%CRktc_4RY70mR)97boEb>wrnU@%?p%>^cr#)?n$b;?H~4^k{LUB`{3=L0I! 
zZqfVQ#<_E+M%Hk+TZ9II(jewOj!npzLh~7-;F2R_81AT(m%kQki_gAM=j(H4)~y}G zyvV4}K2sEnLR3&h8RRhhkTgo}*wC2Rp)`BFY^ zRM``iuqDcKO1WSp?_Zm`3=@mji{18eBOi7_T>xXyR3l>-*}Ka^rD|bZQ6F5S?yNCA zIkk4p7>-&w^aytF#xX&O2{BA#*>uJ#i+EhQjZGUS&Yb>AP63jD!S$)h%Ud>Yq8o{V znco=^8A|kgEBgV^P63(-nfSUko z-DH*Z0sM(JY+QHl{8=G7v0pGKFjwr+1wPS+v6`u%(^3t~^{9fjVklN?VXhAOQ0Ce` zu0GwU9ZNi8KSH*&w`hgWs={g^@H`tJV4)S3c|E)AgqEQ|xYu>De{I;b5zb8*iR2xT zR3d_K6H6WU7#UHVs8)?fM@knio{@1!CQ9|om%m=Wp1pV&vWhHeA$)ShLYk$FH7sf1 z1W9=Hd~u{$%AGrZQX~(t^n=FfGheP*KSAs8wex%ex)g}g8G5e24BXHTEluy&m74y-qc(+iP{uok0LWKJ$3kVvkEk&y{hily+{^kt!-mm#d|AxInOB*bDim^zGt?Df#2_AGJ*!q@ z?3QSl^hUPN5KURUdohWWqks3iO49>Qm5?X>eXpo)MBU4jXoH9q@u~eL1 zzI<`p)-8pcMA$?vNm-x?$i*4C##+?UH}a(dF<}6dX@QaUbVt!)x^B2;R0I;f1I=ND zMRk%LENe3SIvEFh$S0lDemAAjzGnEA{cRHAeK5$hJ&vkRojS$&hp5wo8G>(0KHyO# z@W2IHn1^B=CNVt}*mwXu-jj?s;}f!P26s}rHr_V+o-)vtL`yfi1cpVr7E@YQXvv~! z$gbPYb&Jb9A98tpMCx@gD>vw#GlU+dy0 z7o_Z|g@GJX~81nK)!D!J@;vl7La5DyChI%v~Wd`mb zoUbbc+h&JPcvjv}ajF&B1jC0_gn^X~tE;G~Lr@oDz{(MZ)$Vq?!$3{Z(v(P4g7EU1 zn?{j4%)!`Uh|X6hoS7_KF+X6XEJppuJqKq-=7Jnlmo;)Dc~0z{nY-$6%rO@yW>l{t z%ZQAG%7`TyJ7c9>L-ZkWkaXz%Od4?|W%t`$dD?jQ;Gj@W(lP^JBB3mDb2Zr;pEyaS zK38kR#N?l2o)K`Mke?q3X3Lir;$-D-5RuH-nR@k5>@EV85|Qelpw`&SdS!z)hJMIZ zBVb^)%HgwOSY%n+5AuFkV3L)j9;Scjz~5-8Aaf*%J0LE{=no9H*l0XIj0ti}s9HIp zReZzEtdALCP8`GorOmUd4=%@~h$mn! 
z^s8l@gGBL>DUFyI^iQxQYL5I|_67F1u#VvpzU<81W}rr@(>@46xp>n>VKlj|)hgbb zI$yP1jaX-H__-iIH_I}~DD>idVLbQfRCS^Q`^AbF>O-dPc;sbF6g{UJ(_`XVLeS`_ z4q1b|M2N=ypHrPo80iGxF1oxQ!6gj>oH5x$b4g;?Bhm{tJKVVHW!#-mL|AhWE1n#n zjJ=?Wce#`JdD(f-giy{Y@9A*FT9HonKqmT17sy;P3ni0A*u@Zw$sWG9eX46oV0x9H z9e$n>Wj%MTQgi_@CF8ZxE66E_Shm{p5m}fEQ~r=m#;{1h+BnOQLh^?J0%H?Qx=F23mL1t)#1J-DDd$I`a&5*~GhqWt zO)x%*c`@{BxI5y7DwC@quDW3`!U0Ya%iRseN@H^~c=20?L`F&1Y5gq^8Onj02eGW< z&hm=rDm4_;s-Ym$a!b(V76uj&PfbbJ6yRD0p4~(z0xL41EtN)Dc*PH~lFl`lNQ#{i ztc7)#Jw_f*zQvLJba_e+yk$yDTpOlOqJprTZ1fF+<3f5oAZ6o;EI3!P)X%d_mR*~f z6yq{*$hgDkCc7j|I6q8_NI%YX5%V_-&QR#;%rt{LVTosEuQ3P0Le<-gs@YAVdmwsS z@*<_-JcW^wsuxMq{UxS zAl)>nCUdb<%>v@NaH5~X2vTF5$QQk+#BQ>f4jh)~jtnIXPsU2fOFB~KaaL*=vAB#1 z!NBB(c|sS622*dKL=wP&<9b*h#qeP~mvLR|<;sfM`rA8Z_t?8d&UuBSaam_@a?JaB zi!zpY-KKot-BHE27qvc1y+UpT3qLzHRPkfI#fg!6##pFy5aBDfB`#KSq;DrVeAhE=)L%`V4~8YWm%P&U15f)pCUgtjTO^#AiXWP$9=%NHit!?ZxMWSp**tg4Bh%nN?I#cSVF@pB4ut z0k9XBm5CJ2HAo{P)zJbbd#FctFz7{_qzr+0=mWEcn+#wk*S8@k1xOa25V<>j#3X6TkN({lhL$#OGL`2q<0gjv4<_9SOl`F{umh%mF;L;VMX}F6T24172BWB5=G6NQ7Xb8{zTE1;G39; zSj2+$jck(IWj|ikEij5;XG$1NfKXU6fU$+peh5qB^oUsr9bNo5^lT73;>8fc(1WZI z^B1Ku!l1sRgTu{qIS#m!4HHN?VMBc4QVvxUTb3-OHLqrWrK_~wbows@Qhbtu2L^H0 z3p>rcvUEx{Yv_mnK}(lKHjPr(#Wf*#WbshMja+a`7~~xeuZ?7vE6M}Z#NP%y@({2G zKCMC`u@$l<#YwB>8DuGTz?QyYqz}-sb_|flCy_A-Q*$n2>_Cgq?ra1Qdt{7H6ZOLU z%!1Ha5_6Cp2mFBVl%bHC#aP{Kdl#Td7iQ?91A`@2 zh=~CrPUW}=(7$mPXFQP+#$3EJmyAR7ZX@}rj&HmnC0Te0dd(-%L7*e5W}73bFkzD) zwan$*DHg_KN{nAWXatsCBC`cavt8yVRiLOa59dL@+lN;2W&C?X!=*s!Q$`nHAjqM$ zA>`1(*yB~LvDIsYo`Uzi#AIRRKOiC%Y8#yb5~8Qe*^j(5775t_VDOJJA5`K>Q;(Bf z7ZEY6utSN&CZYxAWjKMeM+yb$pQ6jt=_mw}iS9?4h7CA%K)ODnp7kJJrx>$Vd{HuG zP_;EOT4dUX6<5+MKxP=Uv*7v+xn0yf%yN4%fZ#w@h@3FuUMz7&N$LbDax2|66(DI|X`cQisA5>VV_7!o5E9*8J?1S#Vk zdLt50Kx8r?8X_y7%+Q#@o1)QAQ7ds#NI2NeOaw@Q9|PG5Rx92HjC@QaSS@2ejjXOQ zJI9SotWq&sA314zSyx-DZ<5bqj|V~)FjZJQ{Y{Ek{wu1`!3-m#`sGVyjs3kn!!wuUQ-hi^_Lb1^2OOjKna$hakwLGEw?qaCw) z9IwQO6blYsLYZ+t#aR`;VS)}Vu;+*e7>puze}|9=GM$v;t*hdI$5bxCH;@g*7?(YK 
zlPE&QGCD$o%znBkryv7{S#2NOhsp5|s4S+MCs92TkN%y?Yy37_gg7ktGf zS{b%`>1#S^l&FQo`^EzpCmy;c7BBQlvHS{|q<3R(W10>T)hU^M1ZKs?0Ln|Od2T&s zMS#gZyu@x`NJ#EpUDU%=K^)sDRt(m1b&gm~zxbUhtRAV)FigAAlb1cOba9Y+@)fW_ zl#UK?BPU`j8ruukrnSH6{8{rt6ck4sM!;d`bXl@q%!CXi991R4$7M4@1v@SRE0c{Y zB7qpKkQ8t%{ASD_b#_JLfeQDEBfc1@5Lz*=Y4A3jNonx3@qjc^2DchI3G*ceCfbb2 zgwXidd@};5bKnyyH)5$SCd)=G4A{s*np|RN_Jis%9zKe`9e>ig?+}I~XNk0429YWaFlQPavEqx<4iPZun0*xgu zu*P5mp^P(k#2uekUdrAE)O~4%GQkabMLmuS5kfr&dUOBSAI867Pf}#J9y; zm7w0T6{+r#kWg0W8>VBD3)gZw4#a?nvQfY_BNR4CqemoSXQX$h7&~|&S|STUvh#?u zFQ^fvQS6;`K+KwyjR?SS2}uzIlxl`iX_)-A;Adc3rh;gC0oidVW;Ex43Yjps3hcqZ z$`bP!Lye5Kbb6vnZ~JEPhjxOBU_=0d9vP6$c+6*mAD#I#p0o5-iyL87B#J3iKs|$b z0EuJ_>#T#B9+k2L46%TO_k3kN+M~~+4kiQ{myuJv@ns{DgbWjfLH^h^C}Cm%1EDGX z4&MMkl9O*6OazeCkQH-?C?15ftJD2?7RLh)s5XTx-qt0)Dq>N?Sn-u+8_2b3`e}*$ z%lWK~p;f>wbo}My9;Uq|2H-8*0dJY8cORf2Mpl-=U>k-)7~aa#-c{0RNwl4d)M#!h z1mIg9cw-ru1p3UKv8_Rfb~I#m%rMBJBQd=a!=ZF1X^ZIm85=k0wh|S_^azDU3u%AT zDU^$*Q}8I3i@aP_iH z5Ev*@l8XLmGP2qx>w&~9z`ZOC^Af?9nTD7SWok;j#`0`3nWpKj*{-%X`bat>UM2mK z7itP?%bllRvcOv4G>E43 z@3jh+LeU%mK(jAWFQ3{k(nCbXEFqJ~{Pv!2(4tE_9CRyNg2cE;LZS>X7!lxx>@*vx z1;IpcL7{qsh&qH2kMOv%U^5;q4O6-NRrlVB`YuB=)j^jHdoYj9rX*+?}~ zG@%l}6gY}xMO2JCSeXTKoWErk3k6B?77}<~$;ARQ6kJIO%gu|_vU?zuMoYHECRq5O zwe#L|Q$`T!6x!*a6=Y($GBlpRU=_N7T?~c%BO?X>Fm9m!;e4O5btogYh@{8M&FG5K zaaiUNb`gC;%z|cN1|y|0>2lgF4>CfTK1*?QG}5cq0g;G%Agd0jjv*P7fU-I+E1OAM#}5dL}w5_H)2ZJ z+vs7iyzEuJ{s*!|qn3k-)sq8)3Pqp-y#^s@)H-s}8M6is9Bc^_(zN;%L^rz%jZs*L zWn`e7cOhKEWqPhiU?>i`fr6qbN>-Fv3=jsAT!tl-ik-B0=`?gh*1&8!@M)1_U#{>$ z8ABZ00PWd{-X z0IY__LL{Dy96ZOqG>@=k4Vmn*Th#pVlZl_W6re*A7l{eB(IG_}#D$0B(b%wy--mdT zh?R*yUTWy!5OX5Sp0cclqNt_;{1up8FtrE?x5v^Mwqn#r>LpYz>Lm;>o=TV$O$IYP zFu>9xy==Q$FJv5D?u94gz&8H^8`E_pGJ^pyFAl0UO4HZ|YAz+Ny~80~1R@Y}NK`l? 
zhlu9S7EMBa66_C0V*Z#BKx32*aqnfyM+{qLUq)MEI?beu)5^fVf&~K^gudie#kGsf$5UAq{i+lQOh)c&=Ntk+rEr~;q|PhHT@zEF0FW^AJ; zTqN;4-JtH%w;+vl2aqa`me{bw^}l%;G&YC~o)MfFL$GW_GiVl;kg3^qX3Gf7YBu$Y z?>UKNN4A`0Zu-0Sbo}sm(2VqSOf!LWRUxN1BncDRnp+UiM!i}umhmqI;unCepF9+@ zjGP@OomVWTfp!XNx9YgCJ0hWN5y0y+9RXv#lS(NH+)m21hGO0 ztHbBWrRJCgYa?d}djNM!0xX7Hb&Syxfrqovh+fPBB@0-tPf*mRRYe}ua6-Ydz1oy2 zL#M3B@GqT^Zb}pECB=4dfd^74epuXE==$A38C@oUf^;#>EH>ZeT+op*OAPLyK=BO8 zjP%C#o=UV?!Mz-N$u<_T$FtbT8YG6`riP&AMH0!HBBmsU4+%g8nIw=AW@N0oO?^X% zC@Y;pHKT8!Adrz&Pay*ef)Ql|zDJReagAL}GPE(+2^Yx$g|ajd5;sYXK-6(o1efp< zn77w*5ODLfxTFoz?*_i)1Ji$VY0M%b>B%x~cniM*-d3uI&z-XUMJ5yA=zfAikN#|`h6`~mi?Goxa%b9{DEIHf7J_0G@s zZm1>LPm2^tMy`1Y3_~M@(8M`C!r&0WrMiJ@!UChu$wSJK1cZ}@u0)Qald@tW_!RA$ zvx_6A5wQ)4aA8zI#0b#>5qjcC4nbx61(MTf?GTMz7^dWOP}y3Mg@Iq>oJo2#i!qpn zvGR+JxTV!WFo;Colai?w#*j#4yx_GguG?!~)0Xv9{X8^_3fN_3?=Z0y-Sk0>j2-40 zCR+ppP@k_#K7C#!aj!m65mPWGee^r7fz`Wm_!%KN)V+`|%6412nK?cVcP?Vc&?3rb z*fZh!2^h~jf|0ks7uW*^?ph6t&{VIk9{sJ@lz!*a(X7@fCS)##%tH;~VnmH{gvdiHbuj%8 zjE+5O#ITqq28tc&!~oAl+=924M=LIF!66iX%#PMqf*$r{BB()Q}YIFT#|QzR^6V=e&qqN2=H9( z3HtQRsGwRV^#Z5fJE@D5pieK;#2aoDE0>u^o2DDAiPNXfQUsZ;3mQR3`D2!V?XNl9 zKvOw;2HzoOfX1X^GAK=zlq?0Xe9e(1kVA+r4AdY#2^MFW4hY?0i{@BJ?l6aWQCzyP zN)WUf`xPb2Hb;}%7(|s;@}}!c(TD{E#M0ib%jrC$n&Dtwo2AtZ`(pls0;z8SQm@ue z(-#1|2|H+{_uvJG`*jt(nf0BFgJMjORxzxAYUyP{6~VcoCi6du3JGe(KNFjqap~ck z;M|Z~N|j6AHFHq_^G8horhl2wP2s@2Pf01Wd5=KhYO zChHrX5d)MVwJ@+JJM54w!c;9S7{g(U!{K(0&+=u>3--WV$)7~374gF0L8XSfwJ*Tp z@&;cF?hHG`E#d+UhfwTXf+Y!(Q2X$R-7Kby^_@iJ(L&oDR_ zq#_AJ&qS^wpXT(K;Ia2a>VVlL#HFhR2fPh#da zu|NVyFRao^YiQk$BgB*nG}4&!c&W3gT2OC0H@4?m)-&aR6DY|1oLZKZx9h}IYE4vv zKy%rLg~b9PFWVC|>1dMbv!=0m5%8GtWPq2YNN&HB8+tskBLi@=14V3jPaCFtdjKwu$ zGdAxi0o2GJ1i&a_@nEw(v9Fb{8af(* zX;2}vFtPS>uy~1{D=SXy(?;>2!$f7e=8YV;}XZ zb_#Bp8MW`v=vT$Va{GV*qe%6k++mS-I0jTE0wh`#(-O>@JlmHGfmcn71e(U^dFCb) zDr`(92TDAb@yIf7#m<+-VqYM}ADSjBlcE$dapvH0{zU)7H%lB1Vw6H0i%|zrK#5^T zfw;%!*fqgCKvhK9V`$t8L~k@oX-w3KJO^3G{$DDQz-LaQOhk8?Pi8J>;z>lvh!U%r 
zkDH{t$3@j_`bADswM-PXQS|7+%O9Vut>JW6;%W~|pMia;1Lqbug_AX`)Gg3iXT<|dTN*HKKCB9`cPk=qZ zB6l-wGsNUD!d$S{7+>Y2hY@Cni_&9^(SVqiN6dF0)Qu;T& zRp7=E9s9Cew518P`A4zQ{-(ocwVEk@x-MkNy0m2OsA4)@Uoy)boBV9z?yYA)2a5RmKMDG-eKX z+psrABw>844KdM&35pRwlXpdV=>xDq|3?lXa&?R}o1l>b!InF{Au1_TKzQvg5k<%ssQJx~sZ+WjC8- zH%W=4^e9Rcwc8R!NnT`l0X!bS#sCi(!1n*~@P3$w`Q-tF!Td5XV9aHRU{4h_)GRjsO5a07*naRF3B>);Uwwa}~Nc&$HzKv{3dem69RI zB=L5L+f;<2Qa^;E4lph^uGBqWxzd-|$pTiYm<@UdK}strAIJ@**v!4k7Z55{j-{4q zA>AT{Vtb>?f%pSPkVMTY6RN&WGBBz^xweEi>b!uY7mlY4?))Ca(3A59rSk1eWF6J~teg_@5*5k8 z9{Vw$5a11FI6$htihY3e!RjDdLxV;K&hr-1*@)k(G}bB*tJdfB7g1J1!;mjq5s@_M zktErX`oRzkGY}?r5LX+0TN2t5E?Wa0@HmD1X@deJ^@6rJP`U-nLSo3KXZFt$mDf3S zSp#N@p8ubq8dKtIIb3u)u1Bv7l@KWP!tz6l(dbOhaxcn!OXhrwOv$2$vn6aGSc^8Q zNAwcDkqepaag+z;5H@H4*Xom5*y8L#W`_rYK|bx)=Lg+Ujy*nxt2H|$P1F}1^^FSN zH6xB-x>3%rz$USI$G&rX8o;BUnwKi&hn2vu+3sML38Jo3hjkMW=v!I{P+3a{Oo8*%_3H)Fk}3K?51PNb}Kd&8s0PA=`fjJKmS8p=*ml^QyTT^v?T zJdYHFAjqXt9Wy3wLC5zIeV3YgR~We9;qzU!a;ebo90m~Jtc%ejWay%#INck!4#y8ON)Auey7qEvI;AsK`mMOKd(dZG`BB=ky zKwQaBF9>9dz+_z~L@+m|ZXCcPzBxt|r@x4+pM%F3T#US4$QQBs3Joq|QI>rYQ75wk zpu@`7EJQOk0+`1+f`5S(0r3l=rZ3&VIJhn~CGZ46UsLcy4P)3~(3z>UAYx02j`P$* zPiMq_;v3z&#Fn)6)3aXS!&v_W8)waUaUSA^!^)(t8FHMn_A+=Xi!HDzOUo0Yvjdpf zfpQSf=`I}ZVTz~`nFyky9viFlTA~b}Fz@CRV@hW-C8!FzaLXeYCIQ2G&ZrL{OdFvf zbPDR*BdzHkO#x+{uL-3W3`BIMyRs)7GB?I;xsQ=@&TG^FN?E~<;&kMkr7ko>)Ermf zrwI%)Dk%e8j$df~?|=RoXz3A{#7$h@&@n@u3k5P0f^p}ZUUitNTtDCug3}^iehOeZ zx`8CdBnW{C?IY6cFwy{EXT-DwERG!w$=K~H-saGAZ7nROm$0|J#<@TUOQeo^*-#{h!)XWLBR~hR-guWWXmDtL>GjXmadnCN;!yBpp?D9a_@mm2r67^E*pbR zl$o}s7g6M7mj=ZB$ZHcprj$dPil2kD9A527XHKW7$ljn0maM9io~NVIgd0MzoPZA} zm%j2D zB^U%M%aZhvr4K6DFW|g|!r{?+<&$eVOC&&rf+)AhCdKkzhP>DYqRzjaGT#DozD=A9 z!a+OT*Cq{)(_xLoJ1iK?`H>3dw7?reB`-0pvri3y;)}$1c(uD1&puQ{W_bxreu_3{ z!LTQQTe!vQ4<9e?7&(q*RjQ2a4N@R0AQemnqsZM)Nm&o#fm)YMj@TP|KZ9-kvJ?q| z*P+ZXdT`+(%V5AzzFv4`o0rISqD5(L^GuggFAxKeJ_I3bzY8pV!P$d|*nYGdwjWje z_Uw1TaJ=gRwe}4k`S?@M9$^g}{j9eF2K_lc>A)AQeFC_;R%<=-_%k2==qD?U_xxOx 
zAYbSca7$qKj0}MHIM5I+{QrcGs7mWQ8B2B+OPkg|{E?fUedYxQ0T#@qjrp!`yA39# ztJxc;&jZyTf9%N{Kl-tXd@{wl>3f4%T4XFhBCOAaTigQg10CO+-u!#7HTLxVd&k1R z&;G|?hCDN?Jg1vDqbT%g+@c7W9!p2be`mAXgEV=tjkOs;fsI}Lg*i@alyzRK%xONy zH|X_5M?l(v%VpAA>#|=&JF}R0WBm>IQoJT`+VVkJOho=mJz~RZp8|SMi)9?j!$}0% ziy{cvyn^v4Q!~C_r0tO^=y|lX6`6P&jwt|1gRZ~;T2K-F$^$$~)&?gvzMDYVa5BD| z!WUi!Lo+ByyXRASCR<>Df;m#E!~+tiduRoqp|#2;F9NeIRQ9?YF(Qg=^_V~K0u!$x zRtDWly9p^_OnlCtg9>D4tB7WWdaYO^%3OuLs_@M~A0v{7NA%=y zJLn}wilc7zSriyR&SJh#)oC;DU^_A%AEuGsGHyz|ca z7yNBG_9L1J@-g`LhbW7G^SS4qi_$YZf8kZm zmY5Ni-%?}D*8XpIU+x!ZB;X1}!^}Wb&NlPt?6*M(uIE4Q&=w(65a!4HB?TK4@Nzos z!dnHy!M(%n^9$;LG?Wg?<~a^SEt0SS4iOPsVowjU`a(2V7wfb-C?6>}HoGYc2Oy|o zsX#cDcJX~MfoZkb{aG1x#xO?~RE%IK>WRO6k_{yQh`P2&?gFG{REZ;^|DFxDuDjb^F zY_!-Y3X_WyObi4;tAe)LY}m04z>``D9;L<_8YCoDmR@*~MVw(6dG3fzknwPp&F08m ztSt~>fB#^(R@3{@9&^&@lSrx)v0xQ;y)rV%Kgqe%F^p=AGF*r(O}+a-2jLK~if0_e z7To;qE1W3_VkJ0*ipaKKPRWs(f-|?kNgy*2<|5LJG9G+LP866_+w-`Gq&&eW-r+E{ zkV>Fn*;&R0qx@hn$4T&_#agYl29i0}h4+>)>E*P57$Q3hW~>K5H?0847?d6r6_8)0 z@IYU^)7O@;Ffj75Itp@U@PUM6HWTBC=r^2^~Lgj@ufLW}O&wQJLy7b}srXfzhUa-F1zm8T)%zl~Cv z)^%}X<4SAizAJC}^k<)V-qAWJHH)bw6X+bj)|Z6A|r>vi-wWxB?)(CBrh>;@$(F z`qXD0edJgBE?aK%y3=TMVsMNv3bBuP^~9-qqjUO9@9?!B*mL+==2{+g)10|rRlOF> zUc8GGBmxmobWjr&cganBk#PHdS98ft{C;EncceXZF$xheMla){V#zMzTY;CK#u!X- zj2m`LymjJ4zuQ|`S?lu~tn#MAVBAG1u%gv!ViOJdDi`k6yWFFPWusG=ZE3-e?CBoyCsNay)wNwb$Re z>#j4y_JQ#$Cr_R}RM%^pep8hfXxPP!Lz=S%wT&{(9mW&p>Ss=!y6?XGsE5`G*z^pI z1R{7{8J|E4aayJOT%+$(AWE^oPhd52vXb`xy!;DP5CVIM(G=h_3?=BM(M(5@pgXTd zo;r03QnFhR%N&47q#h_pJ>=21<@>8k?a|=WJImKy2@Xi7MbLrSA0w(~)M*OG$w+S> zJNlEm?(9$6oyzI)>9;T6+g@B+(ta?i(|^6tttd5llNX4>yK>_Ash|Gjr@ej+y00F8 z)?!w^Smq7 z<{jeZ_T>wkY-j9&4Ll&f{`Ifb_k#$b;-5G(cs*f9R>z6k8sewv9)3{qTpq$wH^H zI$AxtW4^z4_W~|s2!`_oB0@M(2lpFSw(3(uy$%?^`1#$#%3QOx);;;g;oaM$Ve}<* zAk}0K0p{vyD8^@f+>;2eoL+t8;YU|S?GC;dym76$kVHg}fNg;ozU#?0QVeeg_Gl{; zcOyREMM{}E#GzmiCDQX#+I<(5{~HzoOVJTv2;jLzo#r0>Hn5eEb3WrvZcFTn&MC&7 zqeqXz{4oZhuK*8$g#>I*#}&nb1z%8Vj89a0$FII}5APn>eSlv{mB57XB&!+XNqEj% 
zN}ql1$nz(E%ojU%aailo6Z;P^?vPDxidb&4rx+=b?jjxzua{qb>9y`%-Qi@ZGB|zu zWTSC88}_Xh(@8t{NpJ(>BNk+G|JE2TkDcz`f8YIs`XXOeWpRWzsL_EPj^nL2<`z0I zJZztNNdTXfg{ZkqFQg{#WE80HWq<@g8w_)H_6YI;3z-3rL5D!(%*qLmI~sI2kW^zi zXBwf%tE?vvYU8&j-D6i>u?w}sfi~za#_co~6#cS#5lvq@^3oIUe1{iOc2`cUoH#k& zzYo2kcaY@XGTj{!=~=2||Kn%~+FyS8wU>X0z0q2nyuGrj(@3S{7;|ua@mGApNd!`1 zUYVBLaxfaa@y07}jeoUPSz_#>Ycxbvpn?GYx#b&51%T)KHxLMZG6jieyq#|?7nl#M zXXZJdq5%&iV}$^PPhi}Ej>s~J1SAoLq$-$}&={R=PI`;AGY5As=`aCM&l_V}H&JWG zYi<^F*M52LeGI#dU#;OF*vaBJ4Sbda^cB7hdeA9}d6cprRiQpl~2L++sYM*m* zCVcxy9|o-RKHp$_yU{!S_6HAc7xopK`tzUvywX^zu@JL%eCHe=WZsVTg6_>M zB63Ij1_HHt;>58>AANLCJA#3LUWIDp=Q~bFBKnhnFcJ@rb)gHmutfmIrT`tu*ghv# z&%c4PXhT8ZjJD3dhD$kTFaVi?Qb97*NVffEa$fXXq$wIjm$IGyqSyc4Rj37v}s(snjoaxALguV2hhM*wQK`RWSC?%8| z=zUKc&6TgCqerp)=yxf?a3;-Z`^4rCF@BBKu4}IT;;yB)-gg$^OuGCpx+EIu0#i5kr)NBDPMQMz9fZGi@SHSscQfJ zecO146USV$#TSxM5PT_BRpD~tn- zu^4ce`>ab7*xiFG7SnUa?hL$v4WJ?};2hFkRBfI}8!36lAe(rA?CO%Jt9h#)Ng{gY z2e!iU6MdsTWfsPYJ3oqhqF&8L3&(6BQ#RlX@RZlIX54-I?YCcf%<0qwN}iP~RRIr+#>zCVPQ*%O3B#5sWaj+q$SGKdLb^Jr3?-?sffeCpHv zNwZT~dHTV-abknpID^87;bIo{@x%k|>Kiv}b-W{={KQB5qsDyW)C2b%vlE%_l9N5m zQWv8JTU^Gi0q;OTRp$95^;+%v!Gi}*_2znPBcBX=<1Sm5SI(S#bFJU&YSH4vsgu}? 
z>?)j%Uj;B^=3NXpJ!EC$w z^&|HTjvpC}v_W9RM#mZ+8Man38RG>ZPK(AlBN63*GBSMP6QAr?7CN=HS6_Ja)UoHl zZMCjbT>Q?p6fc#g4o6=(#ih}@>E>Iit?l*Z+H+6b-|wD4L^*jv_(8SOdA;^4+-NO( zXbsO^zIx?~1N%<)>f<{8g~70=mz9{fvX2|&(^;;_Bi)3EA%jp5>SPpV@@0KRelmcu zFimm>Nrc2G2jAFK5~DF}nNI1k`!$j#mxnT~i9~0~!983g36|t@J#%VpdD~9Am3U0o zxjMIy>YAheQVnhh{-H1WV3#sl7g#(DOU|qo#Xs6Puj&-f14|lkl?PF!Qpl0u=usJH*%-pg;kh0VarwKMnp z@HfG$<4O~W@tCc7K=s;duMr_X<7v=t*RDN?DwYF8tn=1_Kbfjgl4i#o6@BcU zZ~dQ;w8|0Ew95#iQATJjx`1iIer@ch!S21gKl1TgnV9UX9>3@IZ|g+i+ORg_?OEP^ zL0;sSqFd=?LV+OnePwv!N4#Wu#Q4yF|5!{2tgm|Df$`9A=LW}Z-XCz0Y!l* z;_wyYI8{Yl%nU4a*OJ8*&~z8XkE@>wNFMpDUA%apDKLe;kR<1vAsY- zH9%5@R%ArzBRUx(zVXHz^bUrkU&t?-dH??X^bWEjg9HegOI0Q-r(Zwv;-mNQwwR@l z*T)zRJRZo3mzNPe2DfSHu|B-vgCEk;jgXkUN_q>HrGOd?-#}CDZnX z-;J(bf5Q#C4_-qTPrUQWZ=Smkv1Q!x+XHGtvaBMo6|PUF&0`iLSW(`+chB6;%X=JJ zhPIFtvi0Hg@#DvN$*jAou?IRX?x2eckdFXC(K)$mfhbiBaOQKx7~)J#@Uz-o+fiL+ zkoA{!El0|#4l9&pDs+)Gq zyz514^5-`AZql&Nmy~X}>Eo5T9T55Lm!CWR>Z7(XWH7Mvn8HvQ1jR6nn;6zaVbfp5(o?JWeqP}Fv7eUn;|7Ab$78G))hHE{FQ)9Eu>D|H0yP9$h-9h-#z0!xAcUpCEoC#sl|SfWdQpQzLD}No zhY)cil(4#b>h$vBqESuK)UZ)r0woFST6Bu*XDo{f)m+NS zQ-)(K0fJ16B>u8iK)#w?A2z1wdGt!~6I`mOKAsf45lX1R(x|NKLR2jPfS+qrVaO9& zWU+|d68y^P8p{1$Cn@Xd&o@Cz=ol&7LO9H1x0$GB3k!812`vhy{Kc*2+dM;c_^RZh zxA9loRu}?Qo+vdksPpAazc)Z67|FHeCVcn1dSM+B8Lrg#RCevzvFpaIz~nv8jf}Zf z=HD(|cmtDAFwA&c7cg7kKNu=nP@K8DsmW8%N;tM00s<`$t@$ZvO2D6DQQlhI6g)FU zZ8JJ?!REGn|rL zCLyA-=Q>$;h1{) z7>SPG)Z2;5Q{ycCdR<@kng)SHP){{!qJYd}I;w8cbzpZt;Bu6R-odTu^Vdv8a9UAH zFBqy0ql$|*i(AvER^&pnNrTBVq0d7LNQy4I_MzSi&|Hyvj850aqsZ?<=YQESCY`fr zLV@#TQsq*i+;z>;m?M#>+uXZUE}=w^ig7r|pX+sX(KL+`u_{ph$Vf^6Lz1#ZRw~S` zlR?Bz72mU7;q$LiYml()A2^FlOfmV+k}cz(*Y#ugqUf&JGq$B zl|A=2xGCx_>M8!NE2Wa95W1KI>)CXT2}9U0aU}(n&AZ#|ti=4f{PN4Mzy5mW23VGq z9T=@NGX<$Uz>{pWhZ-sxpMi>7y=Ee!#0Eu`|5ZCaFFTRKmBH;h%M?cCuA?l4v*0oXG zB$&~$q27Rv+(u(TMa|QhI!CWiaIHpjVPSEB7nCNAZFBQ2)U+~acCN)CT}?z1aRIZ1 zN5J!)qGo6NVyDWxqP4lX1x*YDLo_1I>axuE$J&L$EH7Ai%Pw20E@%qL&?1Tp 
zg%lIy9&oVrBQU>x?6Jo{5!`##RafoXw+~hUbx=)B(J6;ud!o$rm}$hLt*p~j6u-!kDP??5l1?4NE~uOHUVN{g!t%602V!Pe8Yu@&LMQo zgsJtw3)7>48OhmGK2MDzp70cWS+0g5xYV#GOgy#=n7`%P6ff@KH?YSYDxgSHtq_{p zoE~<4Q8%#Xe6HO6o$ZmdhpH(BQgHo%bD44G&*?)lry63I6N^}uB-*(mB2YFsAeV(q zA!v~j@!(|Q!TjjnBMzh=Jb3T}ANT+s4Gju%Ou*xtKr0bbd!|gn4E-+TPANs)_`q*O zCpd$p-D;ytWIT^+xISSL^9lJ`p7a2OY-j}1Pl^S+R~wvq(^=Twp>u;X?S^!h>{{VO z7?9swwi|gpPQl)K6{5;J5BsmYf~{{nqB?~@Bh~6fOD$W@j60A3kwwJM6Bf!8hqqiq(90Bnt_$x$+^tJ7Mq3Vw(q(8 zK&7$GKo+g|Z!jmS0ADJjEkh68f9Roys2nfVp+kq3mzP0KIHc%4sK~Mq5&TnlV{>Az zeO;e(YD5B8(+(?@w6hg)LCLiiS0uGWRLa2*(*@(_g%@66^ueJF5wRTNNE0B;rONSK zmaZiS5EwK&nf)Y9F>ZJy!rjP_#1qv9au$}CwU9VC&C6X{#+0zbCAlj^f=a;&I`?+l z!ouA4WwH#`j$ug9gJAR|xfVePN+T>qYIJN=JDvIE-Mf|!9F#*;V2J_P(t$Fd0dt{5 z;@F9I;9*9ehaY|zQerY)dF7S3#X$w=3K|6|se=CUKM)9s^X91pXvWJX5y2~8`Sg+d zE54mcn%+hX7(&+K7TM=Eu8{&f_{*Ms_E{_k#!UuIm=G}xa+4>7QN#kxVRzA}+65e0 z6gq^8xFjglB8fca)+$ltRBnnx3s58*pgLKZU+8jrSg0n|mnf)B6Ut5&{>YA}>pOQW zH+C$um`(A~8J5I25(IfmIR(npe$XBN+4jAA_s-_x2M=Qa zBag@+raMGL9wSc>k@5uK=QhLvG@`Xgq&8v39aJWT)R(xKxuW@JQsUN~PESPd;vSyV zaG_G_*SiQ)`!iJ=wNaDgx`ajUHi4}GWL?WC-xTe+AQ9k1YCP1su(?;MIa97n&vOGg zLIDS9*s}ecGfc5z5h(JfVq5$PGVzcI^vwdeMfQ!!42zm?g0&2Y^nx?SHM8+F$Hrcgh>TuQv?7?>g4%i1`#=OggiQT9>If@oP)^Ok_^wJ@?=wm;G9vj6*hL8#F+x9da;0a`lH^16SNFz z{$SkYeZMBJpL3gRMIC%p(+laL2|OiXs}5&jv4f7c*vIh#0j~q%;m~H2bv-I>7%!RD zYCzFQqA3qBa)v*9aRe5<9t`mjGxB3rdb)`3O2)bB1R8oTHm27GMDrdhh_-|*a>?H< zq$npZRkk?ZQ+WYs!)Ov+DPTMQzZdK#X5F3I158sQKmqiK=m}`ijp^Ia#LRS6p(v*W z#f<`D#zVZSp$w6!-~`FJdnZ?sTH(g`(H_}TekYdZ%wZ@}#5SM)iUWds;!buCbw)uY zP3KO|Fzr#w??0Y45G&euBGr=a%0(o1BJ8T@j$lmf@W&a{8<&EA>KcrxLCfX^0*DZx zKfma^?$?1R_-RFD3V3KJXK|_2sD2-KJmm8_uqg2;lqdVT#yZ52*6Jq>m+hwk;}5Ggk(@Cds3*Uli z+R(7gatTRfLp2a#3hqflLz-IqdvFHO66%Np=IS*NNre!KVwzuylVwnnN5?ARAZ8W; z!Z~_^inN%behVRHMRKb)VG9eKZ3P6VgI|>typ6H1jW<=9Erhd`E`thbbHFJi0uLsM z6f7f_4CTPaC67a5PW=Ew{A|%9pKhI93=F>|ER}1BIAlV&GC7FQXk5v_42Nkdy*PB} z&}TmL8Q3z2%OfDqapP$bF+`)d_t4qch!T(& za3Q+}C)HqkLkHDHPB>eD@d~`yNfrp6RO?WRuyY>$$onw~ErDcei5>+HLliuT1Q2z@ 
z4L9%6&?fR+f2!7B-Xpp#QcEO^QH(@RlB zDMhv61C8~5;xXLNZgyewW)&=l{$zdP5B}f}B4+?bW{O07MwX+A0xdDZz*`avvtQ9A z>jVJSAVb%D%QVczkv}#v%2JftprFPx(J?bE*+2kiiorV?cl4to9ipG+lx7(6d)PI! z)Yy<8P~Te9zy+>UstC;7!~#MqFd8hlmyY0C{m4f?LP5NZSW_(4VisO=%{8EfzQ^Da zl_ajNRKOKP^fWb4Z+aCi>2D+pNcTjyNt~QiTnvs86tY&NQ-LK2000KqxP=@+kx;E9 zvPkpdi!VY!`0dl5{xnZX%!)T?L_tE-8{jyOYnVH(7zN;l{ zZ1ql{WuHA<3c3cXeK6Ls92z=Tov^;*$eSp$*&~S! zOzaR!$v#nMgkk<9K=d$bjjQnNJW6$`)TYz(1;P<8 z9u;GatUhN^*=S%2j2exw;ZgCR_!~qI<|mC?gaT8aD(N)!z-MMclPXoppHdS}WGojH zE)co6l7ed}l8Z!lBmgdwF8zHkFc6g8s%8doz5T{PdyLV19Ky58D(^;W=BGVFqsfTF z2$_{&jM+1{RZW-D-xCaE@)6MhQK+kEEDN}7Dux4@3APlF*>PXv17GXVo8NOa*?lN= z4pbbrcu_e-&uS!(@8rOH%OWsiB3Bs>{L=^3wL+kA$P;{hG6(OokSzIsRC&CC>4lD zP6_oLot#$ge4r2P35m|nh^U)swZ)?hg_`DZImB&BjQUk=IZJV?hSrC2N-E--40Gyz z zVundR5(!~S!u`DDiz^Ki`H%njk7L&!;zzb@gOZb$+($PJAeLKrfc8n!v{`=@O}I!C zGa7@Dr#Ju-$u&lMq#-$!P=%QG-p{2TQIDWhD+D#IiX%-0!Iu39>IHCWDkV(uKx81Q zdAe&spWCtHPsfRgO z)B>pP*tDutd-;z*1|;|2@$$o&xccf21++ATQ5*5-NPx}M^zOUw=C!-qZo3Vqr|P$| zM%xl0Prw{jax_6YAvY&@gczScYuV-w>XJn^ztf%B%Vg4}VbD_p?_2N+#Sk5avtCDr z3g$4gH6egRxcJ2uY(ha>>bbMnmZlV~xIy_Gsf{bsbyv@O-;GI~2|KUq_eA1EI{qrg zq~ASIFEH;w2=|#faPpx;huEA5=N~z81pF|IU=oN~Nlp@RxKl7nQ84C#Q6uRyLF14S zlF|bb;YbM*g9s6s;(A)LmdyePRb4G4`1<&l~K>VCglO!kVBS&C=ro)Kt}5;<>rXM!w?Q}V^M@dC$G5Tia-6+KP4T4QlzY-$P;5wYJ^M$x0f4_ zr}?VYbpfF+$Dl<-4nhULGpj*@AhFWl>F`9YxJuwxzB@OZb{1oeaZ371$OA%nL7J?{ zAr!eTs#D8yFS8~VFbWtZGtz+`6Cu6 zTwQ@Z(855F#~spBUufOHq{`z@TrPF|!9RbqYNu|Qd(BnVawkc`($%*CpLK}VEj z*EZ&W>XCwTOR8J_6S(w2gFESZ14D@v^QQ$19>xKwl?jT2JgCo}Wgt~sfXADJEVn`M zg7?%=5ul5BgT}1Mi9 zcobLr{)~I@FC2ne*|a9I{zM4kc&PH2U!?O_I7yjAJL(Wxkvi9cut{8zL6b>CRRmVL z)I}`7G|WVcGFimoBBNKeT#F3FggqvU0i{aYhaB|rNE9?|o1Tp|n0fLf_48llk;m4g# zPDl(L36Ya*1k$6nh%=aZ7ZOMUrL2r-bhZhsh>&dYNv$K5c!6CBk10aCn)HG~V8G?C z`_OLk5ldCtAdBKkp*5o>DhV1z-(;TTZp!}9(p+TV4=fj(TZjh?_9IAqf?m_IZj3I+ zID*+gV9N{cenuIzCHNsFXNVdo6qbWapW`AV64af7(2>LyvgLT<{FGNEta5{bN_{tB z$_Y{0_l#9jZBVLpLt?-dIf&=Vigby|#!x)viLE{UB9xX(^;Rx|^@U7oPkS@}MDED9 
z0jyMdrk2@S*S9fq2l=E$4XRjk<}tTcNc;to3_#K~Id?>Wqo~K#s5<~7@N!N+xOBNF z7$~%XDnnnDZ)fU<(b+e|$Sn293O$F`@luyY0o;z(Ne~I5>Xbs1{07FHsqWJ4Ma4i6 zE;K51TvxZn2ZDqEc^4UkVvmKiht6RT;mG7oArszO7abYv+f7K#Az-qTqpYhxz&3w93{;XHNf!$41ww)i(3ZC@Dbxm zmhXcxQdwpAFd$be{=PPy(WhUlyl^G!w>DTiQ)gS7mSnY7u2xDx$lCy0>G?UL7wjf> z&*-rgr8+MBy06hv;stUzs1J%5#1c{cFd2Nrqmn6-DHe%gvgj@qc5P9oQrCrY{jNbz zqkv2t5rle5T~3HgS@cw)RJ0gK;z4AOYtfd<<^qwexatw`r>?62tSXz8ku{2tR!qtg z45a4?Qq+?gq87(|4s75v%wGzsY|54}O+!vn1F7byZQ~UiCG4ko6iOL}z4zXGdDZUo zpZ|Qk*sdn@ap5WU!rYVtP<|3|dBKVFPT-`z7&5wyQ$gCAV*`plTW$m*nMEJ`S2C-a zvmr**rX?T}D9H@a%Y@ns5pF+@sbD0aN@RkCAZBaYzTngAnA`GwbG9mKA{U!e1fiyO zNWKhC4I+k5dn7kED+3$(8v4$1oYFuVo~fH|x(QqJ```cmXFvN{CV??I427t-=U51f z`e|0>Xcg3`#XKz`WgUfz>58x-#H0|`n8kq<)f43cBqjjBLtBJsod|OnoM^Zg*)bt= zo%%#5U4hei4GAnK9kwpSQ?6umb8r-qh*O2<2dWHuML`hl7^+l6d@k{;+7FZ9X1}K0oAF7 zJ$DN+)Ljlx$NqZHu*fCh70E1#5f{HfR)kl0ghD>um0ZrIr$DZ&1dPf;xN%Q2o@N$n8qMg>ml48tH76PD&Y%Q z#a@~z#g$uzm0NDP1?Lp5#vlIhhm1R+oya5cukj7fCb&)Y0b3Dh6huuGeGn_IMV=t7 z%DCLj!<;K3tZbnkvn~ohBiDQGxrcXdzVxLp@ia(7{%Ac4s{5i*Xk+tXV$sy#v|WaN z+e_d|iJ%o)=!eoUX5c&L4*EHr&o{6~jn2xhA9EJ18gpq5WDkN|zP!`O zYgN={1_&*IfdQE*Dvianu;#UI(v~a7nDG$v81~K~|${ptc$-I@tQY98L8!50KAZVa5Fi##9eR&#^8!Spq120WAkFgiqpHxDr2Sw{N3-}^lhBRh;&)}?eR`V~(j zu9>JbnlX-`IS7}jj#Belwnx!w3Mi!;Wsn|~R&L0Lnna#7I7o~+rBPZhGbobd>7?2a z`qiAI8zNeXQ;U|GkvpWj)Ufi?5xol_>d8q2h7GwnfDRjggnTNskdhe4A;P1>hYz#< z0qvk5Mj%`MgVj~LL%4~fDiVm$-F5VGPa?DEmT8x8r6)vla`}caK!_5is(g32)sYOo ziy&_U#b_p43fo(a0x56ixk|M|WCn#rYg71gN}@aWQkY6E%BEIQ#m+E&yrCwQG6*tJ zL`UUa7Yclqp3IP1EymNh#z4HTlJZT^3raB#!Xi4Q8KbO#OhhDi^uooclsm4#DkkS~ zzLFtsNlC$Y$h8%zm;PQv3_!b(wWu?0x2m5n26P}VBgv32v#r%uSH|5q9t|^~(d&1c ztxk>8Y?>Tur$M6N~KO?nGJPqz^-5PIKFtX^4 zJ9gQkBjC*VpZ+hnNOjfM#3 z&UhCXr{sWT*zXT3Z@s;8^yqQDL8cLd&l%MFqb}pZ-hI25m(VZ`&A!C0*5z()<|6lr zH4vW%O3H*BS)b4ejkn1NRfnRFB+Gjd2Pk8O4yyzD#B|wv@9b8V6Gg+W++3AV#Za@ zM}|UK2tUZ0UjAiN4~?KX9v`&g2r?eY!CWE-xAIs~gFI_``^+r{mH;{QllFN@F;RDD zC@0IF4MM1b=^QWMRerkQKnHX8bA%@m54S8=-l*je1H4rD@|e9-DyrfNeweW{0aKms 
zWK^DO%Q+PRDjF{^C*X?5xyGr$6_5A}$*7QagF2)TdE}8tcw_Vr|L_lE_L5uV5%CD* zjGhuHgUxNuAT@0=F+UdlufRIhPXz#@MySyS_{k>rr>N+4byenil$}VRx0es zneB~=Z80p!wmgh4;H4s3*D5CW6OO;x1S=tUb@&g1YG@?8&2nlyEjxvE!<>JK_ z&dO(q^!`Wj0Lk{X6*(oAMPO7Dbs6j-qTZhIT%D&PIy6DAnu|5@r<8Oudof+7W`=s< z$bypzMA0B@^u1_De{(uTh!fZlbolULCdN$pKK8MXg-#=#vhnh7E?u&U7(F7qxAukySQ<$SvvLXKb4Ag?q9C()0^m_gQGYy`)||Um^fY(DIWsc{ znP`Ig8F$cEx88bd3`ry+3&HIXAtg8@gOMXVqN;qGBPfZ$soCA*du-nY!6Y%}Ks%QE z0aJpnT)oi%o#U}Vi;GD#Y+~Y#LMv%7@ zlmx?^i)4E=fk>ko-?&3&81>+4DvF086vU%<`3inM@rh6HS--pPx(jCz%U$FNao`HB zX3!h8`D)$}7!K-jNT?8?z%@1Q+pRB18N|w<5}{Gti|DK0|NY<3<)Bx&N2KQ>rl}jF z&`|VR~}%vnm|wR@wx=vsNY2fq7ayp1)1ZqvXz7SX0IX*5fOaVDsLd& zdFP#2HHQuzip&Khx&vcDO|JBXK#@EiMgDbH4K1k)Vt*ilpotJ<1ff)fR0?zkcY<(h zc&I#8MSih?C`cs=i8>;qD5*h5w1(ms8{Bxc$Q&i{Q;E1TsPQTizyq_uR%h_}`y^c4 zSlX$8?D5YGLxYw?paPiaxTiYs5*tj%yC0A?vylFYLW+HPI@c7p;(#3&aJG5 zFsP5^kmd-2Eh;|WYswzgst~atT!HRL3y_py9sDL@q6<(JjTdHytBvaPXxfS@B9!Wi zseGhmN(n7Rp5!CW&&WEhQ?bZci~&5QZ~>5!xuJOqZ3vJr6CF+4=TkBBQ=>)ML9q

BnDx?UiwVh!x9t)ogb9y&=a-E-vr5>gvNwS6o?Xbway_7?vv> z*P1fvCY5KOJM!`?Z#Ehoolb=NSjK9LgDmDdjccztv~TY+MgSvbTY5{HM{I^Vw!Gsb zKD|=;<~P3c#+z@``arveJJ^$h-muZu5^{gg`@|58n0A(~ms) z)*El=Gx6TSdaQj6r645#7NM-P&#xLSzFSAi z{?m_PUO!K8BPC~WzZvOc0Eqf!4ai3%mynL#P#ouie(ZbVAV2n5lPE2nfKzitqftjR zO<83QU~(neZXhs<8j2p0k4EK9F*7iPd&`eKBd&v?gQqep_~pjSpvQv%VO<|xMgy?` zW8;!GP|z;PD$aVkwy4krUkmW+`f4^tn0g%;NZ4S9Ru#tieG@L9`P#@ zxs3#O0j4=u!0yj(Vk8-9VUr7fPfmms?4C*yLCoT**w@*JtgE{)aAc?D2V?;+GVURv z&L40bEXjm)E}T@v5lr@T=+p_WPNy2SnKFrhp!`HHe%SG2ChGbWxopcK5mmqhL8PaC zc6Bl1(;Fyyz1|Z=R(*7|AQZ>vW&CJABNHP!v91?%EqaGRNrd{8qsW77M(syM;W01%86 zKT(~AJlU2~kf!tQk|Rfs00|pOu#+MYPzjNTNPwq0P0Gl_ZKQLZ2E#1o1tc*j-5#^Z zX95NpA+St967_W$HaGBAh?FngtxxI=j-F!>2x>z`T-xMR?^KCi0MTX;$S(BQP=zow zZ(*>YN4w^5Jx9?FnbuKGWl+*6QPAn`Wp)?EtVN(g?pN+U7$aWZhQss#3bF|fH;1Y% zrp*+O%;eu9xJ^@21g*k__?-BOBe@mX;C03uu&n!jxpf{el>Kl_Nuh!iFR-E@1OtEkgQg5XqM?7WRo107 z7AUjiP8x>Ycd9{v=;xr{{QSt= zsPQ#b3n%AshZ5ZK_5(+(@QMa}PD5-~CB5M4X(Tv=3^`2OjUMV=J#`==?5UGt+-Yh+ zSryar`WI>h{iETDqV&X_Mzc_Y!CZ394`I<$uLCih@UvTlUQ#f8U6LaS00mvd9+L2b zAy9hj#dHT}!S;YBtH9n8e8;>=z4^EUOGkg{J~-qG#@Nqz+ikbu6#{i+h&((3f!X7k z5l(S+-W^jn4IV(ErZ|tBNa{WjbDBrkMAIl#zETqnanK5A8?uvnf;YeV(1X9ivBG9& zwNrRq$riVy5KvqMKqb+{^;ovfZ*JlVj1)|`7!c$_GCAl+a4%!L1`A#S)N*xF7q-86 z5mj4GWY*76AJ}Ocz!Z|1FE>9dkq$pKeqeCSk8FWJSdC^JNe8nqs0COBKLabh;~Hd& zc=R{u!lQ^V?$9hq;2M?#6l6t^lyX@xNq);&Mg`r;KT$G25T=4W>Z+my;XQ&W?`STr z2&@PfMHXSAT4f*cKmOxC@=E@pLx&3nD~A z+a!s=?JNx?T;CsV@hpiGBaB?UQIFnPc&i3Btl8F;1aw%cy|+0TB4hntsHWai3_OYOd7z- zcD9z;d$*+1wP{3qBechNf221Af7-GVG60t0u`bMLP+5$ z8j>r#%@qsbbD#Shn>_Bg;|_!_;}Z(v=#Ltv=uv0blb#Y>1pzoEk7yM=29gep6sVj{yS} z&OkC-ak_`fo4(USl8xcB@tzm0oo+ zIQHm6PyYO8`#R0##&~YDwxc!KUK=kqCfh6h-RZ< zbPO;&sZNBbkfRW+qydmzOp$)?fq(hdt4};$J9TQV*WEFmY{OH0^3?X>aK~iQ$HDTm zpFaJQJG670VN*j&t}NAzg6mEyKl%KhK_E!!b^AGd;35B(|`Vt|6`@w*NGT1#Tf;PP*Si7 z)y0$Z|NAZ!olz3b&G3h$YtKT+)DBUM(I~(me{Kb;5au31EHGfmQ=A$=4wb|( zJ;ff?1`r-#;RyAj{G&VGzo`bw@GHJqz(`_bO#?l1o0FY=@%cn(XH%w$1p z$6%b)H;GC&kdDR$bSp`W4;sT}vEipys3I8?kqd(YgND43E 
z6h<=!sP-+|@I}-)piZ;I;23%?JI+s4ilCjUSchs_HTaRH1DaT#E0oQ|-1n#<@DTXH z&&Xy3rV5%abMOyNv5s7ZNaSj|!@ijbd>8h3>B=yF^|AZ_T7#~sM|i5vvkf?ofa`2AizlCMGh zXV$a0H8?w-MwvU&crGH(i^)O5;AbF#qkbw)iysT?ccI54^Rn|A{|UZP

  • 5+gldRXJQO->yQz zRR*en2r)q1nE8PM?Y^RfUfDG%jb{+(kxJe%=#tL|L0aIN)Q4aNHNa2Q;SC+L7>(j^ z(cBT4L#Q+DXq1NddG;V4L%tggPV{D0WBOVmGmENm2O@+>FL-j@Bc6&4m^C&UYz|@z z6(_e+RK=Xg(|lV<-ISvuTj?4!l!L}qgf7u@ZlhAJMJfnURfss9HBl%Yxl)xpj>d$n zs3NkYj3G~vD>9~n(V|Q1sc}u29F#d13rcWR+LX`8uTjsxf*;b@rmWNIC~2rsyLwO# zhFM;ZVy`6+1G``q0Nf}^f~ORcEVzllQUIH>Kv1!05pZze;+fF~^%yO)VQ^Gsi+h1RaPNC8b+d2v@_!9KIF zO&N0JS0ZRBMM8(yIF2etwK=FM%PDyG+KRNCp-QX8f<`Ik3f-&=MI}m9JH{0i(sv$_ zh~57$2y*O#qqRoHYLojX0A+{2%~yHhN)TP|izYLII56 zvj-?J8ZhQ?t{rS(pA?JoojSfwb|Q9mqX8!4*4J}So-1@zsTa&tFOMwQ*u%Y z#F9TML}4p3D_;pMMox)^3=Wt4i%NV=L@z%IctW7?Uo-ao*`NJc9JU*YzzHOP#Fdmt zjNXw#5NHLc5qENW1RPq$Rlt6bJQ}MS?z0qH7LhhYQ{Hnkoyq5+Nc0MwmSk4w2)`k{ zRCbl1Pfmt@+D;5qG)xNVYMLKKxOxn;q_d4cErlXC8#=4VO+@9SNa5|7&N`Q91ns~E z8A#z9o4m#H5C8BFK|xZ$TZ~GYRirYmIo%j0wIvoUaum-3YKxyiB!jhgkzVA}UtkF3 zK!lS_e({T6{LSC|4L)KZ4A_D?K&q7+ zjG@H(B`xaSh+T6aXP}6V0|vK{hHTI%I?hwv#2n7{wXc1R6o^P!GG}VK;kXziv9x{w zq3WhVrv3{$5}pBy`J~2ffi-uRnax zT?ckA4$qvNZ=39AKR+66tBe<`)mFd1v)$OyXuR|C%SRu7teEvjd9DbqBotBo z(H-}$^(H&_9GKLbjfL&2gHf$Bhoh}Ezc8#fTido@cHq$KZ=ZPLxi|2#$@rALuR$D9 zM-2nWFMsu9vo*JKd2hYe;d2z&lc<3Cxka}8u^(wPY2b7_aQUI9pLy}6mrr4~7Fs81 zdqJWgkluLsH{bfdzj*L?k-{@=+- zTqhbiv1hR}?SJMQtt{>JpTF~tpMCq=c*&P1qq*Md(qy!wHfeQNTZ6TQ(<{4&u)Rg9>{wyz7(IfNo`VrOUT~@%S;}k|%M5^P<{ZtW!h1Q-M4qwk6_( z5*cKufJ96SP|!bJX*EWtj&)bgG}u^LODpOanzeenxnq~Uw96(ZD#5Q>;#;z zr>L|$X4}LmYy=u)hbrC*tf1j=u(*ApQ=6xaW@QfGHEW@}sdah{OEHba&)l7?o@Ns| z=OXZaCxa8ZOnYIOHE!EeskhA`08sQ@wNMW`Tk)O@R4waT)n5PTJN3zmR}uI&j^0qE z0=>CBzfHhn0ypRBu2_fSI(HQFnvKSl{&y-I;Iq0C_-$zXpAo@}91+Yjbga zN2jtds4pk3?<#0OorEQ zl&hbXXQ0;v4@;BJyvw)1iKLi=>=c$2qG>$ea1<6n9fYI9@Oiz#$%2}i<9}gSBCqc@ z*uFHJ_Z%3O(P5yN!p&Ci^jZ&GaU=~FT+lj0hI6TUy!%d#$egTCU@^J*L%}v1Gi9fy zDR+Q>&MWf}YhIv(f|;fP zFdRa$8GuPCcc&b88@jL|gwqr;$hG-K))T8Q%p1XI6EIbQSQC087wRJEUWqqXaGqne zjjN>9YLCY3e*^e)HO0mTSA;>AOG7}*%ofb)@O+HKTpeORFdDVYLl z4?Y>eM!V4zDgz_tf8RJw5)<5*yRt4a)7iM?`j>zCm-t+c9655$HP^&EVJ7FgT3D6B z!wRvS%G?9TC;n0-A4s%0iF9>W+FNT!Qj41h$QKWYFw^6GWKQ-B#S-a&ks-+u9+l(V 
z!f26S-H%whpKt~f=cgh^@C>esVSkmc0rW=9h%`G z7KymUCamu0j3rS`8DKwxMnAk}(xt`jQmX2#4cS)SLn+iU z5sz-GpMpqQ(G~edF+taLUzavH2Ov;xgCTZa7zg{WIO;1+FXZ* z(kNoWqo@t4K^k-vB!hPBrex?rYvtwwx7_A1r65%oL@XypPQVb<00W>CjSo7-p}}$^ zLl2K6is}{Fz?D=_6ZL}Yl~G5IVAgIiRCM@k6O*pi;H2^aN@&nu>!5!Iqh_Ns7>*!w zI2h=KP>HjcZKid;EdeJFD|jjKbVw%9X^57%hPM*n0;%?D!{>~FCY8)!i^fyz<$kY= z&?)t+2!RJk^Y?%M_xKqZZWwn+)VQ;{$}mL6jH{3oSdd0+m(&JKsH(hWqb*a$k6A0Q zW8bTyLYnwo5d?HJ#H0=!V}wIHFo4iuHn5G67w}y|7ZP)P(+G7^9Ns8(C}lK@(VkdY z90I>F2zq&pmB9u?k$+I%eHP9}+T`ABw$UkKH+gATd1~SauIVIlvd}-tBYS7v5pd2B zGj2B8Ypg@3EpsIDQPhTI7TcVE*jf+>?hp*PNGU7t4{XwGAcvYytY2n!<(u|ZiSaZv zRb&d%kV1fheBl7w4+_#85q$1k;Ta&a`CtOLq`O26@T9WF{Xx~=?V$_25Q!$2SnI7U z6A%*!;4LzuVuT>A(F`8Famc|dg!r#c(wmanv-Q%;gL3_0D|1Kj^TGq*I|G zQ?1_MWH$JgzRS}LnRw54Z9pM;Y&8OFtPvO>z{UmLsui9y@$uodnLe=!zd}m^~22(C~ny)|m zd9RnzLD9D)riF{bl;?M^fZ%>=HiYe#JR80WQaCGhGF3t;SwAtQV>P zCVWinN~l#^&p!M7p{s6oU(prcs<8okrB7g>iJHPwA_Gri(g3L+kY^0rk_d?q!wbgsxo+vr>+ph9){=8-3#fBKR7WWdCN&q?zcDU1eo zSo9{najo83Sia?RU$Ihc%Z_b8NCB>igZ35nE4|atJapgM%1U>zf=1xQPgaoY<32v5 zZl&4njjp}!KkUE$BbC~mOp0b3dkP|Gix0=pP`rEc{=4oT4|@#u^~zdIIN9sO&J6TC z$2s48%jfDlF7q@)Gap!h8ZZt&I%kcQUy(+%YHYR|cz3J#g=-6Gt0WCZJ+|aNb@t2X*41!pEp?gdr+{13Op} zj0&SZlINuc6~RW3E!N=D?6h@AO@lY?nHkoxFI$Z|Rw`~U#%PYMLGoKoyy~zXBtk#5 zkbLll-@ZTSwmb6_gh|LxbBLGR6Md#Gn6u3HFIu2Q6e7nm;^yO6XEA5-xpTD<_@VP+ zLIn_{fAkPvgC@GfIfmcH6bg>Vqdpd&w_Ad-gy&%6FvYDYw7FYcC>g6f^w2|m@r=DB zZs8(aG!d46Lsf~FMKAf&U{w&Tgv2Wl9?d|guf>skbW++-rXCQEhkX=)yx$njbP*ph zHlfb4>$9PhMH43f@}tR=mD7>clvcgN>>QGghJ8rQeiKB2IG_9I>s^d`w?FK3=7l|f z?sH>r#eEx7&zb_&cmt7dL-9>>9JOE)@0fbMMF2m-4fK)&rCRB(aRidI91P5>F&L6O z^6uU|)7!Krh7cxua4Byev#aIc!Gquoj@mTyITOV$DU)PPp;}v8(;Jl3*>1M!RHt3< zb$hM3`7>u$w(r=#y0V61VqVv7F&^PNVt7VM^~rkxL++Rnp(&bnH}XuZ$?>$KlXwUl^ zDAEAzeb#ELUgx#p~u3D8aBrw=p|t8u$PQ$4V0P)T6}qh6}8M+w=e@4%#Rl0~is2r&{_bwhd54c1GNnu>)aQAvWC?4t1YTiG9-+_491Bd&XM!(Z*U#+ zVSF4Ye-22cC;|%_XX*%Kil>5mClyXI{h5?a411O(dG+Cqopif1!ONrV{SAY*e`@q zf{QU30FT#LhB_uye8}Vh&KkEx41A^(3Z=vj4YQ+R0#uH8!}8au1gX&E)B0 
zv$b@V5#CDe-G`;kf%x(?=tvRvWiTUzgMN?kg;Z_-mZ7dmppL83t2WhcSACR1gO&{8 zvYjBR_*^=72HQ@ZxzIFhUrt2mI0WYS$z%8#Kl|y=(af;#B~^B^u*l8nU#MkkN0^At z#15urn8(y4wG^ChEov2rmTcFUaESS=2)1LMq$*(>Bw>>JlY*}7HL?AL^Dl-%4(VmtJDq{qKILT=I2gB zKiazo<}})CMW#uU8fe8)RLjDz?ijWOl}06e2bFfx(EUkyfDbum~pLQ~Yh z4k`%Qn1fq^bsKf33qQ7(2kMuEcFbE?s|13?PXxHiY9E@pDv&QW|v04Igny3Ia_J zhr*0nWg4@(p)X1>Hmt3jy|$37v*)?=^n5U&QNTo$n%Xc{uB#!*(74;>#d=(*>^tpa zHcM+s?4&ZW?@VG@LmbASWj9`JEFC$X>ZnxK*nWxHsK+KYE#y#zU?75AOL|c~=yGMy z>yfyhRX9};e=rZ`<`$&gNtb%(FJL%m_Yc*G(=aJ|T`ZJ5E*n&G(MqbXa`M>8Ci~>7 zI^#&L53OlxN~q}(s)o^nWb54%M~|UmVmjc#-+ia2XIA?s&#*IsPYk1dP@2-B)NiX+ z^H!{&>bN@^tsFac^61gM+GOW$2f$>G$U3-+r1SfG;YRLw_4J#s{_^KP`q+&hY}Ljs zj4zBj_HJOPj|a7mHk-Zl%3I(4`aga3FaM)9G_@CPR!h|)s+iuD-pbwI{pPlX_MY9l zwk_^r&A*RPUSraNImdhsaj)P1iyz-S?jN}R=0;;d)(In(@XiRtF6Lu*{`kM`-?MAy z@@}YI8J%IOhQW`C2@ROVojkMpFW>yXzWQhXeS6nIt*bKc&@jheE~AH@yYJZPw>sM! z7%Svwl1!0Wm2^r7&+xzIQf4&x01(+Q@zWpu%NK6@6J6bj-LF{*Zz;M{CEFDg%9AG;W)ZLZp0esJ@v2O z|Hh8xxgEQ9gnpWO3<&wr+J^??s_?6(NCMF=YsB*d%$E0ehj zdmvjQmgh2*xEy%>O&M%(URnQ$|~mT_0%g57fpXNUazq95fFI zZ069`D~y7LrA0X$dBVoyPHQsiarhjHqoza5wGD*XI0Dv2@JL?1TNEANv|ksR2=jn30`5iL*?i!E2RPiB z`Z2{4T5RC~py`P$XsfL_iXfBAN1o_p8CyyuYQ;!uQO;seBkj<{yMx|P6;X?fm^4qI z>380F=hUfFoPBYjz@H<($-!ni;e8;0k+oizZ^+Ium1fR>XQYYQsOiuM*Dc;vEvWFT z%J}Xy=VbZ=DjpY^$Pf_YZP-9-a1SeV*ED%^A0^`zoWCs=5Ge)q=i75beELvyFlx11 zOo>qky!PAX7<-Q>n&wTzG!AG++Wa~40jmHa}_FMfvIOfpBCTeQ1 z3iTOvd5r>#j#(fRU6>h{4SpQ^)W8R?y|-3Qu_RU-)seiWy!(R{guIEeLWd5S!!u;c z1H~c`6Aa!05ueLFdNveo=vPuus0ALOPy%Vl9S3vcDQ4adR*}KbV_caVf$q-IvR!H_H0B?ZwFpBQgi zB~?bPJkoGV;Cv(+)jo7jv}lUn0hn|;&}%DZ9Z{7o3wY$*A<)NAzc0S{B8xFI;||Dt zcl?h6>G%3A+~rcO0Opq2iGaRUvmVZDFlyMDROVZ_FysqhNk}kDShdTww|07AaUL%u zkN93TDIk=c^^7gh0vxmBVK~CiwOb4$JTy8Tj#E3;Zj2f&4OU(r2T?kDQag5;<;u@h z~G9Ik;S%hvO#B7w-MtHsVgj2GO;bEDh0>-1|r3K)2 zQyTz(R|x>d(ja^4pLpU4Mjxaja2xhMD56)wJ00aG6@VT9A~?1xstH%l0o723(yfpc zNyLvYE#e9cnToFqG|e#g@WT(|rzUa{jUqKO?qY}aSG%q1@^H|_VLTjShS8nzJSS+c 
ztpI7K#j1u~15&JbL!r^Q(Qd5%f9$>acU;MJ=lOD9ulnWx|Q_IGfupPZOvR60U0GzVXb0&u{~F9C!RXsZBKD!Qo*nx1P0=o44+EAY9O z((r*2>x~+cQIBw$;+sGtIEyoPJ<QeF@bgzsxca9AUWgXGn$ehZ-Q9i zC8t?bbyLHDO!FdRP87{}eRkG95wTo2uk>IEVCLi1!| z*Rnl$FLGctq4fIl-8me^Vr8{PlP<=HSR~os=k@jjcUt7^#Y}?K0J7?T;ZhY@o2cC) zo!>n}z<5xM=ICb^fUV_<1veaO<0*4YPWWediJ;UYDrGtbyAU#iVHHJghlX|VxC3w= z^b>d_z!8K^fORa>T$*0MKtV5E$bc{`0bD4Jq+!$}FPJ6p18-r$!~S7Rr=>~|yJeLg zNtVmv$i$F@4Tw<#?yLs53PKS*YA|)SL`D(m5?iWOXe~KUK8Vrhhf- z_VSf-9uF0UJ=)9;Db1v4o7^L6C^|ra>6w6)%dF?tDMZjxK?GM8|JtxclY`cj(EN7ONAI>^tXyN z!OQ*R2j4z4J~TSmhx2K%kd4Z~TV=bnTVk#-7WVhXo;@{l{_MNYe{$HJKB3VSn-dha zxH5nF+J%Y9u|%9Rs@SE5SsAqqFRF4d$~H8FO8LmKgG=-COR2fhzQdt_t6gBnRaqMO z;*~3)?$E*UFo%oi90j2pLZJ@nbODTFrH{S*?4=9uer)uBH_(fXSz4$%uf=ne*J^I@ zAHVbZ%g-I>R1$PVH!3rmm7A9aORPEZNZ0rX1atf1naO89Rg-Zr7?G9;Jyg%lzT4d& z?HR<;G9QVNJAiebNhz*sxX&cJ!jSmUlSfXUe`joTvMbEC)13$dS&Wv+jEQNeTv^x&(KwJ29VAx_81N9`Lx_%mXMPi9;Hh=K&OjCOwDk?jINL1F1=bn3x zqbIOR^M=W6ygaNUyH04Gm9G7SyCIM*9({4PYKU zdi47B>zG||&*7}+9SYX&ION#;+{M%H=Tj-H=Mn>Otn*r>*UA3=$>S$_Po4-@@%TXA z%ExniAQO}W4`M99l}w#(pmlrV2YF*)0dAp0Pw&D_WEFH34Rh2iCnS}%i#!^Ezp#|b zSrt5Hve{%0asySF?en$nzW!?k=B$zf4Bj9cS-}X0OahU3sDfB*i0L)NQPfLts0L{R z$?op?8`(-V&aH;p3V=VC+>kjy2KyPCGiU>@)5T0WBa5CJvYkze8CSbwIa+rbDDeU4 z0Wm?|$tz?UmPa6Y{``4zL6=jlTly2V)(2zPG@~3>zn&=-$)^OEuH+O`n!9jz?$&In zAZrD_^9=e0ynZ;AC7L*V=-Ai{oJ$4>p2aYE|EXh$Z(4~dwwO-MU!A>nIlG=lTAG21 zmasGpiFWn$KK1O2N*a#CFN~-Rw@k}`D=TCg<11OOyng4xnR|C`VrkR3jfp=N=Ehi* z>Mky27(FyTa|%z)K(OxjWZ-g5&<`dL@U9qOWOUGw=NeefmgeTqoIaag%V53J$s6c1 zz`|ouC)PJQI>YX;fnn4wc7n@e93pdp(lm>6>mr}|%x4Todl`4ExQ*W`)QmTw&grQ! 
zO&wUzWxEqP0Tu;Nt4)kLj$pL3!1-YLY&a5ML6S^#iM}lKBnx(AuOm=i2DH?K?chHb8W>^(Lfnf=aX$f{swtc8+ET>_puGV6So=kRe za%Q03-pwSm(TQEhjNnNQ$Z;tC<~P52^5jWcTSrz?IhK+hGGh{{6tbCXmuD|uSX)n# ziiLKlPIgnAsgv<&GWN{NF9t`4gCV>s6bv;M_kRt8v$M0WzWS;-%BOxm#hxNE_yHN= zDKauTv9_M;?TTu}fq`3l9njh^Rp2G3)khRC!pd4|aPp{}%~;5cc!NVDnd}0qF7aLr z7Md$KO~=Ur=@u|_4?H- z7tZI?>zK^>PHRi-)pYy%4~!jr>e=Um!y_KTgch^is#O4V)Wrfb3^+bM-YUt%y$b~r zgM*A1{KXU83C6m95E+Q!FdCPrp}Cb3M#gA?li=2KJ=ppk$3(l53{vSVmf{5dMeuH+ zEO&CCfLB>F&Eq-OU`K%AhkOcqsDhv(GZ_OioM!SlCcYrwr~+4j@Y~dNXng zoh&UQ46~{E3zx24yH?0DyOAFhsOF#>6unp?IW%@)`jwA)@+}$tW0d93Ets%q!P8(H5%-oU`XbU=uT|X@qfT9-= zzpXSg9^oZ`p9i`$YCJ^~HX#~JpiVG7VTfVEjtlMD+O?m5Fn{|NCQgcKSQXrKkkz|i zd2amZNp`r3z+_~0$Lts{yB0`&ar9ey9mE4)Pf0$anhq?|x7YHD|0=Kltw7uP)vLV&F4e(3#ZwhNX!#k?6sr$0lBUi8qiv zEmz~MATgl2MA#FfS=P?)CJ4Z8JR4W?OMsAe0xx^E@DWrFjeBND)JVXx99veQNLd*u zky)bC(>B$%^+u z%FW+6G}7Ofh_RH4VjYPl3l&b92q0Z?z9@1=RW!zUIX*nFI)B@%;O4;G5*8WFfOT}!pBFh~X3Y0cYU1hM1phlqdFjr2-BAU}y*c53SJ&^3|>COa7xxv0< zF3a2oMJg^EEMB|ecrI7Mn&f7KrJ=#@?E0dYUw4O8pnNbbVqD8#zkP9VuwQc<4jsVD z2ftI?eB^BvC}`Ibq*Y|9n@mP;-kgOTG@nQ0XqLzrsa3~ImoLmrA9je=q!Wr^bppR3 zOggNTmoObyN?qOYfu8u%!fje;@U;;gp8`%+SX*8^GBt`nvdFZ|vI*3V98@2*|B9WJ zH4Ha_@sa*37tXPCf!8d)v}kC65n$e&yVBK%s*!{C=mNG#HZ(;d@vdYaiX01T2*y~P zjYpNG)jL&P=+`#TehF9S*u||`fCUZ7-jK)m-0_3skhaBa{vbvY!=M6^O(%}&aO&{% ziKj1Izg6aZ7GUbwjEXQ9m1+SFL(P>+oG(7VusC~XVdUT;@MQ+UgEJ@Y-dlzh*|&v# zndLyh>;ABTh?yy5WhHg{_T13HNjaggcc~UQeDoOi`4mcA6pl__U~&TV7;E^;2kgRW<&V+Fb+e~xCXOegZM_VDimtxuiQQ~eL`jk z>Clyd$%ExubTx&aDYNgWCP**=6GKdpjNB}A;1(25zJ2<_fvKaKHIcFAjUPFBZ#A96 z`JRzq{wk3yNAQHG6kqfx#~W~<%4()~arV|EDmbP~oux}qUIR8zGWa}b&+GEq^JZpd zn0gsPcPgZktKPc)dNc?GsZoG$GD=anT;TNDC z488rq+3AzdD8B|ZFFrgLN%mg3ImcQA26{Gr1R`-P!ft7lofw?_7|xfxpS^eH=!s_q zeb~KL?mv8p<1+6qrL+|krz#{SQwX#GM4{6KvITZfBu<~bc=FVeRA z|J>=H^u|ilQ~if0`zObG_?tO2d~$khxIa|RFaM9P{y$!JNtd{~V+yGmNW-^mN75)< z%l+hy*Heqv#s_25lLLnj^v@g|K6GGkVyJ6+qQ5ItxO(m zW(<)Z!+`Fjk|M$)eX+z561Nua-kM+PKR6Av7(Q??m_G5$oyEm0yPVwmKQ}65^N6@G 
z$Kx}8A^u`r-M8nLuicm*JBDqY$)u(wM^8O-^~UWC=S#%9b)zGr0qQBfR@xU4NF;j# zk>tX?)$6zCdnb=TAgIFiR#O6hLYLsPhahW|n@}OOE?l_4xP!f;WjR<*b&)d*G#AgF z{n7UWsii)zFc>NiayfW_Yr$)M!E&;cEiK=LBfPseg=@)wrzndY(Y%3?+DWAtaqiw- z=T-4jamIrMimF4w*hW;P7y9KG0WWyy=*i31?|7jEhf8^}WC3?>*o;kJIKQDn z$?#FRx3+%$&O-mpacT20%^4@Br*Gd|9Orimp2V06+jqL_t(?5QL9GU}_+05C<5M#~aM`!9V{noL%qo%HutWkwm0B zR2__Z-L(?BM1FbxZ~yyOYYPjosxy%`a{DME;?_UFBWt2)4nWBo=tp*J?=C;e_&GIo zboRzvCdY!AmhGX>XcXa!p&Mkf@`yROc<%hQ!QshJEG{*Q9*3jD2PUsvzX7{pe`M*5 zQC{n;I;dBBD@5<%=$Q-G2gjzou7tLOqjGe0O&&gWdG>ZNnnX*%st5}+^q@uPa5+u_ z3?&QI@RZi1#)<6xw_N#t5*f^%VUoU-F!GHwta%k!hz6dMJ%XAasp??Nf z9AjZ0=YZ)jNa9N6{NKJlckY8kAw3cg4#vEmV6hhw8mta;J4!J-ck#?W{MSGC3TbUd zL0yy#tK&~~QJf=eez`gM)TvW8((%azf(?21>%o@q4Laeqz(RGbBMvapyK{Fj$T?u_ zg44+#F|8fzJ|E}*8DfJ`?9I2{KXU9DsSS*S+|bb98}A;vc6$k9Fe^RSh=F!B6lZUR zfDov192^^n?%Z3MnmJ~(EKW;^jZTz8(HnD1%no37oil*9KtzXhLj?%usux9Uzw^Pl z@q2! z9E=8Y@BiaBy-?b#(FQh~(r~V~;fxpjh>PjDtM9$_gX2fW4<6{78tpql^QU;lR;agQAynz{zH>P{XLNb zqdf)#1ocYwg1r%~NO*^|%JQ4v`KxF(_1v+^gQGpCj!qsu zIDG2Z!IMWPj*RymJ=kB(FTVcO|KhFP)t)o;eWze}v|rL8PEl*AMxb_+uuT9KWg7n|Pf}`A zaT-Oeaeic{wsyX?f&j7$^oiJAfFh4 zd*viE?;EJu()f}4X6Yo@0s$MI42k@1;IR(cM)Iada3>qEB#fLhIMz*LJ@zcKdbk{X zN9l8jCb|uqj6C=|Y0VO?K}(G=ICYDpuo{!sk(M*s@+?J?k=Sp7VkDwmhOl~&sn7mQ z@KU%b3-pN69J%^MIFV+>DGJ>&i0RBI8L2g1^M)xmMa$QneO{`|EieNjH$K*f-1;DI ztzZ+4E0ZTHn@5zLFqB_K`_-hBagz36ho?zuxD<}Ms3eOoG`GP^X?GJIIAcDwiW@N< z$4{0DSXD;JlPfH{m8=taJoU(Umz_DlDaeuhr!RGD2al65c70ZgpiP5e-#}%x>7<7e{|( z7Q@gMrv+JzQaxMpL)`o8RI}6tk6Z^G+E{s_nriihw{;^N~ z+Hd^q{RHu_UVCX=)e2J~^b{?fhgfBWx-lC-1N?bQ;MVy%+xiQ}D{?+SU5Qeh|-ndtBP z;TzxaQfr7j=7*3hS_;z(!msq|BpZg|g29DOGk?R9GlWu%(s5(^>MVR*p?gy#40)^S5t0@!gzxQ=FqA z@qx9IB8{B*-IQWi4=uVZ;2?n`v6p}CGuLm;-@d!NhJ~!k9ah1$TxBh*DKxf`^?c>Z z_4!vm^&8y>j|puNs4vCD=^cK5;+@)cbT>9_- z;{WT3gcHG9B3SK?1d^dDc*J)!9;(KRxkxcTG0>A+UAl7`Tfc#~1&~n%G=$NA*b;UZ zJtCr+id4?O|FdFlZKyXs*u$Y4g?Pm4PKMcK%Y-w|83ncc^Uuu0!==j~yrn(JPAOp2 zP$61mEoJ6!{PRD3=h(r~0d_-#YdCFp#e!sy1j^w6XNE)$jPyKp{LouJ`bW1Pg&SrN 
zG_t;_3HbYX#W(-{uR~seeKxr0aj8zCn;jY)ZV~M1fo!Y8eTfr?$Fi$87f-+Ct2|`& z13hx*gxY9gf{c>AmAeaXeeZkykzgNVB4bvt9D?S>1^H;T)Ef>AaG`86!ZMgnO9XVK zf`&OKp!fwZj5~P08lBr4`%2XhGVb^-1!61;0zrlkE?|swjZD4#@~7T@`^?IEKDAy* zXUdr@@<(gj?k{lt_VQY`_WOVGXI{7mWkMqe+Dz}*%b$4l^!eF^ z6iMgGEO6YKTg?60pF?aqr#_ z-}r~2t|Y7dkUckJ#DkpUR8E9FZl{U|%FL6;db{5F&bOt5HG&FhNF-vW;5KUxBK`dc zKn68mEQGZWsJ`&ZPu}?;l`UdtE+ejAN=Y6hR&!_Q6ei6 z8=iRK74(hsnQSGKE#yNWIb07GQed^q2Kv8|H0#6JQ;VS=0EVr zJ-57G=o%b9`m3LZb6L8?k_e9+dH&_0p~-XSXN#o}D9h%`YndY01pIIfa%XHQ>-6?JjYw3KJnUs^0mx z-zl%HBs_N6VbU%HikV2Yi0^bFREbmzUE%6*GTs#pTzK>Q!T|m>4m*lBSl;3*D=TQ8 z40bjYf>4tHTiRxc@B7=xr2zzCAz2#jdj8{|x;@9j*c|#8+vGACCN9x*ri6S%9xkrs zXXjR*fBDscqb&NufFMG`?U`Tw++wPF`Q{QFg!aY~8p5^+krq$@Dk$MM=T?)0*actI zWD8-)<>yY{xHgxXy>XWlq6+y+Hd9WgbJ+~1PH_8BaBZ!K0`uaFpBTm@jqyiE8L+2N z%0H_Q_wC)51UAY5J>DDf6Q`j5{(dwEq-X;gbs39|TqQ^iVEwiH(L6#^7 zH4nd##ieUAM~35^BT?oY5AOFVCL)~FhcZ&_N+c_V{KWWZCbP79>#7`|wdBm{pA?Sf z!}3f1%^&>Zv!{-6ls%4FOg!0AhD(j?EUpfwhy&e`!xN*Y-~Oqr|FXQO=gupG63UC$ zE^;o&spE$NwH*2q8*L~*77Z~+<`N6eq3%t_4vvqlE-hqPDyB@9tjJDZ#BUlcF8}C_ zv4PldKR2!y@FHd@4od=)CMLz~2a3i*Bg4JNW)8ml_K&@CP8+ixB%lPk**#5m_5=R) z16>T%md>6)mAxO)h|ZUEB)xqv5c*udJh1bs*QB&Ga^B~>NBb#On0Kw{VkrGldmYZla&!8&+s z3*kL|Xo*Lglx>vP%uU!mHRs`sFS!FV9?BHwHqbcN4P}xU*;z;#lVtwiCNlo8U#K+QvyQ=r2Vw8+DtyD5@SBjd3!Pkpq1Q(+C2{8RJ~*Wc;D( zx~ZBcohQVcO097sUXe!2s0ESRcxu$L>woJL>|LivvtrMb&H~07$@x&)^@^7eJHa`p zKgzA*93sXp05$>PL`K7aw%cB>84-vDrrMm9j3>%mf6Jti6HRr}WkBwQHMj9cKDpc> zqw~tLX(_m}*sFcI2)R@!#yLN(R7xhh*W_cs#^*fVv`p%=99E*m1gzEg1v8qvty`*B zQ+Kx)2-jR3olycX9P0`irpo9RsH+s@4Dsw{;?M@&+?`&|;w}y`;U@&CiH1<}YRoX% zG#BQ=1;|C$t*L0862j1rhRO!3a4=cmC~oE)ES@v2>b7}@A6lSlI7ADDNp!}==y+dp z?iMbxEHP*qgxz=CyurHFCS$1zoXv{8nxpzqr*$P1*UiU>NHn5o z);O7gdac*M?fz;L=%5$uI^^;?8qA|{IB^0^qp22e`Rf#5ho5bqjnjZ0D5jv1BPTwa z=;E?b%!=$oFIEe9N6;gh$f=)$BU4i|C%xz(rxRl{5;Hrng-4Em_P74%=IpzJhJA|Ibxgnx9-CI&+?(q5F8NPP$%<|G2BS*24Ri|J! 
zZrg|@yZc9f<@sY?*T@E2ZfpF5zAz{ONmI}<_Hq9Vm;8j{TKw36pcD=M+-gFla{26; z@xg&Gx*dmIYTqb_Nu#Q9=wb-Nm3jOKtK~c_9t%fcL>OgY zcyjvKq3DntL$Ep`*f=CK!G6h}$v^ma|M}|qcMI8-m9;e%g#sK@0bG!iGBw$!0 z!4IpMN}aTIFZkS=IJfq*@eBq*rTtD%Pjj&kkVN3v9IugT z{mK7+_ybqLTOgtDrI(rg!+cYrjMT*oIK`LVr(R4t{&7$;4IiAe(z8I zkJ)qYtfyDltN=Q~96+qICqW_(N<%Na^y%b*!`Sk06$E2yH?z_QLl=A?d_nL44IB{% zfRw}=loG6F*REZYv~(}|%1(8vaU(s#pRMJ^YC4rnu+N_D861iQ?sO4tgbR+e7opM> ztR$mx4xqx;aO>LTiD#dK5?QPZFe4SEQX2eB+i~L6Nw~x{aBB-l3BYQ(B%8W@^XfB4 z4<*^|TrNNplG}Ln$WIDy{3=5on`4g8OrF1T>DW`Jyl&Gx7{k;L9E8uEeeYv0KcCcY zmmd;Ysp0pcpe)XyfQnVp1adMag-?|;{@GAIcCf%tt@c(&&13`G|Z^N z=J|LI-8mB1e#{C-6-CRX3@3daJ3M~p=EZ?$v4t|vr$hXYlu_Sk2(V4Bt-BWtd59gp zRIIHrh+?GK(?=GeoukY2?A`-?Jjbpf!_QZ!@c=lPZ zdjg{d<0Z_)s2#@QIQ`tGdqyr_xq-wk7nh~77T19l+b*z1b@%sv>eCu@`tI`mZQer14V2)Yi z$~X?4Oh6DdFJG<9&&^HE%vif1Y`=ZdP%(Dl$$RhK!<>fSu*z*v1+)cNpgt_cVTsG| z_*2imI?#LV>V@0)R-!d-GsmdQekrjN1Lov`!%x5Rlo#stLft5M?lgD?tWfWvr#|~z zz1J?ij}We4uV?SNn}Kq|CE)7n8-C`c=e_s{248nNh(;`EbNKixzuGf;>-u}w@2<*S zg9A5_>6JX%8I#qnp2?R^eZ=b;D#z{;ibKR-A+%u(?=n@;x}cs*#6<$N43oOG%`mdux`u}CPF&8KeN?mNkHI2>!N z3~*gZrifH37#X=VmD(BU2)k(P10>;c63&$6o|qSZ^AF~)oVju1Vx^izXJA_pB9rw) z6ks&v$*JR`2TysSBsXQyRh>P}L6ggMm>+U^p zV0!GCPdeoubYRnjJlUQ!^77~Z-QCMq7Upj*-nhk;B^*~!TuxWH@*vV%jaR2;o*JE* zMx^0~fM2IfR|Wb9ZhqHZXSh_`&I8 z(l=lzH>d=;0U;duoj?B1cP_s>cW0JqWD!SBP1eve^Tnz+FgSYb_;laYK~{HA`|CO* zs*O$49#DV+Km!*DH64$x!FsHkaP9=10JD4|kW!Bt4Phmisp@63xm*(=gteLZf z5DK%{6E5k*PTW#BOOZ=uDxpN8Ka#w3<(`+%YSTcQK=ZeJ0j;nAVqYH|z(mwU^o*5) zt;xmtn>YKI&ofIe7b9$GLq7|18XH@RW#)D+i6wl;Sz5n0cW3;-I3onPjW>)QtkUhZ zZ)r^DahDKass-X0hD4B#>DX;HG{jPFo4}L{QR$UgDB+L^M2=7llAYBD%bHvd^#%hS zh;!fCl%!nSsHlC~&X=7aAii_J6c%N-(yhG0;}$kW4p}i8?(vX?@ts9qaxlW}5FAg$ zcWH!9Dq-9~ydpb{2Q4e*BWd_UPtrOV3W4)sbv9Vpn20c8!b(eeEhLB}v`pldhwB43 zpQPVfLG@9)xvhR2@w=t_Q~8y}|A%yHB{1Z#8E{Iu ztI*zP(Iz`hXKsKl%LJ7_Cd7c8`GO|AIzmB9x;SZRySO11#}Hl1qvF(YZa0cVbJ?^` zL6n^nZ)uj9Q5ErOs_LMC5N?z}jHO!V5vH;=hfzN@tLDb!BHqecBn)teRS07<_ELFv zIJn|2E@yS^i@`L2F;P=@CUEBDt07q?*9e1`4!v@R62xLXm;;!5bNO4Lkmeq9+HX3# 
z!-UfznX)+jncn3&Mb|A~A;}#4!6kGDElN$0DjVheLg!T~v#>g@8Hg;O!Yf83T8bfw zNoP%#P2KxM5i}U&W7^;=)?)cob5bqL=A5np1Nr&HJe=vcZk;WQU9|3kenOF=9q3In zH!S!Ochi@Vhgz^LfJX-+m`4|9p&`*_;3Lv>f%gQPHW2aOZz~DZK#^Oi4_HtVRKUDL z8b21IYO&|QM9-kR^M`NnjsODa7@pM$cfA5?6`MxoY{3_Kc_~_K}v2IQ@;rbDM z1rwlA{0?8?fe47@P(TjTz<@+J(iI&zaPkvF&Q9-|LZM1eYiJX0M>K%z0qdRJ-l+$D zV<56$h=lC;_F|$M80}Y6X|v{LS}v=D%1NTN>w1Thaq)RP%(iPs)vB!#ehn zmu374Fh{mOh#sUM#K0Y>;ZKS(BR3X#b)7jzVqoT1KJIY@igJNj$^sEuG{R+C8KoM^ zM$AS=xR!g-@Ys)!P97QZnm$8=a4^pOlN24_id%W}*v)GoLBhqx3x_@gx~cZ8&GD4bw<@X$wIJoMaiUI}vy$L_9(+rU~4%P;6-t%sRj*skLNA<9K7VgpBB z{HSc50X#uqbI|7)0lY&efuwtb*pV`@44@aJfe12oGwkZYGuxGT1v%^=d)+JZTUzzlzJdAa`>83i=pPoP*X zheTwVi-F;bmoCSy#vQe6TcBx-rnm_rMs$h^hc)A@H#YSwN1xMb{#P4RyOYB-iS6oO z6EPCjQY&mBjfOd5G;!cCwks6}sn8!UFPev6q{wm7v7>pnM1WAEY~)rT#Lc{ipBX*s zCe>Kg3#^?8R8T3Bp8=hWfKd0ZN{{1*z8m8n*L3e+nib$1XAi?C#m88&0N$L|z;S4KN)6;4{kkxO!wZ?a>gK`3?39AON zw5W5}uN+=nYGOcKkb!K&;l${~(__#YPF|Q15lqNcttM*(L6RVaxFG{=l18BPDX&;9 zj_GlP^JhViD^L0c7-t^giq`f8fE3smE}O|(ECilqYg z>|u#S6mUghm@|dAl_3yVT3qNmc|w2^nQfHgC|BjNhKH^KgVTmODxFZ(w}%e!3`e5M z`MOdYJMq-m$!P&r?2j(T><09~Tr3?DM_a&Uq+X>6jPa;a9y#&M$nmEf1k`aCz&`23 zM;b)rFJ_7bU9|PU$bl2j9XN5q3$A*#jAJ#}mKzRP^0wODkON$uUehR+e>dRM^s;r- zU}fJ-d;vf=-pb&}K0g=|)!GtZBPPN$j#+}b##XF!Iu?xvxt#<}Pis6lctk6l9EVk% z^9s3$EE37EY^h5KLoBBa42^0I>sFwxOv4v61GR9fwJ!&VSbO}@(9-Yv*CxPq%|ZMp zmg6Tn@yzoRPd{ImpW06eC&FzkF`-3pEPfo8WX^*zSYFlK$*lyD11CQsT!c8~0G)wT z8FF42Pa2cGY@#DOms?2vgV;eph~Y27I^9Pmo_v0Gl@P{y?=9|u$(Gt{)SHFU5sT3;#g0^xiNQXe8 z%V&%h)1nHC0UQB{NcD0V-5g+C*wi=@K@c?CNJsr?6;wD{h*tuz`YuscMf?tSL2D6= zD=dOCJWI!e-8AuxX0vHLO}N+vDYm|<`;mZ@o3Jy0Ns_xMCzcA3g>9D?a@V5_5Y9Ey=-jaJb4r$fi#fEAp|Fp5O=>9xDvKnW3ys05J^x)h9%5IUn|(O zeP`AG z3w2_sSJ3%qA0PnApcxabpuoDw%xp9l~7=3_(8NG_pC3phFnP;Mu3tOR?A1g6x?Ux>Jj%=E25 ziGID}e_m*1j-kh?29N|kG)xAWE9s~lN^nCyQki;Cl6aI-NJ*}dWJ2`v!bw+O3(+^x zjZ4$`QB~4i_@yLG-CU-|=S}}If-rnzRM4WA=2bd}id>qS>GoF!J`T2(sBEYT)*g)q zP_eVE;U-UvesO}9oZ_YZxPb;i4|uRBa+HAv1nhMh9X49K5u;T5+tj+YD%ixYtfR4x)QS3`{&`B3-x*%062Urqhm}jdwZyEJf 
zU`*C@%${X7{Z@5qFmJhNhZCtliyoNGAlI?Nu-%SZbVI0#ti>ZBnBqzpKr>v15KPoE z)jU>4z$mCfEX>$Mg*fjr!Qj3+NDzHLh%Z%%hN@(!%8Sved?;JwP!yCAh=J@73N{q< z6)zjqR9{rfZ8RLrNkcG7y4)lpB~w5SLsMyD5?RyejI%na_n?sD9pwyDb$1$|En*1- zyX^EN3w`3OHVOn_HLfi}^|k3bL$R?OO%CEVQ3-LPG77JX=c)oya$be_$c^}{?&DZo z!e_dfVkm!0u!#ZGh_-HbHZY|526V7|&lD=~Tz*ZbWhKsCiRp;E}ZTFUn=lDeBHcmNprUG-WrZ5c@;>mbJ)y0KbTjIZq)`^!go5sL^g7rbFZ+cw zaFM98Q*!nDXXn-Sq&Oo7?K#la)>7! z77&e7N+EhEL@V6p*9~zqxDLV`l87u8rf(^HETrKsQ(OG#=O zBP}3cmd?P*v9Cq07(fc;S7OOgAfv46EwTxKv8szv8|}n7z~c7CcwH(Cma;5JvPn#; zi_TqgYX^*b?0eOr^UPkcau)}Rg+!WjlYx;F=n{gmh~bRJN=ySU22YKf1bgr!^>4fQ zM~G{H&)tRyFn2Y#q)LI&Wmmt2_hg-Dw~%ffef8EjS?A&cIm?+Yt}M;`QctXZTmsc) z@9KTE2g_{n-ToSs42=8&($l9;kB*KyDp4TFG0P&amxWWjiqf$O*1y(;TWCuhtFQg#z5bfGLP^1?^39pg^xv zhLwX08g;=lC#doy<=H92LZCqA{1&B_uoCe6^Uo83C6S*Sq{6F*4z3Tb=~5dSnnN>G z4Sn`TE5wb!`(PJWn>C(y`nL)1l0FUA0zXaI_lvd~U3tw7;YFPqU2LcN8sC6bBdUJ| zI;6R5^#RUmy>#I`*X|kLNuC0E_{eIH215C@?&2~G!xT_%#F)z2T}UCJa1qwx{5r;t zP&Le*V5MR=9utUMHY+x{cOXSosOPcj!_tDq`Sk}uKjMGdMMP!R#h^>wU=5asn}E5c zpeB$45nO=u-Eu0rnV~{;qNtZa27gFYE_)5`Mipx+**KA(=_SHvnW{gpEu<|!$7@Fh!*h8%pnl%JU;o<=fRRJpTW z&Cxs`zh0bQ7$emQS!ng%eXUcAd&3u1Dl2Nh4QWcI>2@YoUJT$R#f`ffdT@@B(Lxqq zZc3~axg-et2qSTb8&HZ)2_x-Q7xsDV7bED3*}+ojbWPsP|$pdCdpzW$wafHBgn&p1|WqPV-P}6 ze3oerCx$b)pgwt}vYJdXFVBj20B1XvH1QN-0jz{>gLlIHs6n8q&$x-mfX>mx0Nf#m zrXW4twAe`ruN?JqaS#*=Wo0zNLLBAzAv%H$fz`kQXBSA7FJmpxQk0Y&iPAbpFjfLK zpv0R{NS<>}Z)XrBzutDjo*-b1W*jGabQZ?8C81#APKDY51)-~gA}n!WO5RE`qCtR~ zd_(Mz^!><(swv0DL29h4Ol|A#d}s>+5NE-(8sb1zsC1m+bPuVT1jK~?;Ru{sFvpSZ zT1(Rv?i^89;S@WjhVY(nM^3CG@Mu*Rg}`FcM=dW;ZXD$!es)M8tp{LA<3}!_uqTm+WSFcW1O!116PzS3ypMb!B@-xSIT+dsZtsryx`5{C4rSMT4^8rH7L~!u!V%Sfud}2l@z2mQN^n0 zMr*0dd0O$QVfb%Rnzg*Xe&XtFNIjvO3_KF73)Zj}rMNbzYS&Y1gDBW7Tk}}&QKS1j z`Y$Ps+F1h!{yTS>lr)0|GhMe!gm!38%SLuyHbcOu(fk9;)_A|pr#dcOB@Gb9b64zY47<`YQQue0w_ z7kLRpER7M8<)S@cmWp^JVzV|sF%f%XiJL3+~e7`VsXGie>%7b+bA9V~*d4ZFb)pu!2^^s)_9eCH;SpcC#30T5wnKmqeT{Cu0UXiMl$Xm3ut+c-5M zu*?y4w|P40+Cae2XB4Nm`>MA+rKH8F(bc7Y%?8DyER}BE$?Ju 
z>>~gN>?H6VMhqTznu57x036^uz+*QN6p3RM_7Oo*P{IyFDk;r$11d>oEbnLU;AIaG zFdLLH1=#^?-Hg_{kAomK4^o$4Q67X1|Yd(1Q+W%UY$fPmWt5`2jAmbEW2d=Q>!|_ zC0E-yHYb(KTsI9nG4DtOenu7menf#U`L`5i$e=O2$L`DdzwALEv|E+}@v(?@%~C0S z;W{zxSpdh{b1UM&;?F-+O-f{ryP$7NoqD8Rb`p^cQc^bON~&8OY&*R!wuY%~LwV854xALz-j2J5k)O-93D2bB*5!yOW1 zy1jPo+71c!>l4EUby1odBiSTX15|-6U4GQSr2P7z^CZF-!GcuT)U(Fu zv+3pi9(*jI1~3SkA6#eLxgXee`tF0&k6G_H!z5QTaibNdhnBdL38!Hd z0(WhcqU2Bl?A$2FFQ6XbE4P3iI&_ec7ZVMSVAC&PYwFJP{UE^b1n!I@03(m!k<Qs% zycdf0@TsqHR^pb9LPEWL-Pw#b>r?END_6epja7~htzSL2DYc{l4In78Sp%TK+>?%7 zu#his2^oMofBt;_FN^EpHJX7qDVM6rM5MN!>RMZ6nWTQM=BDx=-^0F{iNOKjz)TN& ziqElV67bB02)LHox$y!T<`I^%o!$h8lMJdN;D@b3+%M-Fajgu{WYv!Hxnc!8VP|bO zF(D--D0|&@rqP0z?aagw5Qm>|U_PyPnC0M<4UwdxiUp1_;gfoYA=p1k!&Tw`(0CG% z9$T`#R7-SpDIL)v)b^4DB;)=)zma|k7NH^lMSWO)F%ugAfu;cm`z{?P4TX~40amRB z_@o5NC5}GP=Xx8q6z)Hg1b`dG`!73@hlGFt?~sBKq>y{GK0!jg8v+r0)N79q+5Qi6 zc7q{cBtocfsebejLB(m_I!e{;B*UX`%M((A@q(@rY8W(usvqCqPEv!>%>EYpi&L7$ zTsPxlOz{;S;Mr~*-?`;vWLV^vVs}p2`MMhj08GfwBnD7r(HBj3HyQVwhFYKpe$k9j zY8tB6p3D2NXEa8TuIPNA%+UXcgjF-FQU6eV!6V@@RM%YflN$FSiAn}|>yP)*#2*Y8 zeidYc4dp@73HuEJ&}U=%BjpSD9j@Q+wtuM0^YP^?Esqo&MkL^|K1r`OSpJdL`h*s4 z>|pZEv>K*h+`IFIFMPrEc*7=R00?AP&VKJZrNz6^Pz`fixulEEk%!z4t)pd0(IbeP_9ZN}$6_nW-gVkXvO zxwLfg+?{u_WW>q^24O2kPqw?atn15P{<4Re7_*w($4P5Ndhg!MUbt`p3{qXu9bUmn zxHN!vAkS^p7Uc@VD~7~ZU;XsK1Zt0zSOOJuPCVzSU%8|s>01W!uC1-%^U1t&eKmFD z$dTXw{ZGr>S+Bo2lcf5efA|01^2#U}(c~HvrWrJz^V3hBJoy{{_BXv4KBaD2#1*65 znnvmyvzLGP{YSpo)!fQ$aclFo*AM=JZrdUZS&4&SF&M_su#OeSsaD(VT>vO1Puo>O z0!U?pKeaAFnVN`0cDx7cR*3O-%W_DjTI{e0XC}}!gY74!RxIU{i#UN|sfA#1jtH?g zM)WN!gr6;v-uXJ1gFbxjPkqkaG|RfTIg-6SzU2y*d&6KB@2<~Go5K3R2C zQCCmfPvS{Tppj&uG{=zAIGfAt#P?x-&b{}9R7XYBZuQ#l-i!Ot=k)OiU)T!DV*rU?>0_wtF;}s5 zv}%aBJvW0<6503!Lu}8G*;__~B)<#+_`yN*+soNUXj2s0g4LqYX8%UC?alu1yzvKa zjuB{y!F>k>1C418MjJeu3bF4MeRv8qB(j_a5+C(A4%X29>XU^J4~QR2PnjHH3-D#8 zAKBl^p%5$i5tYBct^MUHZHv-^1wrlK*P+U$&&K9pkieTA>7n zGS@ASab0hkdgmwDIkK-3PF3xjISD9(6@wIgrf9GQ?1?mE5%e-SG^4)i-xdt7lm;gB z(e?{lGo9-zRak-iT1DH+&KP1ixPp@P}{PwDDx4 
zjG>`<+}Se>4NCk{AXmv-IiP0G1$Ht5z2C^(I1N#mT$isMz2!%r@ zk4Ht>_m2F67YL9a!=caAVtD)ohsG~>^IF1?q0rtxy0OXUL!7@tYf`5}RtoCqhdiX>K!!!#Kk)iOJZ( zV>56bG;XK_l34Q$m^+;@-7p@K3$T-g-)5b}#~tq}O#!D*{u;aJ{N_=M)ktU^jp1`lU-ZmNKP?7f9d=9xP{v z!mC}u;^e^*fv{SQMq{-K2N|~rK=zF{-l%k+%T=Q|l~=r?*Hy06iW7akEmAv7&;cl~ zSSWDD@0~k$UYmWbmW$Q`7#cW3q!RZk@!D#jurfV8#!5-Mc`Yd;p)lybc;(!?*E8!y zd6kzr>Npe*1xk@>E?i!nIe<~GyHu${??yc>o3y;V{6D|(ja)6s0T(P+K-);b%DDWj zaV!?KIW>{I6asg#s!^^;wuYmur{&*!@4dHwdL~iT-E=JjDJcY|!c0s8+DsuWQ+8fH z5Cnu8hZGEdjB_wMXJ~H(ecZQTsQ1rC@TJneRq!Dz$j6;UK%Z7?9(^#!bFkT56`bBd zXfr@|ANTP{7G}`^mQWvzcVEhGQi|E@Sfh&FRe<3p9TV`ifYew}egCrM&G+M`)#$BI5YAAugjkV#IA5ECc*jK?PaJSL0eKvr1uq!^e&zJKO8$KEY5mm4ZixU?QFQ*WL(EjNpQ4 zG7cj`W!uu621x>DX(+8km~dd&3V2&hQbr7vt^M_Ge}IoW3MWpDU=52fml)CvN@NyK zfdnFCWG#XBl!JoIzt$u&Vt8gISHfIl>TIjFiXH+uoYuY0$%Qj&bZZ+oa#9pQN5nP- zjG5TZF*ovOP^x#qc_ot&FrZThLv=@Gc%iLdYE!X~F513-4_+r71|kmJy2A~ZmQQ@? zUwfg0fQW;B-4EbNzxhA@FOuz0yp;n!jvqgM@}r;e0$pA$BCxR-aQDplcYmY=?kYTL zY3B|N9enn)pG_Qkw&ukHUZ{o#u$TMZkN&~)@-3?tGM%XuloTrE0|yTL*317oRO|9U zas8a-l2=>(=WqNs*SwZx=q1I_Dr!G*;>7ePzNE$z=sfNB%3dvh>*qgNpTA;IVnor` zR)KP`9DVM0zv2aZj2lQx3%~lK{}aeyrdE+^GCve8cv3Hf$Ja`w1gkvv+=*w7z7+ND zqd{K3ew{rXWG73jhIHQT3IYxnN(p&Ll(+yHWHK3t9LX=x1lBxpiTmuZZvq0uv57*M zB|)p8JZ{?WO3?TRx}9@!fVDCkQ_0sTx>XUKce{W<9UToW1|u9rWG`Fh2HjwpPeO+q zpOAp!cemVCtuYt~DJ0|EBa-pnetAjo%d~~o<~MxW^5`e-MD7a#Lz$rljREjtEH=r! 
zB`-iWq-=TbKav1J%?eyN%vxc^K=*RFgT%?g)%R1Oeq&1UT@CwarK*Y}Y>Uo;O;Fcl#Rz z1Y0ZB8ZD=~X$jMBNh7gJvohq)tyV2f}Aw)|QtVliOu6mMVT5&;F#2$wLm4kRFxzWL*+qXh6sQ52p?#9Hcp+l#{~ zE)tV-Is#ba6p%C|Iu2^bUeZSm?BRH%S`n+lQ@oLZ zC>+Ta^6_{=C)he^EC16r!Ri8ub})j`B;~Y8-g$i=2!KA5DnOE?AjM3y_sJ$P3GToK z?5vDL;9RrzE8=D}Xg7Q^Qn568bT&3anf;cW=q64^S&p5&rNAa@3HeNMRfK?*Fxa>) zj+BmWs7IQu9$jD!0*`lkL~x~A*4Do)U=UE4sgz^0(FeluXsyh3WoXhGVxp0_QZ$rj zs|DkA;}$d5J|}s$R3PZMkCO1EV$j*D$qrn$qn~-Oe!&_oj`h52*REloDpw1rAVsYE+O?nl^j=I?sDi#Ng*8tV zI_TR1oIN~(IDl-waM?TWyaR&Vpw4`wq%j(&k17+NTJ`n~Lsq+5Kz%40WH4S`UcK_} zyQylV6pF!m8uCN6P>^NCQaBX7c<~}7#BbfMa59>X<>x05uH|C++wgU}!bxWH{CGrX zF>!Nl@lLuJ!2OxFqc2taA|;lZLZMMMG_ucsJLOo(bb+uiHlYEV%6}+}>+Yq>)_3dE>^7pS}Gv zgcaLsBBf$)H7`q6a?|uoWw}6K;2>@K3WkZJf2ZDgXN~n*XS3#aVPOGpJ0jqvk*V|V z!$JUr0(E{bU%m`@Av}8{T6J&^Rx1M`4~pvRN%9r+fC44s;@eh%aVN~U!}g^JJG!*e zRYOaT1hEUu-&s@Tr2$k; zu!xo1`F$`5I2c+MV0&+2Veak%Sc*kt=hEHoa+#?`O8K6`n&fA^wY~=zBWYnSRv%Y$ zY1lcpxC#>4sz^D8BhbX;6AlH}Bzv%Sc2eID0`O5Pl{$U;v`JKc$zeM?VXH(b?=4&h zM_q|zrHuWewFOGZ3Po@=xODpT68o8B@vx_}Q^Qd%FfX%0ytK5!zT2;V{jZ9}EC^-K zD7CQvRRc8|gaURG9D`(MDN⪻^Nh8sxMioea)FQytnG^Gyx&g< zAY0)N=fMqS<25XwP`7jeHJU*X&rO_z!7YmDepcNnt33n|ozCz|+s1~!u z!oNspo9DYKoKj1MhE>|CCcS26&fLrhPHr`!wHQ<3p{R}l02+Y^*XiL)BzM3{wZsBP zBod{+)Eg+flWbjIDVd5kK&1w>DV=nUK$(PMxzx5Pjg))%w=+ZA1JIs?EDc%|-$J`k z7!mt@61w-;``}BiEiBEpT=rI%DtSgMj!g>Ig0WBxyJ00*ju+A?ccCXjjrJncTx642 z^!tHtKIJ4s(OM){E4g_BVg|pInum>8^Npskr43DAI??+>04^{k8+3pa)M!qMYzNZv zWo=F3D+C}KU<%Ht5|C=kz`(@NJQo-V*uV%e1QNs60ws3A@?szKtJog7f1^7o+CTtc zvQEPxgZ#QA+H8UQ4_IbjTBMlIt|as8jTPi=g%~ajwXiUh5DzBG;W%bi5eX}6EcqZi zYeD8A9Mu|%#UX{TM5)ZgUTM9$vRqhi&D7Kef#Ku}rBNESKy1B3!=)fwKvY7hQcTn` zYfe79eRFlI(6(l(1e(WehZc5)l9-7lOsauoFtQdX6qe_$3u-7@PO!D_D4m5wF%uVS zxJRM>$V@Cf9Q`9-Le*n`y0T-6EaVe~b>6T5zr8WMk?Moi-q?T%a6kd3aGF60v<*uo zFD)-G4bCl#>b%?o1nT2X<1QCuluem16EjQ3U?J>CI%#($@P}4f)`B6Jd&ZMcawsMf zBiS%7hPX{Pl*#2GfV!6M^jKiX{n*oq?oAo63B#5=gPr9fR>x5OKn_B-LKOZSR}SL60U9)7p=p+p$(W&Q}2EMA3QHx zf3x`?nnYk;z|lbLvPJ^D^xJ<}ttIWfwlr2UchCM{b#4|TDP7YD&?=A~D=_5u%CCLS 
z8#u_`TzUl6u`xF6W#0bQpOb(pa`}89+}coXr5-tQCBlA&Ye3a8?jDIt&8y% zrF35HI|L+erJzDExN=}slnu{Ov;3&EsOH?&9eC6_k!|g%ez!7fzs1G@C2S}Wrp57v<{bJ|e?}m4b zkI2bsNW!8s{2}+5Wzy*M>hJ`m= z-k5x^4@#l23oCX-F&>a;A2~iwgx^A)1FSbP~u2K%F(%jJHB{2aS z=KdZlio&tw0Ifk6l;*m&W?xE(pU&%keA983X6;0R(7 zz?&qaVU$c>g5|Pb#(h;#Usupym@Mc*Y&T zOPpT|#FfTT2)TeikNo;Z1i#GYi0Su@a`Z)`k2yy;p$FABr|-lc1_Fjn@Wpurj5|Dn zBNqU9okMx_V_WJnwEF1gD{-YKC|85YRe-(GRHrU0wv^R*+#3X(860^e)LGT?@M|Q~7%1KPw9fnV zh4bb*^+e-sLP3NUYU7UEaY#oR#i>)2K-fV7KxTYZ0%r{6(dANuICz_4DV!>rYgF~y zf*|0=3+6c#0^lr(dRy;a9mw{c8mWj}rbW9ZV9~|c_h3Q2b9$5+P&TP9uKgH#kbzX> zLu^~y^+Jtw6SOc(0t5#}a7p=ehhPRH+Nqv)!efMhh?C%q!k`I3Y2FFMIT_Yt4I7aQ zpM}_*x6>5+F*f2# zA{Y4Xx1>#Bo!2`-z(xyTNh0Eibs}}(w)%&!0GwbS?UAtowDQK@gFfl04!^u44Z$kn zmzf?$0U~}OM0Ows%ij6j0RjeF!yU?sDIY-9O}E{Y&v?!G+F0V2r8VPJ2_Ob1If*vV z$YInVS`?x|8e-6Y!k-dYem|*-*c8ArX?bPY`L#H|z5$LjbYzD{k{CGvfmwff+(=L9 zu6(~1|FOC1Ml+pjjB64Nu^KHstD5NfncxCcR`Tabu09PW0 zFphtL42%-&my&ne?oPTb5NO!)Fd;Fl8#WCCmarZN#C+Rv+Dv+vq>;?!^s^XR5zCzU zXw??2d(pXh&>i+o0 zk%}RmR|-zUv|KHJ%`T%++l>gEEl4s-;E}ann@w2TOlY($1c89=q)$$B;*k=JY&kaD zCOA|60Aiedn2#m6cg{_1)-hpm7r?N$`?L z@s>nNwk%7Uv1O0<#mq#^Ol-tN?Db+J_8)NUzMB{OVq4 z6bTXp2;#o_zPq~4%&e;Ye4nSf&;S|`Wmz67g2`qfD=YIke)sh}Y3H@|aXU_+XYZk3 z+0Q1z!j+GI(1Ly43y;vgm2a%(RF6S``2qU4$gj4?tz)?T95}GJ(1sefurv>H00Elf z5zg(&?T>%@{nr5j0`%#$r4x%{ivo*N&Mg;P8U{7Ljv;*7=A94#>_|nhKw8Ce8K2uo z!Yxl)CIDDKr@yjDyp11wn6)Qr))x1`)VJc4Mj!@SWdIi4+{{l9c*(EpX_1(W{nQX z2wiuiScOkz$FV`wRX61-0G90J_FbBeuz2r(DFZlL=zl zzXxWm1h5a%OK&9BeChURSe{vIZNK-EJ+ZVW8rw`JZJnmB8?v+)?>3&T(_{ej$W!*y zDhPYt?Rql_-~D>)KXhR`rA>QSd(MhDTI1jbjGJB;w>I$Uw~qw@V7v7`3u=@Fa*69e zxOc-%;xVQwUpb&RKJP{kv>?5+*+q{gd!)#NHko06qN%Ne`SkqbL*Os(~3bu}u@^QcsT)m9}>R2gahZTr&V#qYDch z+v&DV*^cy+Azix$%gp5#{k8R3zygH%QINq?&c{h8ro48R!vEH>0pjv zfrE4Xy4|4(s@#tqn9V(A7RE&C#+Ml<%uXYgX0~hN(`&yL2=GP_D1^hdE<~GSYiC@< zmE$c7TQ7ck{_BPS>`<%Ma@id4=j}VZ+T^Z1b~@~iW!cBx!y{X7<|_wS2Djoz_Xpf^gFU4|J1TSy zyg;LO-14hl_|&ZbRU!Br`aD$IqgSgt`3g1vhEC^`IqaT|W!V_9z&NedBH(*^`ZJWgd1VQ}7vpsZpdIR1~b*wGwjHzL7&{s6Ks6kC~(YBy3cN`EKy>r-m 
zIN=taG`!xu8*;KOTQa+&4P9WIJgY8|;bul^(3l;Z*n>tmxChF#=X~-kzuHeu>;U*` zT9P`jzjC7X6TJ5?me}FJdxjUS2;!5+VInnRW=hw#3qHF(+m9Z&;~L^gLhx<6q}i@NkQC(t%sfJz=W6m*jp?$XSsmwy2SfV1Up zi%?kO9!Q}rZnJmx+O;&mCkW-RU%?UQy?gFF!FYfw?9IIfdnb3^|LK)`LjY?)h)F1p z26xyfp`@T?9#uK%RL7>w-?l+80u4YCwAG^o@{wHUPzOdJ+pl!AZ6lvv{1qVpYxE2N z+mzMy5~QXYL#W;?J=?Y~kl4i+C?g_FMj7GB^Vo+b;K<{g^CP!y+rrk1kdqMeAUQJkhj+|W9XgVcEaPjAN$q}kKXOgOvDa=#;uU)`7*b)V83>| zfb_TYWeI!R-uVfz^jLtA+ncv-{2mttTd6G-ZD6<8QhQiI8Lb~v}%TvGX<-2Cj?$!|Na8h^(yps#f;l=Fd&zmvyDj?n(&nC?wujo>_ zB(7>QtUB$w<58-Botkn9hR1b>z5BgvnDE?Aowxorm0L}^$Zqni?KtJR4JY7rm94PK zhVmWmZ&5^P5DCh7Hj(5FuX=$FOFvRs-r?19DqbSs#jTxI%Hu}rC!7ZF;SN)s7Aey< zuO&@(QoQ7pD2#|T=|t?GkG3_{eK_+z*rPa%0qN~iahQygiBRL{I1Rl+pp!S<^K68N z6n2;g+fs(fL@VKhX17t}j3-iU2o5r6r`egB-op;LM;^DXx_PM)FEDHStwc8Fy{o{n z;rOXcGf{JSAreH4KI`x%L>^!f6EEz{wvssr(uo>QDh+t4TZfEOQS9?NA?@#v@6A6+ zKapqirA`8yfC~ID4jaCkjtL?1Msnw_d+}I`=hONLR#^g{j#FmiO=y=PJhSTDm54^{g zJE0%iAw8XPqQD6)^P=$5~Ro8A1wUV7=ng=1g90SxV z-d2QeQKIKGd2bvuG9@#eGz~M8b|)FR9``aSTJjl#-d#Y;Ehk}S$BdzyIBLMVl6?Uoq(k%=u#Mx0We+jr~tyNglHv!^`d+@zw>Er6x+KU$!(vJ z6Un}VJb0N6o5i=Aj$3yiND$G6YtuqaGVQn?^i88|%m(cE^MPjvZv>JD3rtfsl7_^H zXic~Rr@SV>XR4NN=sK%mRh$lTz}zWTge%RUJh}$p1;H5*C!89gB*#hftArer<_7qX zQE(fLa~p;lKQS`GVL_dZ-t^V<%_9=)+{_@WdP*;vfUS5ic0AZB>4d(M%yv>PH}M>O znS-Znm-a;2NGsF%Bw@&B_r~r1fuIpe&;^9adqtX!1~moER+vh7ASj9WCXVbSdCPpn z%RM&9!?x5z6J!J8uw!_kGe4m`Y^VC{IAIhfeI~%$rG*H%+(G(5v7%MRYS;m81w}xK ze|hC8m}UAdl#>d9fn~r>%+#@uger`-PI5jiU@U}yWy=Gwu4+2|Qql-tLMA+~WE(jecbwX8W zC9=GM#qGpxJUApf5CNVY&7_GoIi8#J+F?}Z-FHdP2duQ0k{)P`4dZ$3R)CK5 z&?e{+p@z}=Xy3G(#+*w)MIs1@65BRJp}=`hg}e+ZMh+D|}xAd#tXZvHemc*Gaj&}>HNi9Om4 z(+h~ybB20>PVBUbl6(3?P^Us8nYbrS7f?HGvYs~ud|+0dg-+-o;Y3ggf+YujLW2gb z0x#AB>mI})-NCFtsRaWvh{)(H9>p5w^>JWQMAjXP8L9@O3n>g%yWrQ#6F#Y5$Y!%W z8x(-spTLXcY8(ZLWU3YNR4+m69wui;RIZm`Aev2c1W!j3XR{)*u_CP!Ad3ul7R1CJ5gj1 zc5Vz1p=Jb{0W20?YZ=K8AAxJh06_!-@Z(NKh&7Ug;qaS43L!8o2nOs+g>pXw0}=er zbwsy^w%``NWf&+Ufv5PB@gTSuTLgK))||*C1V_ek^D)e6=0zs~6aXtF*B$IRT972m 
z>M0rl3?kIPaV1gh^f2k}b|9uKnn0Yb@D-2NHB>aLj*|1vhFt1 zsFkt6v8T22Dz}b)1NMnIBR*g*VW20ixtH)Dxf!L>K^sI~kQ>P})b@ zFeHQ-+Co5-_t;&U6bkoDMBb1 zH~dCd&=g(Ad4?UfJ*dU%(?FwQMwpZ+9b@~c5u>W;NLIJFT0?d7O=Zf|Fto;1`ry0^ zPp~7PiVC?0FuDWk+0m!r(tv3lLQ$g$mltsdn39np)zak=64{@ z$eER;G1-1%b2Dv&C(*QrwJ(_l`_Ky-hhMBcP%tb8M+2Xrl}I&EF4#VT7vlz=Ko#yY z7Jk2khK?E%UH*kz$k#XR?Bu1snBmBT0V?%oh zo=I34`N=hIH%2L+{0W1!!>{R%TWlEYm=>8cXcpG)JOCpx4$G4KYC+8jFP;M@u8V|k zCh&FJi9pSFntsIOc*`eFs;|hTpbO3etf!>cjG7X9;DWEBTm@MLw?xO65h9z%bfK~G ztSE)IDM5!EI+uA2;O(%J^v+B{ofAY}#Y^IbYiSO&PV?;qRzR}V&SDUx8g&mx6(1KY zn8tt`K=nXx+>8mJ1^HIkN(a1>mtim)?o?l9F6ta5*KReELBnaSIf)@y7*A7V&M={D z5FOxA=(TOk3QxY1bQ|@0&I?FHvmu8l0awVu(qQahN0tpuDpkUam}d+S;YWxLa39*NfElEkZC*&6?8Hr3oEU^qJthtAH@GmpW6BAV!iN;Tpk?@)PpCyYS7^{M zej|=#D>^eK7#r z^}YJNTSLF*>n)_BSV#trI1pNt0KqVV;jkZgkG&U0HK@@|1PVeB0E=Y#GpSJVGi(Nk zAIAm@-S|Obc2PDDbKQuWzk@k20EPY8zhdy7ZXNWUgFA;$oj$X3 z_kN5Uf_0GuDWZhe7{=*LDyr9zkqa}|zx?7CF*78TbWl12$Ez7yJbrBO>g9>oU;Fv! z*sff*FO1keKC6%;{Uw&uzPC|X-&kGxZ~vP=$`KrrShulR#DNjAx+bB`$Ke-_{^-oB z3$02%l@o-+MA}c2Pyz{UAe1E5)1F%m4VF{_*=C zl%jSXk=NY9_srO1|*TKXgV0=p1p^wM-1OEzAv))#%^iK*|1c zho32iYa2232S^mA6QpB>-BE?4<{}gY@{_L!npC=A%<(P?Wv(lI3f7W{-7GuDHycSm zNO{r<48lj53L6E(w>4^i?hi9aR3H%&lX%!O)za2Om6C?$wCYDFDvpmri($fFcx)0^nNfwEH3 z1ofa<;@p9kZnq-O=LLuimUq`uwuYldfXPV*xoVSWA>+o%QNDu@6cwE2D#`#&CxIR= zLDEarLqa+-?pfQ$g#8qzGU411&w<|VhFzMH_B9Kpure9drcky%pdob2#ij|sM;|gV zu*n;h^6J9c;v8Kz6^yVB8zDTG*Deq37);~`T_?b7WI)DzC#;Qhi`wfeH|EdCsE_nw zffm2BC4-5gObqSx1|}hBfQJtVvOLfh#!j@nwy?0e`5G~#KUf6s*fRqIL#3e|PS#EF zs$D?}Y%)U7P_HfCn4h~wJU@!vAeqN-N!w^lP#hWCne6L#3A`D@O+#l=am{JfmTpYd zs^uuGL5jEym=jHM27Wfk<;TZ&CW4HC$M%7#t?CVc0bV|F*L@`)I2SwxKLbp2hCu4T z3~b(04>$OP2o+Nynj`B|RmRLw(jF3k>QRTZB*i6epc!yy8{>q-h6e1Fdwd$7336-J z7Z;WnXB&+wj!GC)%S?kr(>kT%;NZwEw>aRUb?`E={MwEWR&Wwqdi; z+li9Iwv%j&lj`pq%8gGXNX{SxU|M6rE6I_F8?!eS))(QmPQ;ug*AW!>jwG1IcOOY+ z`<+%+oWvuV8R9rmSC*_OSb5CbCwL3Z`~D zTAjVh07)+q@<4GiDlrXGB1r7owKtI-#an>A7@PGfHv!d!BtSJ_?`k;Ce_@>tI+y{% 
zo)}rkJ@}|Wu*FaDYF+lOZ%V`=TU0bIN6{z|OBtC*5ViJC%i0lpFw!_J-;sMfBnEzV9iYSbIXi6kjEK!suLW;3Nip}#abmI%_w zZ6+hU3kU?T8r6mAN_C^QvBE2-feD%(U4x`Z=ZoWGlgUC6I!q-qOogXEn%8wIuIz&dAfk-1ryU}Y5`L)x^u}exYDzc9nT(T9Uk^mu$ee4^|CU~5@ zEU!LNfLKl(kze#=J8Z}4wDvKix!xgUa+qQqAZGG7bk05W0*$xgW{4)GBlblOl?iF3 z{VX)qYViVeLeeoRv)YW)#L5XCDcLC>fKe9;h0B*OgBNQ}_IkYS1hnx7T%3s9Ph(?Y zdU<)8dm_c5c^ZyDjWmnJQvcXKCsQJz&nV6Kzz4vDQ{ql}Y3lkpg$g=kj}ty&SKtS6 zB@*M4JCmgm!ZQsL9SB}cmJDw#(cD;HoLgDv^_40Bi=t+m902aUm&@i0!{a-hAkX8x z<6g!iPXW(nrMA9-cx*+LC}g`ybT*923SWd)YHV!MDUQ3z06_2vv?Za6cx=>WX09TK zU8Q(KSJ4%4E)CZ|I8^K(adIVDAv^)2@~_2@K?_7eaT76O3qwQF5++CzsG1X+y6xa; z#1(o4*;sFEBp^F4K&fKn2%7Y(u$7Csgr4PD6qBf3RG~+8WpfN-99za{I^sjr%CDO@ z=;m(sMtu-5YcB#D1XErHBNL9;wQIsD4SFu&J^+eL3Q<5(RgLwfnQQebX>4>yt&PtQk$Qoj z%3(x}?cM7n0us0>R~Z6L3LC&}tyqXvBD}mG1Y!leARS$u!hf4t7`-`Rg_-;faW(1&Qc=tZQBT%IsLjS~kb@LWr z8t_Yp02xNiuBebRf|$@mrLvLE`8*EXJRegMZ=!*|3>#6``?;lK2&p?3x+7a-Nd-gB znil#B#sw5Z+7bXZxPxGcoXYyr^p$FDL(%-Ulky3tC|;fMgMpFpG$xtxb%=l=rfGtF zzn-eC)mBz1z=^OKWlKoBcL>)a4YS#ullu~BWIH+P0I{VpD|Xg!HrD6nXPAqz?dcuj z)Z|lMFgQ4z9o>;i<_SHuWl)EC{0s0NRx2~tmuoA8IS^~O#aK=-U?NQv$Q_+HOtjHY z72!czl6@RwqO_cimAQo*iyKQA6d7gwkT}@pX*AsM;81Raavt;$Dp*-4CdHq|VLr{j zo47S(0rQ}*Imp)VMvA18J>6UNj{@Ap>4>}dVF!7xm|Kt{dxrpMGDxMr@oO3~58(s^ zJ7|Kt#Ed)*cFe&E9q|c#>XvfZUafr$;1%|}=dPcnx8jh+fGjnSjWs!MQM`RFC3-R0BWFem)*u9TtV3w>W zbA?C13Eedprxw=dYV~TmlcvUpkRp~|Givnp^^FV+CHlrZ7czmP4KOJRL1I_d=4P%} zniY&;+!EY9uq!%GxEPtm;a!fGCPRscl8htmB$|lAwYll(>*QMjBf)$xnU3-7J5H`I zKQuI$?k`e6fDT90TMS~1s=nzi2Mt>(3LExd%e?|f+@gD?7^1cP0J{%%N8$+X%!I>W z0NC+|&fKWg0(=J}PK6Xn6e}lbOskGyTZ7b1YO+JpkGBkN6YX3yccS~K1BhCsQo3FX z)6i-w!Vq7tRRTYU1H+J*g&4dZqKjp~tDK8EELw)-_yqwT`F;WICeo6zNy{i!!(|Kr zkOBFC%mjLg(2c+)XP}GcYwY+9zM)Vr?<=z zXjpP(oX|dkHkS#)I`MbU_q|55mWEW~W=4S(YzB=@W4il6J3achchQA8VKQBVk26Cr zKqCp`ZFC&r)5&S;GiAI@r z`27zys+EPSSL#t%O#7=8X}RRUgVk~^>1T)(xBcL;&pquB7(^OKAeb3!88K}hyKkJb z<@vd(q@T^?h@^!CDCivtOmrsz4pIHFM;tD25)=Cw|1$@ATvWZ?Qb724f^v8evzx~huOTApf 
zmk8^%Ank25D*0k*ej{>=ne_0`W6wSF6%3w9$pLcAv92rnzg8ZIIDsMTp>#lsYFmLRclS0cJwrLn9xYQ5ob_fZL%X$ zNCaldY$j8FvmWLkCcH?alNES$oROiS`C2U^wdtiBVIzg*(dzht-)vB@jfvdes5jDn zu2ya2(tYStbR23mMZi-kmNr_1i5vKu^h60WFVdt@6rgZ`Wk(hsxYT{mpRd(RgQVqg z6!n|X!P@Y%+}}BoZMW-mC5wQK)K((nU(hf*h{| zCF?UBoQGI1qy!#6d>eSlxOYi<{aqe1w zDMyGQ-XMH~f=LkHbiHt;Hg)Rck3aLdFBB#Y0aCKFq7qywP`@<&)~kP0=+BFnaS2>G zF-9t3%*6}5kLvC4@vr`OTuDSB1x_j-Yu7(|D;rNNKdMth5WV!l zTVHzd>xtYXG0hC5hXCnRXV1KI0GvoT@NJ(0ZX4p{zfRrxLf|OgW*Rq)m5=jh3Y>Mduu!o_NxUb3v*oW)q!re~4f^G|(j3QrtYNK4q1j+SE`O=54JoxaFefu7wCbx|>&q|(7wXt~l z?Kl5spokR6npcQG#>fG=b?fU3b64sEgTwni^Aw?DJa!3~7MAGx<#&I!y1Wdy(uChg zVNlFdrJr67fE8>h0-VC&auwuL1;^(vYgM~EdVHS!jzJoCi;OPeSX zLIcWSWlw{L_n%)#{ChB2qDTtB~wpMWxa8OuG`Ygz4_Ynzw>n`e+bAUNPv;Fc2u9f@WHjqANH5PNQ$lkugSRI-OU3^y>Jo z0|y`fVp@g9{83wutlN0+)$fsHOcLtjT^zD5lxZM#(})&VW|}A8{NamVhfPS`=O{9l zY;3Pwc;|1{%GE5ag~S9)+)wI*a3eKQ;l_p2ubg<~`QqN=$tbNn1acVJ-&}g{l^?c~ zaVATZJxiLXcvP?teb;D)v$L1hD~&IG;|~aEs)mkW6WHlWeF4&(i^v!opTLtIt==ID z&p8*j9^3%Mc-S;aGChrSI@^q+xK2b8f1JiT4ayu)LJ*kD4T1wmG!eWG(bdfc>4h#7 zpJK_2F_Qr|><#5fvvWX!ToRi>*wlTroM!pwul$JGw0t2;EQh!$%}Vx zH(&nzZ~uXl&XMU?LX6%h@^YIC=ij?>@k};{OrSz0--sGPHdBi*)x3>z?d7vS-M#<# z?gyVwBnt#*m7{D`op}A^Yu|U

    VJthrBrm&NSOj4bQ35KK1kOf9W6ok>d|BE(1vj zZ2f~*eiSyAkUeRko%}s(&WPd=S9ew_wbSpu@$A!I@kjO|a{!W<72s@4pM2$~K_*TI zi8^K7nJh)~$skQW8zsJWqrUX&OV5A(4-=VwY#tfFt|?&=fNnp)4uKVm#MRYRcD3VN ztOH^UFobD)C?ek>3f$@tORQ08LH?9jHuuBh4eAE4Y?X|4lj)>%CmMINk#1WSN7Qfn zsq*?Nt-=i0Aq_x3Zt{d;I4&!zG#rliDitLU&Zr{67<3h7*qzNi#I zen9+C}5mJk>qH23hTi1+UZ<7|HkCr!@C}M(oN3p<}k}G6Qh*ve^%Hq^}aod0V%l{Ao8&H9+KqQ8AD^|rphE` zQ;Fo#)bz#EYY!beS~_r?O7oy&<^6R+`z0AI#3q;C@0qt16NRggy!^s1k}eokUXMLN`>-@(l`oP!#S%btrH06Vj6zl> zha{$#MFvLH3@d{PTE$h|CHXAZ4X2BAU5vyg&P&>$$|4o~983why9!-lUNXv71AQkc zqIQj(0Xhr>>LsktKN$NRD*fu^jGtlXjj&2xWIJY|3{gL(kil?LExBPHHFB4uNB{Oj zcFE=e$RIL7nLj2#bv|L}RwGx)Q%#JbMmj0{t>l3+`DC#OGO8$P(~mdqFo3emFQ*=b zL-V&-?3>;FM*JJMs~GcCyMqW4TTF#qIHC{QAhiakAjfir92C)MHK@Wyk^eo+tA|AT zf`qnyy{-3HFGs+pNev%6F?{rx6KbIb*1L=-EMUw|ut`NrhRq0!lDp^dKlymHsbS!6N+iy9GG_}cJw9e@xB{nwvoIy9xqK)4a$GpuUipk^^rK}R$Zy7FoBfc!EQS@A)f{Q82 z^#-Ue>MN3gGSX5BKkK$jhnYbV5GV8h^k@>-s}UFn|LGIi_S7@SpM3f_UlHskD<;us!NUo{j7$oKPpT`Cten{-uU>;# zKn3St{Ez?a#-;0V12Gd5%mNgV7LO{dHatF39PMZHiVh%8q|bEDgpjfXP!O$@LS`R2 z@!$Wee|721hpUUrxW;4z2|;4%k<#vC*OlFq`TGvw2867v$FVXJwmSp?SD>L{)y!;j zgu3=|k6$Eo?`S7IxGU_OFV~_{5yc;3sZ+f}21-tja?9v-C^Yf@2WO8Rd)!In%{Q!f zk)@c>^Cxyre)!(2M-EJa3*vI%356n)30J3$7t;xetaQ8^v-8KFcv0(+6bFKasm3O3 zRV)pREv#&e_T_O!Dg4Z50~UJAF~lp5+f-6eM>svRK)I+^u}L%O0C4m_KDOu52j?^S z9O=_cHjgOemqwGtunAIXxCZEKG7nQXuKw1GpEYRLpbVTbDPT))|AE6Feeh0k|D)JP z+5iAR07*naR4!&DoMLB)&7c})s4OJm1_CHkH|CE&`bAAm0b<#TA~vI(hsJhZIN99D z`lWmZqn_n~sCOd800&Y=Wg_+bbmpyhPT&8?(=fOQ)?f*J$2i9)_Fubxsjr_5D^(;7 zq6772O&!Se5bwx4i8^bmmBBuOr*g};J@J<1{&!rHKL7yz7j&FkSPq7QZIeyKfdslf9~Yc!ku|4T$?@r z;oApxj}bLZQb8+InT2Ec>a-&HDfA1S}`}>Qy zg;eAr_KD3fa*n=&k`wbw<-hsszk2R#|420`T9uYwyYRu~^QRtr@DLVKjbd%clV#J2 z#|pwsx37?X5L3{GHx>IX;@+JnFmAM8a! 
z#F1()LgCVoP?o|sDn%6w2}Al%@v%*T4hA6uT!o>bA-e90=>%mtXp;=U)u6BfFAbhC&T-PkH^@fA*~>AAdxaIhhOb zISvd%RMtK4OB>b7)$3EIfA+oOPko*Qj3Uc!eClW4udmD;KDZlPL!1nsSg}!c?Fi?` zC7zr8_J8_!U;F+4+Q|F4jfQ^=PF z4t-AWgas4rw}1E-iB@^Q^F`_C_Y<@cTbF@psm zLWhuHj!lXOfR-5n?DF!mjb|Cw*t~Pgzh=Fpnvh=t@Ub;nCt((qh!}_#0Vl#zst+Qd z*6VCb6NU%r%Z7;oD81XyZl}-y_aMqd4XPtvyL$1RH-7Telb@k5r5RRIei2$5DwQHa zjWC#-8pt0wedcsNQyMt@2r>%UXzIB(zWpcv=Ke#wbAv^eN360eiB z9CP!lZ-4(UAN%S*=;|z|xpDRUnfKm!;_-)AWM2>Kew6dmscN$_P%M!r8yUz%Bk^lL z{lPOqF+YAt+XoRhT9|t2JAd|>M~~u!2MLN?RLCa@r92{9lUldj2WQT}%6lLE5=1Wh zz;7>}J9*{YI}hQFD?AOEKAy-ksBN+U|ueR8$>)9)0l!{U`tUPh~Q=dNf^z_Wtu<(fYr(jF&|nBM<_!Z z{PC@eQZ{Ztoy*^oUKgTlXR^Gx%_YN;CM84EkYo(<53bHy{)Xl`j5Z$|BMdykM9_wMww?}EPtp} ztv3d8Mbf#2w9m5Zm%sbx&wTY?I@!GVPQj*Tyf%OF?E8lg?xHw}{hQbsgeE-#)*6R2 z^4L)6jkn(`^^cD1KcRKgsJXcQoj?6Q_wOF*FA@^TXq$1R!1>8T2PN~aH(Ww&zy5=7 zKk>ph30^o9PNm|HUVCY6@y5Y}P$ntr2Eq4wwF=>NSovt|qoHE@tyli~`9dK%bf1c& zVdr|~+kf(Z96K^u7$_hsSZ9kZj4tF*Hm#Gv=9`&c{n-z`{q$G9p-8Jx+YLH_d-GLz z)KtuZKCsC-1^~#inn-t4Fi`qB%r8;0|(zQtIFTi+|r&NbT7@!FDK_ zAk(T7!$k$b9BlN3c=ML$R)6}PKY#Xj|FPl`SO_xNqK&H`UHI_iefuY%EXL)w*ubt> zZ`FrVB^FnD#nR59((7-%oz0F6A9_eTCNZN<{kwniuXpbrn%q5thBr(D!dZc;WIJTb zR5GdX)$jh`x?_Z3SyjGt709>B#}aKaiG)*iJRHOdX$lP~|^p=ZDBWcn;K zS=ik9X>r7--6u!PD+bhIJ_S{ScXP>25`RGj6C@=hO_%(Lyh7Ya5V2_i1@bH*=#c4x zYSfgDE@E_Qn?k$Moh2cD1Y28f(QQ151#0?kH`i8P`u2Z(_<>_NBT%tZNp55EQLP|P z)J=^J_n*Ib^};JZKKS(KS)Wh3QAS0(@$O6CZEP$bIdGVDM=V{!BBsO}*+&G4k`C(i zM*0gcefQ6Q>ucXg4h<+bKn7)L`iFo07mq*s5cw?)R$*d7%MBl$j*(0edZ+Z>snZ>Q zeDD1aXd5Lk-L8M{Km6MRyGAF*M2Zvx@T6HjB(U|oy3;0psO zAO?uK>_tbL;3L6|5P&rmR^uk~6LlrB0l5hfkP3=hd2NB!A^eS;6M3_fyA%XbZbc7c zv!+e!X%wqMYiLSL1|osS84`CQWEK|}Iq4!13u{yN34hWR(6Jd;JL;@1Tsrgq{(ZY} zHBqu!TY+qskBTmV=AH4u>}zknMsRTa(8GyTADMpfeN_M9xBl(^N!ITTA`o1BKP*NN z3xj$r?_*Vv&#e8s+c&*`Gde|FQnTzIs@bh+z?qYN8A9m$4vomoMEo|N4*ad*W+) z1V!5kH?Dkm`RqFnKXN}(1cx3$g!otNRqH?9LZNSLs7QUo=UhLx_fc8VNKq(Hs$dga zg4FE?sTv7zV-4Eh7&Br|sANPqj~+!iTDr$%sDgD<^(IBWtl1?d?jKQyy8kN{89&Yi>AVZCL_}8n`^K-5}@5$ua;TV 
zjLU$2M|iRlh>SZ4RA?z3keE+an(YjFK$HV2XaZjt9+M4Zg|_9sUus;Rked;l^+pY| z5K6#W3t88O4*|D&EST0dXazE7EDD3L+L()s`mSltzpEJm+O17lX}0ysE$23;4Su!Q zH`9TnO?C?&6sliSI3I-qL{m@|%K3GpQ~cFd=*wF|3&*Hdrp3iKsSXwHPrB zLWxn0rSl=L!*vt+d=4CfWpOGxkvqifcJ|^CvB@of-v&y*$|Z!S1+pT=!j5L4GjnKI zMDwMuU1w-`1T#SH4_ISkA|qV+38_nDQuz`u4Zn-^rn z%6s2+cv6>GHADA2EPKs->KoA*G9@8@d0)6Q@4-uwJcE5tCsH#Og}k zi*xMt8kV)zC2lOvEoL%>T}K~fn>Orh;3R>{PiQ&E85$d7ZS{o<=OAPB3ssx!JLIzs zS-6tylv$h>wHhbSoO%9*-*Ga7Z0Cm?3p~|`Bvk&`!=F2Ud3temtr^$c6n5hV%S5R7 z>9BfB8xF^787PY8f%`B7=-?p+h-K0J%QZ8mD?1MD-r8~(6D zSvby0=h?aGM;?7>sHl2!x@E-de4ut@e7l2_yED1`nKNe?Jf;R7pqF5?LG~i9C0i9Q zTCaxfi`V9!{`_w{Mb@clCkM4c3<)z#fnaTd@!(@mzjx-+8Y@$^u74xRI^y<598zE( z`az-^)~-xlXJz*Ao;|P<$_+yVlN%47z26Q`9%Dz(mBlq0MRT=Vb+XYUk2_TGHC9B% zaeaP%{`u#h*OGiQhiyNk#x*_JnpnLA8MnP{^CFcDa8#=?Ki!F|`}d6D0ky+g2B}N> zCT>V5Q7b3&Jweo9VtnMr<&T_@6=XQ0aCP+Nt3TVfcUM4iUxcb?211QLM&PnJHBpj; z`@r#|S1z5G;SRvt-pZv5rMy2dG)fjpiyC>5T@eHj!Vl~Rf)m>s7#*=hX1UpH;@F2(xmJ|2|SP@>C7qf zplmXbbwcEVwXZ4d8LGNQOe-i^UY>4-xSc|YJuz=~DxqQo2A3Ex))nC}GtF*v?B=bv zomH1FoIUg4{U@3P(eXzT3RSf`jZRyYK5l}bMkej$jvP9A>D(FBpVBF{d5PAg^QR9T zJwgp71W8pSbsWepMonCy8Fw1&4O!^hv+rQooQ+p5seq`{Xs=H%E?hZscsFq~iDrX$ zLWVE_1um2itBNG>j~_XF>C|cE_OX^*?UV1kyJy!6YMn6W^FrKERNXQq}`O` zooJJM@{vr34<5U6`J#!kX&*iId0d#fxO04XaG;Mx$-p3q+Z<~*TT&z1X-BbfcYMca zG3~B_dL8T@5xw==D~~<)7=+ETdk^yPsL^gJTSKCNy=O^hB>ac&f8fmf?}0pNFQ?`5(G(_uA@ww7f(Y?EK_ZD<7VE=g~)x8#)0C znzmN@;N}?4MOI_i+<)}Qg>&bn7cq>q$+c0`H-WXy8EmMRx8O`aUCN9Q4GQ2 zJ{o5XzY`vy7$;k;*77-a`Zh~D)sFt>GwTQV%oH4g_f|8+QzS792GKnkQWp_LpaOC} zTL7n8I%5nKY)A<$(*&rnl8vvM3w8Ou&CZ(a_604FokVJChmWX1dJC66Mk%CN$ryQ= zfQX-;!O0ZAS(>LcP7sgiou16JUETgE36or(x+ zu&k`5vvl#?$Y3AAYnJXJYLFi`5RU<&itRJ9CKJmz=^BD zHfx~WtQnyGP*$IvJ=ov7X96>A>DndP^MKTIu^3N0csM{$lPM>o1q+y$l99bc7N%Jv z6jdHQaeU_b<<P{0*YT=&#kaE_{7voH%aFtefI5% zY7MI$u_D+JgS8m+K<0)xAjXPbrIJN{0vj#I=@N$Nl&_Fpxy;u8I;bYFe>b<8A2rG$yLvygerVHpHdlTsU&>|DQkO&DjOc$=-(@{Os{k&)4% z5!M(f@4=$81{MWPP?V$hXCU#01_xQVv~um5dPGVz8Xvy*?gPh;Gic(N%!E-<5&`Rw 
zQ9zbr%TZu(;{Fro&LG?%A9S>P@#MQl?>ks3l^V4s^Hec7_6>SPdqStpHq-9p&Rv!A zM!d9a95b-cdhg_0_a8e7pK8xSgnEn0P~w$@9Ux82$_V*-_{hO)*RE*KcY%qV7QPd7 zEz-W*v9z?bSvu3bMZh_ z4ZBYn1rkz;PPWibu9;nANXVu!<3vDYV26zf$QNovD$gV^h)yFt`48ln_oAv!HEb#U56D&BZ?bptq+GOvE3iUz@TaeKIPhAp(~mL0 zPtYx_n;7(;yyIEy}&J;V<0WPBRkQh;pW)15OI>N_ctAbzm&|xv#MGu}R=UYH$jKrw~sUS$AuS>%Reb>{{g}dt?%zm;}fq{?K%B zV?*(VsLmhZkU+|d6i6$9X0EKWOefh6jV~q`5KES|MuTb-!xoolQ&HUmMj!hpFle=v z#VL3+CpTGG3eEi4CW&A(qz)xG#P!(si4)SyK3{oV5gA5S)XB_1Q&=aU86m*rkVvQ41%~HOX*pCed~f17 zh;coqR2lM8KBGDAWDr!k*{jf+Zfvl`IM}a|8Ds{nVCVrtd8 zs>p_K6p!fb3S7WyA}PXd4XiR`W3*fm4b1lKv`bJ8=EWtWH;GveO;9u1{4*DS$&Wi{ z^@|#ClbN9%U;4`L&95~sPc5FkG&8fXapwGuk1owzpIQCz{M6;^%f-Q+&;IW3%Un<% z7<`E`^gpg7@XYqz|I`YcJpzGiw*GE?&4Yb7P_W=uO4Eyt#SRn zJxA}`b1bP^ax6TB4EbLGm*|8_(+@xQ!k&Z2FHbF9np(VgZHX~oygGOB>cZvgOZ-hQ z)s8;$WNvh?-WUOz{S+lRq;5=30tLu=B`3RM_j6zR#(X)tFtvDPdiC6uxeM2quFq-u zXD&?7EZ4p4z!P62O3voa*h$cK7vqqs?__`H*{_ce?wPy3R9nro+@?wp! z`SR>KF|nsU^V~>b%m7RAk-HtRF=7$c#V5IZ`Eu8+?Ay4rWK`6&y6K?2}Nvq6<&0?r_lt+~+mHKLxHEKzXIVhv>`qZ_-;eJxJ!~(g6$QMe0 zDp5x61G;vC)sYD|ok=HF*A^7e^t|Pz*_{)^5mgO9T__gbwL1eoKRZHdXG8MV9?_!3 zr8(_bOtPWIK8chkkQ8k}9)O*#fGYJ0@i&FfwPG&CnAt=%mkX+^^BNut7?+n-lc+6R z#N&(~sVj3;Z&Abd!&fAh+8iA$-ne#Iazqlz2xwrn1us3wF<#E6r>AYHqeW})*R3Z* z`}b5k?J5Nb#Daw(P%Bqcei1tiXrpFXkc%tJ6Z`krI+HtF6Ij@PnVD89G*0h)?e@a( zU?VgZM5BNrI|Odv(6SLE$zq}y+K0|%%{WdJ8jO7)sf0`B*xQ9gbHtg}mN4#tH0!OW zrmqYQ4Ji5paFuuBh76k#u)G~+il}PW?(mIVj)3*7L{1`EUs@Pqb%091wERctW;QO7 zWu^$iK%!Wlh!}3WQ(M(Ol%3Z6-1P8Z2_mH&OPZDnDIy3|m74d4LjbJ3p-qrm-dMAd z9X2~>vqHVR(|87E`dY9L>3}Lk2Ro9`VJ@GkRLis|;NV!&`aKUkJ;p|BJ(x2-_(xG6 zg)KRCb0cGJ;8QM4!7|IYDbHdn2<>QTmZ}rt^MWkpIps#9e|SXBl5A4xqnn2r1ponY zCi=W-ZehT(-DkeQS}L|PBEQHm;2lIztyyDZ8(JY)VWQJcwboc$uciiu#RmMT^2pe= zZgOyVY-)a98;HxJWS0!Gq1HBrviL%kY=;q6Y7Nq6s;ZM&V1x{J;RTJ1Q6qxn1n&x4 zL@XX8i-sRA3ntA5O{}c!Bo>!e{Y-(t)6IG5tUFPR53pb%Ci#oao%D9?-NzaNM$5|v zxK(baqKy(zK!eyLb09R(XhtKWquRs^sxrR<2U=qz`>R*4-XwK?zViUf33qjEgqyl&&^?}Vzo)1q^7|K8PV)e2;2 zK$sO@OFr|Jpb=W8#=c)+b8LKE%7&j!|A-XRk%q#}xm*q*$z%F%)Ur-Ewf`Vv?a^s3 
z&(G$4R(Fs}!BJril}MaChansg$O12tY>8|!jhNEgsN#AeHt9(AB9=Tt#1st4kNhCFX*xCnWu+!a#kH0EZnlm-jz zG7ISH)Xov>4|t)j78P-Y=`_N6kR=Y|&o9gwp$J=wlGb+cmI$89JvEJ13EM#udETqeYO|$MX=p6c*h4 zO_(&|qkG995x{ohmBqRK{t{Y@FDCW~%2`w{c=DC`vg(Vimar$aGDh_*DBg%5=HTD} zfgV5&SWx8(E91!sL83Ke1r#)~V|0Nk1Ui*?cb8XJNZ>(-P&{bogjq5agGBapLMmry zs#x@wSI}0P$?DP^E5HcKBNKQ8*5k6mR;DIF!7cqoz;a}~^ykO7-u>n$croQ>JK&o054rpIE z%@s;JMk}?dVv$_30v0iV^`{|7)PO;gqDSb2ATu(D`}v9) zhc`vPyg!YbIYqv1te5k-3?rvCF%dhj6Q9ECMeNrJQps+FozfY9ZedpCRGqN0Fog?1 zwo-20iBGithraun);4exf%Z=vX=ngir;{uFAgEzaeXXqD* zYAes#>%#Lbhrzs61yGVVwsY6|Mjayy{>G>KjESNh?T}cSsOx=n?&`$Ee#I1+ zGi~xHtkH6cle_CtytKGlZ<^@2FvlOcSJJ1vS~uo|Rkj~}^wH#=19}oPG8`%r-3af8YyBC&)YwQJbLi1$@T;Vgn8BpM&40m4EnvyabRCRdT&t86BTAK_{5bFt031hT6ptM8>dx(QrPA zzXM^=N>m~dmYe0-h3jXQFP~bUy-=RJxH|RG%FMay>QuY2mZp?1tYQ-X{mh{!$_z0` z65g#}N{K||Mz3-gNPz$B-CUQ=09wqiSjt`bd$+bg`6nKavSehkm=LC!r=dYoJ28BT zta55A)s?xG>B}oO&hxiEcXj3Zx#ii5_3}JFLDykzjj9mIGvg-kxcf#JX(gCxbvhG{DD5ootLwi(|xcWrZNTQF^N-_1)Pj)>a${>O$% z50a^67cGP-yKCuSTO=U_-SJ?B=v%3$GWD|+-0dNpxh!{mK^jan~4f0g`8bhFF4Kl-HNFp|c|5>`a z^U$L^4}=?Q?7)C=3PmzCXpSK#(&P__fN(dPJL@I2;tGvGi>|b8*7cvb|5?1&dZ<@a zW3B*sNm4mKM2zGB17frqFRaqP+Z{rU8Wj;^Le0+3QZ$FbYIqL0OLz7$OL2T5N@d=5 zLL$UDv~EH7EM!6 zL?R0JEX?Hd<$9G}y4Vd}lu02oWC^;kd^*j1vB;PhJ5q?!CZbT<#0H;|S17cq+~fzu z99cZ2jr1_XFf}o!s~g>_jXNOdITk63LaUXsv>U;bZuOM{Ne5Gl1^BJI8lr^BE-T3 zp?VS?2qxAgaULiMpq1=klaW*_^D?wfa39?zvnPfgSA}o^f6l7-GwMtH}!*Pp1lP>&7^LQeG6UNW4fAQwg|5s*E5_OPN;_ul1j zM~4P>ja9E*5BzkJaDj@5u{Eei2w5@5T4iXj$_x11*2sZ93Tkh6fI(;m++DkN?bxwn zeC^#O0V*#am%~Je^nU6nS1d5!L^8l90_X+bq#A3qfaD}Ir1Aq=O zRI)Fxnk@VY-Y!kiheHW%o1Fq!gTUfd+);vGB%ar+xNY)0*f5dJZK|ZEDUlPkq@o1n z7vyS$iLbXMg?ei|`dr%VIkh!)WD zji_9!4++b) zWHvcDdG6df5Xx_GsKqb8^3}+7*U9G(95{CM><4UGpJk&HZ5e?ptTl!FwF^GZLM5!P zHNww6^^7!-fT=@G=wQg)2*t9AExZoy7`FLEej>yc?A~|iwU>X$DhGA~CCotyE>V@b zJVRDIBoI$*oc{jl3(r6QHOwRM1LTaLkW#>t-hcS$OW%DzlMjXlN-8K9ZZvl~hJ?*m z=jx3c{lnuvK^!SAC4692$Vvn7g30?1zyJR04;(wp3%<0Rljs$+NwEMAysZl~R_S*Q!Ww;{FIL0v!Yk 
z>b$@awMEO4(nu;1xgOW76}6|TnQ0IPC&mQ!Q9HP;xVaJ$9MU_y&>~7_s8HY?m`y?t zXs&FD{oR--idF0Sk-OL@XsbqCPw`>^_8Q^u6=*{^!g7>C0DKAp zQ9W$D!&Yf}sUA*mgRDmeSr<15aUoY5QPuM^loF!F5LoOcNm?ghHIC2r#0qd!t4-d) z!7DOrWqL7^-%2S%WL9`F38o1!>6`;#5}})zm;fw%)f{gHtls$Kv_=mfoV#?oQYrW4 zGAiJLCpq9eNFI&ict8y2ndnAoXvk85#kpV$bT@HDgz@_I>u@Dc6Zf&d-tnoCv+}V> zqSXyZkU+G^4Q2!}P`@sX0Qy)h*+PsI=hv4d?6KlZNLNM9n0}brtv1Vp*hD?aCL_oQ zQ5a++MFdaL02|W>yaj;(ce6ZH^jV{>icC=}_@?NC4zVO)Pi?P3vlvp!BZ7A{BIT%J zRrVDzDOQ$eCdC3^g!haQ48xR-@D<_U4N{?`HNy8ZVZzkc@oY9%b@ zidt2w7?~6wu{>fkXkjYzHX89T(V6~1i7cMIg`sIy?O}Bc4Gl`u?b{CkEX#NC%hD8# z87U{8mB>M^cwPZGaD$~QKIxbRkw_|L2h|aO=QXZuxiAPV(VUm4@_I%#RdCwrfNWVM zDC8)6G(Tbjigb};*h>n~sK&=?dr;0w{WR}xFk7qJ@E@f;E_=kuOm*FIWrI8u)uy-* z^0yK$`!M$P1-xQ`O@w(r9Am?ZQu1;~ctoE1CS?wB*XofWwEJ`7r?AO^&f%qCY^Hf2Pz0=&ND@K_Lc=MR{drfqx>&6Nb zGx5$$G&zxFBs0nZ7Z@{=jeXgOx>hMLL*CklAmu%)Nl@8L2kDb!1FEppPC`;HvHeCCu1Mk@ELY=+!Xp_%wTNUF4)wR-gU zqn|Z=jGR-42#0P}uW8vbJ~d-}6GrV}8NrY(7x>AZ>IQug1+rA($ODi5=*K^J=z&9p ze1_C7@qLQu#jc$YF~p*ltJmhAdFG43&`$W1CKRF&PnhbwxcS&;Klc~^;om*<;89jJ zng}kmpueuz($1&^B2GJ2mcgDMKJu>fl5Gt^o@DFv$g?I-}DFrV}c8IiV?WEJ^S`9 zFP(Ywjq!oOKzA%@>?f92~=fj2@kbjd=CBxXS)7AZYu)C_;4 z4&KojR;giFdhgAjWK-rv&NzX_4czkWlaEJCG$tY;AXmilGGrK2`4Dd` zhn1C6@4f94Cy_@_mIaG~(3Z+2gKTbLW%aX9Je8#`UezqxJOpM$I!ZV4M{@{eBd56f zcYes05LS#7u&tYEaN6Y}PNZN>^xE8K)_%1DV_k4B298*cGt^bDnaxIQ%jj<08HEZg z)tD}AZx~nPk2#kb$E?vi*nxwfxOwy%Rh*gLjOC;5jHVTMa3IJabHAKKRb}vq`PgjY zwtr_|4!6;?SMt@VuA_h== ziE2#^Z0l@jZp&euhArQJnNzHvU-IapG*C1G*Nc#5jxGEN^vROKELLJwu?q+fvx7-O z*dkM=!$IkjmqeW#3~*Dr$l^KCsA3`3WUK>!N_GG#2N)&9gTp{mG>Q?2-KA_;!8u36ks(gVAGzc)EDPnH1=dV!3PPJf_GCJ0=<5=c0eYz{aLR5d8yHvzs zDRIUHwPT4`=CW=e2n#krSx&RiC0*_Qt8+zy)U|ps(8VOeW#kw5L=T8E5LGOW3jNZU z2`H;$^{B^93(QF1!c9dVGbD|~{NLe~S&CW`EMa4IU)fhk4&4c)!3+{pJzza)_sQPw z+B|#vVFe;XBKZve5KJMYLWs%`s#?%Uxh#OpE-(U@+>i=i|6aAVYA4EgTlYn7j(W z!3V-}#pi<6v2nB+IqyEA$P45n332umz9GCz@Dr&*3zW!#GVUZ3kWL3$f~aHwB`mbs zY1V7Z0@#5oEEZZ$XW9ILd4PKumX|g#41qv(NZcxa1|@2dbUQHZcN*`V;OXJa&^r_8 zJ;TqIQ1~kVE3gQ~pdDHJ%79AalLw#wys`^wzAKV+ 
zYN*^^;;IcY?%1(IzuaB30wNfc03eTGJ;Rj*2ACdBKJejMkWSWEWzUxXAWVIrN+iQr zTEU*17?Wm_!Wd66LE7Mi)mn@VEGdq|P+1VcZU*dcg-5T!h>Lg-PzdpQ$p=uD?1M{q zVKi+hso+uBS_7fxUbPQ3@|Y<%7fBFIAu++WA6tXr8+ZV7j~R5Wl)1mQIFj1@Uusr{`|@H z@2r+9tO-Gw6EB0bsg+-;)=Io2ikjm7p)b7nB_}X@;N02{n6ei**|TTQ$&)9K9zDwR z83VWwkbxtEBC6DB5ecT)jQe>tb8CR&Jmd_diZjWtC{9QjNE0A#s7A;O>T?T3t4T28 zvmhHJm=$lq`NPp8EUPFe0a0*chXII)4pET@kAQRHdfm&S2l`5`0$r@g{W3w=lj&0UYU=)~O3EGx02@AIspVG5h55x0>_Z5}^R0I%248(gB zl#|hdTiFE+5D@jC3}~&*D~t$#@a9QzCy}<__C|4`XbJG>x_6Czf`3f_LIX3D*x8>&wU;6S_6-ZLRMdDowe)9l9OoMS6 zk=DIxbr2>3j|}fW`Z>or`@#FvTj2(WVL6jcHySK&1T+AZKx^{BN1n;=qGpG9X23(R z6%A>6cyz_l7rye%AAavY^QK>3a|EljWel6SGZa-vb_R!cANuT*Mza7_1+TiGp)NA2 zjURmk?mm0@{16M|9SZglf!YBRQf2u#39^k!{m8?gbB1cdhY*aa{t;hF9ku#*P9A#hMJ*69raTa5BsMP0Fpyd| zZrs?rcQ5B#y4-pydx-FwjC_jdGxFaMt!PhP`w7)Rw7{MCfgvHKFvZrP--LbbYiy925yr@1MhOWa)B+?lwtDW<=bU|p_xXIY*4q2@Ak;z<_+6@ZuT!g5 zRaRDJR#sM4ty=84!};oQxk7Pb~$pp*$+faXz^jt$Q~`z(7oiJVVQIAS8bl&=ZRZGGE^ zKJvAH`iG-SOL#SuUtVu#`4<_=a?SY5`A2zs;^@3azSRU9PBrz}}UqA&0yMvsEV^0WFG zyBIeRG6TtZ#hF%Qj6h3J<2vAIzDA?f63B_0(}=f^I&`58{)<}iuuX@DCn2A`eWu?4gdD$qvjN;*ea zR3e-7`AD)m8CheV!->pkEOrYK;R_ZD_ub>;Q%z2996rTxOSK!9@a6L`uAzW(n&aTwB1 z&nt@Z&_DbKC37ksE>Gd1!%LveMK=OBy=+JfuWp7-h!6qo&skA(3oOZlJf+!F$U+!4 z;T)?eY!K!P-M#_f@jh+}rf0K!!n;8%>Co?N0t+Yj4H^PZUu1~}Fq3ml z&Xp3gIC!Zc__*ZSGM@qA(1S8Qosg6TM7EI-D&^iakBRReev%&XgC88;clloXZ_(e{ zE5yd&{KRXnyXRMa>!HtmVR;#uTR*eJ=|)=?mmx3j3(u^cUYeL&c;^Q`ILm%;`aYt- z(rg^-uo%n>dC0lv zHRh`xTUh$VbDutv z8s)~2b>4LWH}QMk^B&B2`(W%b=(coS1L`ksR#mLSS6Lr1Obam)b|Vgz(cF40om7k04EMIdtp$ zr{|t{{E;6$_YB%cyDrL~j*U{?x@G%Kcig?}#yfZom+r+e#^F-@=;wXHk$yz9MhAEN_8^BP@Jzms}R18-zydfX*o_VbgK z6PzYCzWCXHzU78}Sn(WXyTmai?8b4un5pOrs($CTtq*B@dR{XwsowKSzex!y7uw=fBeSCPSpAMGi@XW* z>Cb=S_L~kd3*Rz7v$AxS;TU;leU5>VJ(oMS?c?Jv_r3f6?U!GLJgfEt3Jn=;=xrq0 zDqfdBzksSg85CL6hfGP+Kg$3FH&m@Y8r6>0hzbXtk!wLEe-+_*P{%_^6i1E|B^_>Q zV|2LoLtN*T7^}7~d@hbEI$S}P77cAT1`J5nkN)uyiy?TuvIdCKp{&vuv>gY_C}2Rs zQ^&3?9@)^aq^7m4fyyvstc7^pQ`#Rm8lVFRF~GA$#(|pUZE>wqq}VY8nW#?;SQ(*G z-M#uNKhF&=_O{USREV 
zR6b(~7D@%DEiQTZCJ5w#O%|6z5h>rM<_2#O-m$m^BEk4%!7 zEr@)not~O0WjhHDD?nxx<5me7Db}JfQ_L<580bcoPm)5s+F0$ z7;PpJ$}%x#HsDAq-_T1^3b8hkJ1&?mZXO^HKH{pYa&{R;{+49d@J4sK{`%`5eDFa` zTL*V%t&DS^Q@K8=PV)Qk4jvj14=XLmtuu45U&0Oj$?M%G`pb925oJOTZ z!J;lSwIa~4>_}1wc&6^S;|>famiJ{pfwJfWwT047ps!DZ`TW*nk(k)0{E_0ZnCZUEW3oy7~dtsOv-pX-#PhGY%?Z^Nib;S%g7F;xVCvV~2sN zLjp*u(wPB*?IbgcwrSS`Jw1g6$zbk;5P~C{O{5$;li3TKfPBeO+i!fDhF} zQ%OjTT!0l7Wk%ONclN#SeedAGgA{Gk1KV?<72j?%Z zSxhW!**3jn*VqE?IqBSFxP#Pl2ff4>2)V_xdG*y-_myU~kr^820>gdU2Gd;6#M@@O zY#qlDL2#xexU1-Ye2PiCF{yE?;GHT0FH~sLXsk)k+Ab@vwmUQ+^*gERx%q_Qdyr3YBWECu%u1k6(WMox3kPaQx`e#nY$22iN(Q9V#+Z%3jVL-?eXQ zE1v{(L!!%L)i6j`?Lx-3&Hn0d{jVpFA31&MI1OR@lA{T5IA53pSQfU;@4G5e&4{*X zOviaSIm43Z%yqZF=gMnuJ9+HL(jrI3!xGma^a72~>{-}(%kImkxAO&IS{F7s*tAHs z2iC%Mszi z%JFlc23kXwI-&j}QST{xQZdalY)r#wdt*)|HgA&a(x zL?p&OqO**Wb^)jb12NF(47{l1wnf!e)=2DvSd^OctW+WKm^Lml#@Q3LYBbq86^|0m zA2sU6)ON1a%ZeyiVjmKRToEhURl+6uEk@VMsS&i4z*iG~>6d34kOzpflR%C;{caIj!aehv5{VWh8t`JI5 zIf@=}Vjn0*hGV^kmzs~AU!vy=OHhGrz>o!)B&R&zwF^K^jI2K%2l?)Yk1g#yRwWP1$}j)+r?cK>2Gt5goO-lMWp^ z^ys6H-uaF@P2_*N3*sXJf<;XIi)9t#3V~%DcNGmqg3QxohYoY%M|L4z->fMRu?WJr z%#!gqo?pqL41f#`w$DfvPH)dB1!z2pfE85+8$bacBH~rV#DZ4zV81>J;n_sSK4Hu| zD2@oo&}as}Wn{$^7q1K-$`3V4I9`ET_PcQiPj*l;>~X4rMlBWEqal@Va!Ls3;JTN? 
zwjGfcWy43~2pK~}vQj&J-23sVQzuVy*wMZB-pdKUh%0POhN$O?A<|{p)5vRY`K8~z zX7S~teCvh9Nz5FT6?G5z9?`Z1UbJUiaHD$Mk8~m}7saN7Bq(0XAmG_NaNt1y6kUp& zK(5eojm;HWy}rtsm1r0Yd`Q{Uy?v>gL}8ak;1$QqmePY5n&tEkw~DUuy;I17(P8(8 z7jT6GPgn*J>vfpuU})cT+wY1?uDl(?Sb!&(RRLFz5g|E_2r)CF8GM(}%%m{;gT{VK zoEJ%X&zO2xjhYY9hSsPM{Tg-+tHL+um~I z$IpQ9cOU!C?j1Y#?YokV&z`Njux01vS7N$nl@z?_iTK%Kw**5jm|qKXv#i2m?W+QM zz7l~>gf40GGoaQ4d|~^6+um{AEz-{($Kab%>QIDPoun;?rgA$0z*MpsqiGA)!u0sA ztM7RK)n+rKvh?M;_hk$8D{uE=38olbF{T+7JAng3=XW2xZ}-7_6e3C8*IodqELiyk z1aSnv)KPnbfx3Nc$M$=Et3&*CYo#RRwGHP>tLEdzQ zH4sk*0Y8-Die}^eL4?g3wYIcU?Gi;&5Ys%H+;PX--hTVrDwjZcY6c`t=@EZWcd&Py zaV&sxh&#&Ao@EEG?+{qj6$N*37ABK3^Q#Z$S9n3NE9jLt2S z!OC($I3&V54nLtM5~pg1)N{Jr#Y+Ax(p77QHq6o4|Ml~qzUlgFd7VHDgs=O$YY1BS zLMwyv3?tFR#*K%r`Od>%d*}Nu8=vI0eOrhoq}y~YD45;dPxlwD6(1oZ7A7W*d>qwj zjUUCYjyZNNJ0Brj?77+D)*9~fhIdz#q{{8WBMhzSZz4P*g-sz9;mTw4^rB+Yrl6A| z71UVW$pV#$SD8k!kIq%wqfnxH$k0?y{jah;u+yHv(D21-QBe_6V`7(fk!qJ7P`nN`7Y>CG&zeA#wP9XxEvlux;$JXm zd>X^W);kO6_-!$QFo!iJvpF}TAefX5F;gzZf+@NlAVsD&l65)i5wLp9g)1^Cxd~dz zRLZ7wZm^GX9ZhGv6VH@7?^4x`w4p-*3RDwqq0jrAM*0HbLO~_HPaciiqb^)~X~5Ek zz`!bE4-0Q+-+J4P6k#@m%|x&EeGR)y7jz|I!ifhnHn;e)eZV@LqG3v!p1khO=%RGT zSl*8Q8h9YfL;XSW+Rmyt>3l~*oagXM!!zg6QR`5>iEA_pL!U(Y1lC7>b_TWrMa+Uj z)m6F#<20mjw&1(aC1hmN|KXAq)(?h0W~xRkS%IUkp9(aozGJAm*kMy%ZNCo0jGi_pgpSO|QAB*(P{f4T_iFijU2ErEiX`FeP>r5%#!(kPJ>tyV4sx2P*+WfIs5J$!}3(}l4yp?Ua6ro=~hd+NI7s(ie>?!U}FGM zssM#25Qq5)yaq9ez*E?g%%I^sB<%xpcEZRaJQPqKnNBs-<|=|-4KU&owF<4WkfYLn zk>jqPefGD0>$iC85Qj@SVmg#J{}q7=juD^cyoSAsb+Oorz)1;4w!uU>IH&ESfXdLk z`R1GHgSd$?aBF_T;tMWCwyH=XZ0JZKP9?!P_k$xeD#W)AssM+Y+*_SpK#{B3kp?0( zm?CDz4^m8PJd`^K4AcgBfpKf$u&nYxb_wolA(YjHA^DI|TL*B*D{$miqFM$x6H=Rv9^S?-?pJMReFYoxT*Z0 zi3KLN&+OT@mt8Sk4jvaFm_|ZXs3*|4Hq25XlWzDwHMVtj``pg$SA!}Ih)@y1?{5|S z2Lh<*dlX6q+8QSDbaHCT%Qz7C93*b&&8Dcp5n}h71eDcYgZn`M|coOu=%$hTJT^sq-4zJy$bAnj0`_%+@^Od7~Ol6TMTwCanUDUddQ0XL4^yivAu5cIOnf7ljvyi+nMsL4unXE^MglQG@d4K_ zwsMCG%~i~8Y~?IE(lAKkVrTY@sOnK2$S)s&V%4xGBQVbi>9oH4Gq%9HTw8ZTI|RBT 
zyWQHL4=_Qjx_G4|Aq0Yhm<_n781fDsy1_;Q#~vp6vXYUqpzqj`E&{Nd!@c=I9?hTD%=2Z37ZOnH;LeaPD^Y z24z}BXg0MZ##X3HB!OhK~Gnu1Or_zeW=x z1WJu+Fjlr8CM3mK%&@KvMh(@(R3JdgmWUG=-rCepSh{|9Sb4x@nHmrD(EN1IMPh#>kYD-~V$yRITy@)K7Z!Fd04*XT6?n4&27nScu+$L& z6F>)~+E~{b+Nkkg{^ef|95}!&mBLokf{lqr(H$$D;V0fux<`*jgXDAD9+MPvv3iFk zWCTx1fjC~tn5$_}E$z#&sIkXw;6%|O{&{N06cd9Lfr%;DVDt(wfkE}q+pxfjf-jnn zuWDiEm?sPg%tAnfH3AhD4^&Zv7e=~DF}Sb(M0HxRU^a|n=q&-M&E>S-}=_K ze)*SwnFwZBk+Dch9_j#z_dD?;lAEi_ajg;M!pZmW{$EU|u*lFPA5(vX;3ci+zY=(t~VmVAZH%3L&h}aZE&wc!Ja9`qu za$~#!9ra|w>e)pseLhd8VZegAX0`&U>6kvp9YzRWj3)z!XKvZ;M2F(oQQCgFm#AQrpJO@~ZJ{>2Df%|G<_W+GlTH1n+NBn6j1l)=9zYpy}e;;54(uNMH zHIn0^(Eu)FHszs`^Xu7^qq`A8CJz3kOb6^-$fb3w)QD0U7T}TH(NN9pAjOE`rlXNh zX)k{|`VAu^u|T?%%d2eg5M&rfzUHWAo+d;0$Rhsmx8=v`HDgYkGhM-uEaM+S7H!@YZaE}qjU;`2 z)a8#25&SAXM8#ZbEaOT0(Pwjl0eM@BO{nct*0PzmG@*M0^7P)op)C2VO)(turJ)xL zGp%Ass~?25fYVEKdE%{npJ4<}Yi;)jRaj9L&~T#=$IxU4<>?b={k#P)n59cI3Dr1s zIEAhlh;9wQa3yV!4>5%p?u)SShBvyX>P@1Qi zTEskTZKs5@HFnambp^u(vatiWn4qo0_q*7+BFwloB?46ghufs+1;|s=JIjFM`%SdQ z%3n@rtjeTQHzr;^1&B+>6{@BE6}yDM=)z~pIXd8NZ+jcrNWrC{%^{ecR2B+dpp&h@ z%H+^pw{G!DI~$kTQtujdK4i&s6buoWg!ZRyyi%dF0=S7+kv&29H+hL$RDujw&>&9@ zx?;}o0&VSK?0)e3V;*7R)Q2`oCGn9^+y-eQZbk^5VWSyn4=m_@ZqG|xhI{rOo)J2Z zz29wDo%jdoqQCIr6d%%p_#wOLB(j^N@r9NKrSQu{a5Ja<-w~xCaOD>W_>Y>*6m_aO z_`eih*2sBnnU`C6*PiJwh(Z%)e3{-bZn~SCjjaL2f#*x#{N^|By6Y~8MlT@et1wfl z-$t1`bxsF>z&3-Myc8UzQOKQt#Ta57J#gRv)jj<1!?oj*Y%bDNL5V9<5E{2_!Z}H# zD058&f|DttE@6FpTG34}02Fx=hfqZ~uJ#&#DHVbb^9RWyBOMc;$TYv?$M0b0vR*Pd z@g|N4kytRTy7&SyqI^YSBse*bU?f?@4B+oZUb`NE1EIl$K`dur!lG#17h;b{Dab`Z zW_QZATRCvms%}63`OmY%5$8MHF#aQZ0dH7`Rd0U0bD2n&Lc!2^Z4HFVOHE)7kM6WSbMnNlHh1{$xx5 z2YF|4d%sw-ufv|$@NqcHAta&%@|7oZY2w{4{R&cHG!i(Gkgay87ryGLk}(b%PFo4k zbyLGgvQ$9v$r^3w%2-kGMr;S#Q)ADPq8R+tL^<#fCPz?c@@NlOPODDLEXd)uNheveRS(3dW*{ zA`Yw~6Cxtj>{ybATKrYHP1guQlPrL1{oS zqztqW95T{#o9csuog!-8IiwzGo0*G2uwH2957HfcAkzln%XFDYoW6s!usxBFs(H!* zZNmsd&b^gq?oN$O)@1;(Qe2emK8n>)&z>pb$B!TX_P4+N*0;X3bXp^CDhDG+v8MAZ 
z7e6HqL^KlL^`cX=EpCcU?dfqj^hrL07+A^XYE@d^lN2zV1TaU}n_pO9U;kIW@|Ei0 z$eJC@!OxF3S{k(zp3*d6YOve;90=$r8U1OsJ)LyXv%+rd4$+`IEDpr#rMkhJnVA580WBR7bv*V+k$X}RC zMJl&wRN0Y7jn`fGOd`lMNuYr#rBq~49yWLca^(Rs`{%6PzyfxD_7y+?azKs0ME3Il z77$3QhNj^`W=Jj=2Ql%s!#;>fb_k0)CDM}=Mo<%i9ux&>ooq_YsK>O4Qx_eVdG*G2+$bgZuENCItBemJSErq0zC3Y(`y1!xT(M8go{6sIYTo{3P_9z^THNOf8D;;u-MlZVix><2llp`RaLtgz}9q7;dz2|WZNv4-i3 zXccN#afv(>Lv<+V$zrD`qBlK1XezFk6tKs}`C>?X_0-VX3Wy<~OgwE4+R`7GAd_%3 zFVS!dZc>Rx3=1__CBtHb*dQTPN1ON-nTjZOpoHRzi zB_L<`AF;fa&K!Fa+C+`j-HMEOCse!_3mv7qza?n>wH0e(*=0H(Sk*;mXYi#n*6sP( zeDMO2P(bz;?zr^`!xu8B|5<3k_|7gat%G3hrNclQ>NiZPSfiu^>5!^j+#;je-gK08 z%0%xq?U%+?6RXvwj+#=EER>>(x>iNi{L)Pw5gIXHO@q&ENA5Sb`GQ%_O?0H$6lk;I z!l2s2%EGn3?<0L;rEtyEE{spHzO8b#34})XFn_XbhEoLY9$ffu4M3aAog+Kvw)Fyl zMjIk5S)Lyw+y$~Hkp~!Rj9&#mZ6S?2uAK=&6E-#7F@Ce3_9ZWb0%=SE zm~YaPQ7-gkuDoN#+m4@l;8ROyINOxh1A|MNP+D8N23;cNdg|!&9MXq|k@c+N^GpUs z=Q#XVs74r1Pvxl*E=fr?tpU9oF-*-lY_{VufotG21yMQjWOxxEhDEF&tI&?!6fje{ z7u7%u21HFXEm*a9avHDDKk5?bmxlfpbG&pS-$(w;XFhY^efP0Njugfl1|FIW$uxt! 
zwi>%8iHtbcjEJmySjbtLF`Zo&NKmtK^)?}(Km%4%7dTMY``-6HUgE}C3`0rjbu_p2 zHVugsVeSX^JjFvwS=$m)SHl}mt?iXVWhm5I&PMj8=1hdGBLqZ-X_Sh-YQaq%X2Amz zF-yWGhb+lkWFddX4S^MgA-%HH1rw47WQ5RhP}vS~c)uBUmIst%rsu@X&BIK3X?l8` zb3eXgwY>b<&wlorYp&tUO#~cr0h!rCSJ8QYi=nX#?E>}!PVbnmO;oi9oNgnNUoo(W z3@#AvzyE%=lCdkiFt=F2P$$UTy2JI_km7~XuJ90U$V7i*29~Er@oI4rCwAnj zv!gte(4gfjx)%%OdE_a8Mgwat_7GBOa#6x&#)Yl>*q0%|hv)3-vGZs35jm$&#o-jr z&Ch-AbG2F-MhBhBKuMQzq&P}WG?|O0vCXYWda;g-=>j355*1-mvVjqY1+s;4Jj`Z) z;h7;E71O3+NK(!_G3Hj*G!ZxABTGRuuKcR3?II}fHk|&Wxe6%-~V|D`h}9 zPPZ(Zv!AeF#7J;69i>T9SR&A2ck{VJOgxOp{JY-uE{I9(P_n!m#EYJF6*FZEUZ@xw zn`-DWMOUEMO0!7th!|UTBv2go70Vdx?zrP^4?g%H4o8UuRjVM(Qc!`XqQ#xaM&EQ_ zM4p|W2o75@qwrDpmQ0+#v{!||uLW6b2GJ_MP%zJ6a?ivRbjh4-4m!yh>S~|J2*6rV zW+O&q3b_ju1snT#kf#lY#8X>cDqHaB$~s_;W^**bH$=0+1~(=JVO%;Gx`-$QD5X^D zi*LR`!1uoQz3d0aY~U#r=1P0(*J@6W73o6UUWGRbxU`P5QsTdMwFXw27!eIeLrrDY zkS-DO!o-q|+wZuIlhavU!a%HL6iW(5u))oQK1nUr;10F6ly1L_Y@G7 zA=g%4L`gMB6YJ}!Bl=)-YRt^efA6u!arA%S10R5XzOKY89E{X< zv_MZy#%yh>gFMwZQbEVXo9a3gx8sBptu2wEfY^MYrA zPz;ltLZi)36s-6I5z=aUVp^b> z6cdSz_BbC%57WaSAV{9Tw14^Bj%=PlRx4I!c%UHp^EpqeqLeU?W&y7uWju;0uBpk- z0VO7cdB(@_5tG8Y7q@gzQmU(R+l5fhw*+L5g@c2{V@-wiA?`tv<%JCS8TLfooC!3q zO4J=yU^$d=-f~>xA0Rh?{Hb3L0l||Hk~BlIkqV(ESdlVJ%%eO3Xdx}$mA6&XH3S0S zp^k0Sfoy1SYm4(Mu61?v83Iuq!oS8Tl86Ca78h1rsl_-2(}7z}7yDD(wEpxsG#DSo zq%5#8t{m?I$Qt<#8)m$vrFnU1Vhdg3NP1z&LY>d(XattMiC5KK$)Eeb^&uwClaU(k z!=R0 zj?eSz@bRlBHEV&VF_>K&s4mwGq1U0Y=kpQY$Z+1exdza$Ut<x0RTjRlJ~*(BJy-Cvmqr`1#iWuFJ)->NBO5e!duJ&9W`Xv^*2WQgdLK0!59@ z+O?xmJ)?p|I_G%bMr$sb_Pg$i!7Akm&Rl!_G)1j%8<@`#jxT?q)shM=UV;9ZFuT)-U zTuToG&VB*%(dw&D@ofT1Vbt}5h-Zoao#^9#jwq0 z2!d%Nx-P~MbZYfBG6A^S?+mjfjsD)XA_nv(#5diPD&%+s%u3V;;kxUtyYIet{?%Xo z?6Jx>e^EpZNu zbbC>Y88mhq$Iiyj7e|$RaMk?E7A3D{JAihL4mQLPZ+w3HJizv`~dnZ6&gc}uaEUV7=z{_M~0 zx#u1Z4ImxzLL`iMNGOB|OcU!&!>T#fN+k}@+N7$oSf4Oa>xg7vi`7W*nSsAl&Dcgo zTwxQwe*BDo{nvlZ$2STqvn9Z2vU#eO??4md8-4a2I7ZWKom1LSjJN0Y-aza8TBZp$3P$|JhBpg^7JvYuNF|*GGWU7n3U?hbZCO*Z4F)yn)j8xkQG#k&v* 
zhj~iAzYtKp64r$Kck->Vq|Bt~^qrc%8p#Q)5rZq+tfd!VbwsuXID&o8Kr9<2Jxn%+1WN>L%_&LF}pn zd76N?!B)EW&AsgC6BH_r7{^ehQ!aKp_3aU?|lQfhHS7Q>~T$@sVXq|H}ejWGU?t!tuzR%VCOqQOkMD0H>0avdFq` z)cA`X61gaVJ_CLjM_G9dsi*}x@l&4|3E&!j@iqhTGCSd?0}aGu3m@u=TTYr7vs378 zL>HB6Dy77gW!=C0%fEd0yWfq&9fihNP;)9M3>*A`W(I-K#aDqUg=Zl&<4tukJYHG1 zktkAHZ?SzxQc@%=;E!!i{-Tcw8x#NfFJJ!b=O6sfzwjzSQle>9ox()C1!ake?PyPe)I3i`x8+-}nu-gmc;w8NgOC zXjBPYS-0q+|I_JCNPvJ zB5Rk^OkjLs<$&f(cG(K)BON`nxHCa^wlummUzk5E3;J8fKaH^+T4M zD+L5FlVFhjJO4`oSe@OU8BbE%^At+aBxIDVl0Ip;Du8IVh%Pdt(ZasJdTOsrM$}7Q z=j{dzfR6l1S(!%>u332i^u{qHv^xH03$~L8x@xjZ6Mij!P`<#WxO2_}`o*@187zlv z@XlH}6gx6vf*zgyY@g3;(4$SXu8ymHxbNzMo}k>y|wVsyuBbG^6{y7~6cKegobyp6KOA zPhkRNSbllk;5L>F|IQibV6!=$O3?!U#U3 z5Ofl@U+hcuG$v*z*iBzay8hQugPk~3QN<-^S8pOJD@s(THg|WI_BU8wT9IMyAx)xK zgZXA;VjOiXs)7#o1g@rcjVMvyET%nfv1rY?nZ6-e;UGjpK~s^}Z!R5T)x$`F-f`YI%j-o@uj~?NNE}xb zuls%Z%U}Ne-~auquDZ&~BIdmCK0iH^Zj8xO&{GqakfCn0S@~bTj$k&#UzhF#=^PLp zC_jra%MvJT0w0c<{=+}~Ly+O@3tsdfr9w?m$-^cWWRC_g!{-rDDAv>AoNiErl~BCR zSENp78PGYB#Ll#&>B0I7<+v67aWRNc@TYNE}kGitqX(b9!4mM;g zdtP9Ufz>WHI{TQK;0AVgFbt`A84Wj?x>p9P-~I064?OVk4}bW>9FtmRN-?py*Uy7q z39LKeYX(9`W~YsC3KH{%Kk%U}PYHFcQs-YOUXwi{SSo+?M}PFgAO7$QU--h(nKSX^ zq=fstvf~K!6<`y!5Hi?|b`YM1s(CK;l;Wa!(YfY(o2kYf?2z2?)U<~M2jKb?5ddAB z*3OT78gvZ-i^pKI-&rI1%my)RyT9cd1_uY4Y-~%6kjFm{gz~2#=?mMo?$W^o{Y(MBam||{{DW&m< z5=10V+8CBfs}_hcJ3TkZbiDzuGPwybcU0TZipJE1n2^S|5ll&nv$-Y4a;qgt*r9x@8e)Bhfla2hGoWN7^ps!$@dm^(aP^EZ+ z2=J&X>tsF}J4pz3{q3%GTiHji5`bSPa@BDCynQAp(mmB_1f}#=#9wV5Dym;A&@ql% z`(sfSfO=TkGjXfv=#?}w6KcTHpyZxwFr>W()Crh1K=2P`wz|sr^7XHO{h@~*`jbET zlX`ze%JYHQrKJlp%Sr?#O(Kn+dChFxSA(2L*!e5y`yi(a3ti|Qb@v_{0{ehdOuYG) zTYmLde-#zV(NzQ9MM^;C_=|T+Xccr{rmcw(rS(0Xc@Xq^sTS2vV^A(sb!4{)1GAtQ zqS7KF>K_SCqJvIC_cu3DL5%{|H5vl8bm=hJYLcy0ktf9KrxZ{b{EBW2z>Jc}N-YcV z6dD*JLvEE~Sn)OiPH_)7<{|R+%F>XYnZ;iQ}E1zxqs)CaCbO5({Q zNk-8M=vFNc&JVOTk294Au!_fA(x8Q5j3)(s^lIkrCtEw@1BP1+Ir)*=(P=C^VpKzG z7_6))!YM*emhZ0=hNDb!!9nIc-<^~+%gHKlCSD7K5rPw|l}d^~pS7*$`q#-HVr3~;^OHnR9w%Wpj&nM$j= 
z)6)xnbwjjb$GH~inYu~$_c#SYW}p^sND~qy_^qiX=!(AjFs_Um*-!`;X?Y<*Oql z4^>*bDgJ75PDPoY(;_g@g9s#AKv@S&8V-!Knh2z0Cl?=>X6r5{=7&D?A%-^yLIxr{ zC9-9VPmhk^od)3P2p}sdQH(^Ul^90srJ-3q;76OxyGevhuzeIL;lbCBN0@m#V=Chu zE(3fnY%2N8XFh}58W#*_8kEW&ES-gSlbl#oE&v(``!tAjNZB%LhO;(V`vKW&+9yi_ zg_QNT^lpkCl+uejZ*m}mIq;PN?dKAYcpKZ>m9|Xy>4jv=Fqo@+BK%5q@GH63?_`2O?HzHr%|UGIMX zFYny7hmKGa16YZff{oHK$u^!77?MFmOIM2MZ?|Hq4sSp$=B6_vc3b%1B7s^Em|`CY zz{(GP@PmgBAO85K{`t!NS6_GV#=Uzl_uxpj*+6(_Bc4vB3<6Mhc|nI8G*H@w(cPyx>YklfwwiD21uU3@eEcx>_F6#q`ERs?ACI}k|H2Lxs$3ZVqzed z_6t#pYb{+Y^CrE>7u5n2AX*Y?USXIGeEf2_(#4CTll*W_r4$uUVAZ|w!V8>^$^r>r zp25wLa_yot$9G0&q-#)<01Z;FVq91Ar4Hp(wDiUEha30vfu8-bVm{9pM%e}|CHM~I z^!jo&vw&Ak)d(g(eC<7Fn^K4;G|^)D(b5nCfF!g4V|B9<=$KF!1;o+=B(^7zzfcdl ztwjEWdhKDV4#d=%u8f17Kw@YzEXg;mvvaKtMy+K&k7m$z=K#Bwzdfy{*{>lB9kN}sGvO3mz|!j#Iv)aUMbK8=Fm{Y)*3b+R8+Uxi70G;Nra1WZ!$CH zZ>fE1rq?j% zw@}^Rgfxi19(WsclVc>S&qBm{Fg}i#GtuJ6*mVc4z3lSs-~86Y-}(4sobP$d^*7GW z&a-iPc6!#?KHE6iN#-UoWq=M&h+^SyBpeKU4-6MGyCZp3Dk!k5xMc_# zlOYcD463Dw!jT$oNcA|Ov@cw?Ls@dPG2Z!>jXDPEgIM68KcN;{m6tY{hEZGy_0uYV z?O-9IbQ3Y*ryyYfAcC$cNNvea9)2*#U8(4K4(Q-;A?ycU;o@7Vs3xBAh%2~og{LsI z$sk1=<&_hgVh2l_Vg?FaBO#=!^r5#+W`6IO6hb8rX@;T-X3@BfaA^V&lPIe!WFF+GXnu-t$R)rDvG2^^ah_Ss8{Bix4-c6zi|XdYZ8 zDOHO*5i>(iEk5p{D%1p~U@Hi)!`*bB%8NYZftZ9ZRfeIjs$t5G2V_RjNl=)!;?}Q9qJTAbc0iWg&t*E^f>5 z7Wy8|Jwif%FR?*>upI8c|9(CS`tgr{92I)#(19I0c9Ce?p}E5thJy>vlnobj04p*9 z&+)RRHaXA9(Y&+(4hFXU22m|W$wrSWy$o+`0#t_uB3v915+#Z+6oH>SNTpWBktnng z@sMo<#Y0+vCq4_FP*6=FYle%BV(5wMHWQ*&UP1MhZ8ccEf5QXN#cPh-(06Uz6eoeY$vHa_ zd}?*dt|0_YEG;5tZ=$-H+0!Rab&pF@EKzOPR7&ImJqrg$=O<241pZ~wSh|0G(rX8NO!|mTt83my45Luzevn>qK=hX5wRyf~;7C}Sw z7+0u@=bfo5w->9o>{Yp%9Ogri3QdKF4noLNS_8u_3Mu(G-gjn(2?gE%6)H9B&nU2E zk2d6mDmy;onmxm&@%{?-C47X5e0T%5emNOdc@L(1FT8Nds5j2{r65rBJXtG{)k(5r z+v^sJgQI*bd#0;1oT9e$qLx^H+HRU)vZ?nq=20Ru?sf}zdzeZ>_446WnU+e!>oCtN zwKn;Opj%dCi+a6APgJ}I(@^T~D`)hokJ8pAp4?XMAm4fxn`?$;D|g-ZEv#0Um?B7f zLeEO>`n>gsT9u7f`w0B@_$U8)9i`$zm7R-k<&nfgWQ!JE&QZ#Qw-)IW8!bnw>WDOw 
z6fUNSqhlYEvsl|fB$HBDxww`EFiK_%nX8AgIWn4FNX87Y$Y|AFeJPwlmki{rBBlap zcw+*0Noyu7xBP@z+3GsPoCg`XEqYF#N;>yix)`g7;SDR3J$IOr-0CjKRrZ}$~f?V96V$UR%(9WQn&o|BU^Z0idV7+PG?2WhQp--VBR!D<`L=~QGW z`z>Vk(`Qa0x(!^y5$Z@o3Tum1tdRvJR|!06lt}Lq0VDjh_9_q8*ai^KYEAFDRw(LZ zuhO}qDFLGGhIvwU3i!K)BIe-O82b(|J}?NFs6!17d0?9NmbbixNGZgcqSr7(PQFDV zmxeU}Iif)eq>%&@Lx`4iO-{1M*n)x#&>|^5Bp0Af;HpxXSOzYUK81HkIXnSZBb!o5 zu*=<2G1oIHz6fxy_{+J;l z+fb}TYGbI;^whwI>;j8SRik7VJFilnKgE#@dM3)FNWOW*(>xAwl}cf)$v|N0#V$N% z2e|BI>c;Zfr86(R^z!McV~fXMKK|m%Cyvg|PG57)b?j z1FeKTks7H$C9TOF{eTIbilJRgNjo>dIHaYF-E#L(Df^la9J=PoAN=qe-}>gt^6K`5 z`Mta6=f_WGH&B^uw!I6oRV7v&>{4lyQ7P%{2@JU;1x7$&dy4nY&A1Zf51Noj{b-Tq zNV$`pAKJTkWYspd`b)tiDUrU5ncFO8*2%zAAsa#{Quv8s8Fxvf60zz-3Md$uK!c?L zyQP+oEuJ`elBJQCkDNyGdFP#jS&{IP?Y5GBiGEmWknlr;92c>}cW2|ZibG?c<|T;W$D55j6!sCFX=4lP}rXEN_VtmVSm2--SMXJr+2SJ60&c`_*&f3A(=cz?6v+&54ABm=j zq0m55eP~=+2iy2H?6c22{o~WCv!_oSIrh?vXU?2rxH)j}nmg{eeSU5``N>;4swfFn zONa32+@OjPJCssG%vj{Gn$kZA4m8K76-L?$BQO|h6I|8Bs~T)nWK#F8cfIrFlP9qp zKK^oF$*1|W3`hbI!#m}8K)R&IK0W}Ze9ww%Chx+ok>k-D_v!%9b}j#@-1R{G2QIBjT0F-@i5RS8N89p^;La&Pd|O^ z^x0EqmX5!Cba8oQ>#oZo4TsW$}({@hiPGk|i@Bnl>E-1f3XjQgRb!fkfHRyER$DhjZb0 zj53PJM6QAjRhUSlP8Za zomt#}t$mOWQILlYSiP^vNJ_0=R>y1_wUH(0jI_3Bi3OE`)eqJ|mJvotJVUk7SR)8A zAZxu3ANhDw;89;!*u>MW%{&P8QUIV$Teb4Nl}N8C`>+UUw;jUVR9W2sJ{VGUhp8xt zN$#uqM0?cgIoFYgO?XFI*BTQkur39JGz&2$p}#3^d@LQ)-DX#IGyLRGT?2kq1giQn zC!sHHRn9w0Z0M_9<46~4jhWFYfeT-15UjCebA-U8esCe>sRpcRAZITts14 zH0-s9W~&nio2`4^8?xfQrx`!g@x}&6qHM9EClWCA*(<EOE0=0H<=tleT$qO-JV~}1ND5v@jokUP z5-QSZBw<#1X?|J+QPiQtxboBTF}$Y6TJVzH#22b5VkWc;gpmr!W=fd34Yx$vd*L@I$k|oR z-wsauJ51^Fr2ZmbU5CZ<=v7@xBV!EFnHk}@4U`6O$Mzj}zx~eDx8Ar?Z48GE)YJ+pOYb#iKM zg>!(%bBtPf@`9sNLk@F5*KbTLt*$-z&_gTV`T_XNoOy274Oi*;7Q&+d06+jqL_t)# z((%ln>Qj`lQz-xXN}$g;I_R^1|MyF)Y}p&XdAEk6)6q`v6~Z55O7(2&1v&Oyte!e`;`zgevHlsl z*zd-*JW%2j(*zi`X&3y7W0+2EUtrUiy1BDm5Q1K#b(|wZ$6-~Z4xNOKzoSg=;`}vR zV8KB&GIynyldMMu^zgY5n07)XyxSauI5q2GyGtv4m|J~<4n9! 
zXTJ2$AAfP``!kb+>9wP4ODEUYFPp|cUndvOeCqH1pOx|XjkT5Cvr8QMSbJ;S z7^s7@BaRg;PfX5WxGXIX{`H|R4K^N{ot|DkbM%($t{@vHZH1CV`B)Nwsp&LYm=Dpc zF0F7(*~;0~!%rMuo8CRQbMEGw4(-_Uj``VnTzn9^s$u+vhSe{i52S$0;l)KSw5u&z zOZQ>d&7CI)wURvIcaczTVrt|$O+a`mJ~J_Y>!Dlt4OTWz96R>%^FKIs^u%CsnfaVq z8B`#@vDQ0ybBwaQx(AC-o;mvEuYCFFSDs)$-?p`5>x)OOxbiZnfnchMun28!px?Qc5{ z*cM@*&yy}kGiAE2NX}uj)*#Zyb>Rle^p4pV&mN^Tb&(ZgUoP5e^8)oqey{{U==rP5Se|nl&L15lHdHl?o zfBfhFu{t)nZTndAHR-|ou+>l|x~)6Q?XI?mZYU#v}PoF%wb@wd9 z#(2mwh%UN}giJp<3ut+2W|AHKjJ+pLE`IWF|HtXcy=#M&%cqY_t)IGT>WaZKC+JSk zPVhDWBLc)i<-(!?T;U}FG|I~InJ<3v^GjcQjP#jP&+fbR5HlrIy5k!vXM9ziqduWJ zB$iPg?X>a2%g=oJ@Bgp0v6;1%#cTJ@f)Bep6&QxMjj=%%1EdE-2|Iw%bCWYC7neT% zPak8u=k(;_>hkI9c5_@W`8>68k{Pu;qhVIYY;KyNpPil?H0sO`pL%j*ZpXHr`|wHa z+O?~Eh|rA!#k=Yh6-W5-!b{bf5R%Ct$!dUs($%fvOsS_bs3w5t_DP=0$SU6T#}t~M zom*QOC?T#FW;{UA>g>kJjU~Rueg)Yy7>08vnqW7M2#Dy~*wVv~e)Efu{urZX?$pcsc3lf< zc2sxI&>mzGMr%)OY%tKfPabg3A9?BHAN}ZHde6jQY4^q=_9qp|i7u$!GJ69%B!=T- zQ94im_?h{|_4#c(dFkQSn{S$1*fKdi4>mxAe&mk_&*d(-pt~hYuu#4&5(_+iL|({{ zbJ+wGTGoZ+O1>8J*{J?RRH&=g+j#jk35I%r;nIg$U-V$ zX(O6UPcck{hwK>y@$tdN;K46HIQaHcD5aV8m#0=vyk*;g$+bydvyve0nAX-pmD6Qu zCrb>=XIG{treApenUDYd-$9efwHJ5H(lb4@ZeogE^}MX2tKII(<`NuF_7?w~Iy3mf z7av-l{oXXXZ7VFufAm?z#5Vp3)#?bhdQeH=MhCJ=l>5lqA4~x-#%F>p_VX1QT=ulOZV0F0Q5;#w5u_!RH8?BBmXZsHO8k3#I_rSB1&Gxq##%NwI*TU-?ms!#*c z(VQ(?ww*YBig_OfDifm%lo%$B4jPr(Dja5ct+5O8acpcEROvOrmIKY1gRya83f;Rl zSYPJFm6^HqwPo4^*W%NtRRApy{ZK5Ufic$sZ&t{oK?Jzphkw|C9f2 z7MIQWF9hwC@^@Ny6v{CF#WAluxsFyj8jlv*lu&vpNtM2dCY96*3~?C6?A6CNL4Nhc zev$J(It8lpb44wdi;63^)vk;qZ8-E5@0p1M*Izp}*fqZR-H8d0yT!rCJex^1o2w|| z)&nLDIt;mW_rcDcyWjD_kBrS-;VZC5zVq1Ezr3=tiq^>cq~=X2Ym1HT8DW^ITNW1X zyW#eoH{883Soz++{nKD&X^LYT=zQSSPZfeXquvaMtZ_Q&%3$B#z3+JM2gfGop8oz1 z7N7YyfhK?ycCc5D7-i(c7S3Ra&AMyXj=S!$tOHHiQsvrqlkKV($Hp_rAA5ANfxEgs|tlw$^8O9V6&ss)KhB5U^BQwG*#M zWnhyRfy9#=kH7HJOSCsrGwB1n^bzY-cbH@}#i>eUVcXWb?!0St&)xcEmS6nx=l_wa z^w?<@FipUqz?lIb8PBul%T_r9y!qyv_uugWlAnF#v&$!+<0R2pURRIP1HpimRuv(E zHN5u+%G6-aytAoUXE~=duxM;W 
z*E5Uw!6RQ=U3^)f`V-ybB4SQp;J!OGm;z1l;`7GJww*ifxcA<%=`C8>9JS3WpX6bB z$9$!v#qm`OFwOKB+-`cH7`<^p+3QZug=N%EUEv)FGJtPPO^;u;Z}(-FZ8`b;*x|>I z1rw)ZU4%AJMuaJ=zhZoS$M&7K9lCwX)pt(LER3Cf@mmi*Fj!ulnV+1QnnlcMxyKbI zGooS-mNR~L8w=@uzxKPVSs(r3!!P~l3DVs*lvzDeMUC&bL|kJSP5S`=B|i3*#w@7>q;MJKl2pp*!v&;n~CAIr03XB_8Iq!&k8R z#HNwSWyE%c4W~EUb=SgW&VB)mNm=}Axtr!{!cSsN@N8iBsB*q8*S@a-4YG4PYB9eq zRs3v$6wzoG3`NQO^vH||cGFs;;|}oq8W9gXAO~)^c5G$W`ibvujGYAs_U0+sveLh3 zg82T#t7MFgkoVsIp|P1O=n9L^e(UgK{~nyvn;fIs$WuavK(4GTvBpD1_rCMa9f$5* zUs-l>v8bvWGhdi%wrxN=|!0zLL@>5+3T8%q&z#-(|R(se$ZO5)H zTX%1nT-)`?(Qk6Hh+2fRC($zont&5&b!Jk-1MeYp#vROwNq7IJ506b>xwbl3dFhew zedi(OGORLAkMpVSRlHb*H!R88ZEMBs+`>J#+`8+gyH}SNzxSn&;f!8|w(5MRc}-K9 z7uE=Y8ln0Aef#fv&##P4Zh7k4Upsr^snF9*$tKhLV2dzs@{!p4e(6`{_Us*7KJwKs zezwU*%Li~x1)2tYaDe@K45z2|Uvu?M_uV@-!`5%tVk(3LWWE4j7z>d9AyvB~ulrwk z*Tx;iyMeEI>q>`ZIM}`8*0JTwjy(56ttk2CB^^*HhTJqS+k%zzh85);PRO~_qATG4hP(gm+JXBpDtmD(U{awE?K09;d zN8fts>Bkijq}waSVV)fyYmAPDwCFz=AKSfi*KOC`vg^QG)|b!z`zQYoia>KWlwdQ$ z_tJ0hpW>mT#^Ko5ZMVMV(CzPF_VN6aPaOU6cS(`m(m%PvzY-d&C3S5b%i-$VZryR^ z^}MNLpTl+<;a6a9y0}{+`@u;awJ|nj+I(|9cQ!vC-c?RI1q6hH)Wx$`UwjDY7qN~4 zP_Hyyxdw=}nA78$APmf85+MD{9Cig}!26KrnC_xb9jX=B- z(be37JajWB-pAwtT~cC2v>h0v@Q6gpq6}*Zv&vEz(2DYvfjk6jig;gA^(T{$_O9rn2&g)p@f_wzYndupp$KV%wqsT?2 zkhZE*$Su5`Ma{s3LW{f|DzhD-M~!`Zcog*ld}r)OukGP|QY(}y`AqGLV0mLni3dbEQ* zWrKAb)^uBbb2Ib!kJy<@6X<%$LS}x< zkN2r9hchVHNBBjvbIaMR3~y(b^UXK<#N1Y7j4lHxgcJ>A#G!;+KO4;`w#Ig0wy5zz z5apS{VwS|FfXqsFQ2%tp9B;kq-sMzu4y#yM8n}gRVuqEf4R*NNRdJ4DPE%lZVuSs* zS?6Oc1U8+Sl`Qu7&vJvZwONil5`%W22USx=%n-E^mI)qHh*3)R*#z-=gr0+=Z`ztz zJsAAaSG{Utk01H~ov!1@C8suwaG2*fHU!;%YJG(vd}eZd2DN~)(lImR4pAO?t`(|_ zO*E*Gj51!)^pJp|O8>Gz=jOLi>l9y-jNi&NX-|oXCifWVY0jn1&;Wx1Mao0d*|Ck; zEwfu?B4d?2MLcqq^~G3xmH99us&Z}3il~xfmzmIl7eDQdk)J6V5>Ck`Hu(JGDtw%o zb}1mCs0V|JN5HIfZtD@%TkOb4R|GX zKvig4IiO^36JD@P@kN4bK)tF!&dSU<1rc5u{;PucKYga23q=VyShinJm?C(8`?^Gc zsh;21z`e-V0*L$!SxMCd=)fSzu)-+Hl@waaXhj56323LohYx@4YhUA2=eT0&tu;{v zc>zSiUnr2n)Oxa4m9(^mM4@;^5es4*3E%Fyg@Nh!k@r&+V3_``UC 
zt;IAM2hj8^$2+XA`TiOmzq*>;UE#l0OrrDG`Hm5s}afX~2*StSFbDqR$%W252 zc~bs(-bAK)zOj}Mbn-bZu&@rm40o+sfZ-UkkV068RA@%e2Gva_%$UMPZ3AeaRDHGH zFPS9-B@rgV@PIng85}Aw!eXCNj0TfH26rRl0Ad!IEF==l(1l9x?pihm=H9o`=RYTB zU*xBW6bDBkRCUzVClV)Mk3uC#0EsXjXalW-Ht>s42?rcUOp5jthX?R&1BOZVUgKJY z-F#MM2BuIvK1(2{yfr8UHG9*e?qLJU1H(NVsi+FlVz zy%gsmGm$u7!6OLi+E5@sFm!`5m{aE%nmm0Y0vzN>taG@=t-jHE?@8|%AFgGM1_J0j z>d|QA%pZ=OO22?MS?;KoZRl2Ra?Z@2J#O-!DVYn%*;if!&>~#txD{Xn%}JwgJ(EZO z4&9KBP&4hkR&T#--QOOEa!^fR;Nde^88CIn%j_f{N=FPOc_O5&58N`~l8`Pw>@)eL z={en;@^;~d6xppx?}dEHbe)Hg2CdQspcVDH1pktPz+-EUCccjGsmUpAXfK4-?^bZ-xpIKECEqfJOs6>(F zPkM(h959o?rNrm*3JiV=BMR;2z!;iNWxb3tjy&pB?&wYQ5ELujWq`{Bu;S^w%!Yd| z0$m>5@tab*PHv4y)Ep|r(p9(ytOS~>>v)V>}{;b?@OoNhD8=%e=y=Q}iFk8!^83s<4i%=Gm9>dG>2l(Ub9HmhoK z1gN0!Ta{LERFYNtm+@AjvTy>gEKN6iLqnwvCL#wva~2duSf+v(o*2Qr*Fguh1RxxN zVAnN79`gDv4-_y;p{Zth!$WS#9QjWnZkq%+wj!jycPY<^fGTHJ00%sSYJ*K&m?Hu9 zk*(+8$dlJUin2oL5PEt}jj+@qA-UQDW2#wJUuXxUuQz-6o8cJY?nNG#-#Gk3>+|xE z<3jSYl;itqv%Kp~UobVaEXXDl9OzSq92r2y=-fBWT5Q1UH5}{V5eNQt5Zzd`9 zmBBJL>2ywG3|mNxw^&xDZ?bWU170@9x0Jo%QYAK@JL%Xi05sZ~>+pg#1bmn&g0hJW z{7pO1ONbjS0H>Y`QcoQ5s`LtTl-)znKSd0jWl92lu(up~kbR8^7m40R4-r=bH91u& z6^EryNX7?_=yb^IV0l>=DYa~j&AZY}PFLZ0%Y(U@>De0l5EPI4@D=Fv z8_s#7D0@L39xNqkjhYue{S(%xoG`ef&EiL$wXXM(6Fps|Rjc<^d z@0#9t#yFo<+9nQpZreo5$8+K$FgNX zAbgY~Js(f4eCwKPu9c4q?x7~YN5;F?g^xg^BIe!*27Ekt@L<*Z8c6YrB0qBqoaeOr znPc<|1l}wP&~K>1yPO(bmK?@9$?q$vGuk7Rx%O5 zfQ!$tUQ&*VNAGji-@@DsyGnpmqenLl6AdQI?By7+^JZfi(~=x9?g}!y6@YIp{>R=Y zf)3*4U!1qhF54mQ%rt?lLF%cJnfAE-0Yg_L^^9@K7zgC7@;zrJ2V>(4>?GoV9voEb z6FRlrP05sxy^U;~u_BzK0K}?f3kkL;eu)uF5z>jyNQ&qskjOZvMZS5YBprQy!6IGJ zOZy9`;vfS?0jI`u;PBzN%Od|%Ll zch|RsCd_v-;bd@Su*^oP1wWPR*__T)%-HfPdLHG$ z`Ex5+@N966U7u^?b;N`k+&M@wg^HT7W?-$u8nwt%=I_iOGFQ;}H>+FCd=@)a|? 
z!RoRy2q%@6*&J$ZVbK9gf+$p%*k2w|){;+)yb-=Nr)|}o*tuD;)OsDf2djdI30783?=hA9Uy)+{^ znC@eQiY51SH<&XRuXm+1mG)Jaub-D&FBR0!TmEyJ#x2@x)NfT8`i8%H{Cmhmid>Yf zM6SepnC%u>fbs4`WTKBc#f>n_u@CF(r_m`0b_5FE<8MWVaX2C@ybrGa!qI>-%tVR& zI+S%M=$4{8Aj_x>&evZdLoNF6Z5K$)v58`@dFg3Xq_LeGr8ZdOxYAiPP}4u9Bh*|F z1z~`uu%5pwY(SoxLC;O$T&5t;Gl+;rh6n~K2+Mkz0|x?w34w&b*%fmKTe2!A5Sxgt z)gcJw@laFOJS?La+2>K7+AC@zmo394Zg5^=P7GKdW&MGi9;L0(B-{%9%&l&flY}>i zgHyFg zJAHwg?7zc5oJ_L1wlv1OGYnoXG60=pG=Ub&#PfDz6yjDLXHz<+-PG|@9A7ig3;;0P zM6B`kuBxu?o|)dA-PqWjUnhG1js8bA=C|pX zjg5|t*@&H~Hd9Wfs$@!rq9jtJM9~u-c;4>ko9DfYiw8phd=DaLart^yF3#4E)7=cc3=C0z@47jMemXkCbcCc3vxFIeTgQN{6Kn%(FE-q~#^j~5xQeY{w zBpiduCi?to>tS>*_z1EGy6Tp$yF+qCk%n4!dAUQhl?h5vOTkDXm?re?kmopRcL?~Q z2l@bSlhCqz*2BLMJvX+IO-@c;zjkBfjZrtU9KOs$=cF@0s|h^L!>qNdM$`x#xW4$} z3lxi}0@1yqAOOhWTXHS|IiLkPyPr(y?OKIKmF4`p$+HQ z8qT^y!P^7uE|w$`Ky>hrnZR zPnRl{oLMITVbK?UY@7WFARF=;9@p2?BO{~h>+*NxPjei8Rv=(w>a)_?<94QZ(WtJP7H3ei$oi~@G8cZF>EoICWScSN}`-5-6WMf2cyUMP*0jr zAw@w3sp0Gb6C{(Z!I&UVPF_%6LxdMbg?hWp zx`$#V)(~m&+TpDNMJg^mO;wPHiabs0yknhVCB?mFydvwov99Y z72-Q+tKNe4{9)8*2)D9DdIGs?01c8ImM)RDItpNOtyY{S&wMH$qA=e;-$aiN0@R0z zBT_v=#R<|p&~cdd;uWTZR|)6=-J2X0lL0_Tk0S+|fd(A`U4eIs53|^g+PJQwp)PcH zf*f;1Aj*ZpL6d5x)W{ng7>FH*3KbS;fZBk{o(DCRXYE0T-lSr4EJZWD`fE}!W!Lvm ziS882#GcC0f5B)pMWjJ|yCAZ)4|Y|L_?s)ET{o+U(zVkzn{BGb@Yodjls|f3 zWbI@+;m`{s*m5-`+wyK74<6St5OL=+ZcNGb!X0JPZDq)AS2d6;<&yZt5m7-=9o*H9RV;kwrx5I$galL7DU&>5b0H;gL%nRieZ}KR z;WU^43*5OA?OjGUe!=6h{$*#3Z69$;K;_^+U}dy`PfC1%R9L2};a`WXGO~PO%Z!?= zODxMOa*wVp;*e16$%JMNc>3J>1y2SJ6a>E_nJS{m+)PU19dCI@o20$c zEWMF601vT)1b9U`Xj(aV#D1~{c}JQNET}4@}7JQAS^*^ywE^Pgs=gaD9O zSub>5Oyjz{+^{Etk~S7aqXtG0$XHg6I2V&7F!V`vNSUrW*Ky>zNL+%`;qhkxkPQ)H zC43Pd=%}myD6|=}oUW4QiO|*gno#4l%ry@!75t~()|UQ(DvWZhR6Za2F;bzOg#B-! 
z@p|PAd4@DZy5(3tTi@%KuNYggB>+BH5QvLnB@pA>NR8n9G#HB_DGTWfoQa@|)iP$p z;Juhs0*CQRR= zLhY4k>pQ~>Uu4mSk*!HKpX@ErumDTTD8(>i+)QyCIv@o600*#a1oQ+y(X6{H;5{C& zhMkjBFNAH3ePUwb^Upt*#-AC-cX81Sbj0{2V7>-tFqmL@`r(Hkq8fhbrI#>16&rV5 zj|7k_&>*qJz@-5dGBdA;6zdh=&>+P{Bwq?!d+xdCIKrJTee12aC>DJspf(^-Amv*X zmg_)Zp#4@gT3UQPQ&&E}&?ZK@yz_8+(KkPz@jN&?<=U6IIt5|o{EE|FPLh<$D$9=Q%i!!yD?lPj6k zI57-K4)G9?%>5+QqkEGVS!o!h9WLfs@JE=_p&!h>By5^E*_c;jz z(5Lh3nMaVTgnW%r3DD4HTTP0wi{x&BEhUSkK&4oF!cI^{ zvC4;+S#bvUW6gvv1Cguui`7FFqF(i6s09T#0l#>vp2DxuceHK@*JHc7dJ5f3Dj`RG zdg3xNhU_iO!Ui!mDu{i+Ttc;c5nU%-OI%Lrl}qH!kAE7cxX_xwhM-WYXPrX~5JidW zkJSL@Q%7LG8re{YYw?C696LkBLKt<7E0^MMw5rh#_DFiuiPXv7P{Z`Nv$NB+x9vCo zxMR4(-vivnt;NwcVL=x{g%p5%0w2ZqyP-(xBY_A{=#dzpVBrb_ZC1I&sl=TzCDz%h z5(6}5@d6t|1B2nk@d=>=h)ShIVPKyzMX~|2BbWvt5+YPHkk?qmqu>^b!fo6#6+;ab ziF;93oud+JPzcR%hy-cyQ>~Za&{||hvtW#noRrsK-UUR2Zgqf>t6G3!v2`s}a?(G# zqV3E5bWYfMmPLX@BnK2mhD|}Qy;l{kW!;b()sest;I-~huNnZ;%MjmU=KED2tDgoC zpsLT8T!w}bBrv|vE9pw=lF&ZA5ah5RgGsSAeuXfAW$}@>>@1^u)}g0v2cXrVI9XO3 zU%(Q5obu@qvH>+bLk@1J<5AKAHs+vFT{{h$cy34Tl-# zwxX<#IY+#Xvl*%+$jfG*mg-w2Gk7V159|j6`~XKq0td+&694EOQqfAn7yn^Lkv0`^ zsYAJK7jy*(n;5O&@;4~g?OK@w)LP%8LMe}&q$f?2*r9xsFe8w?Czmj&?CQDnOVFgS z1Vy@-W4UUniG`Y>z%E3AVc3Nbhd_v^DQ_pLUM)}$6&2beNaGxCv}XEl>JTY$`M%vf zZ@IBrV|qpaYhSnJ^-d3vr#Xj)|?#PJy+dHbu87o+hzE;}c)4k2K*G6}PVf zfrPSe&)6?@trYZ}0Vv2frd~t>YR%ceu(Jnp^Z+)Q2566}<|Beo z14@$Moe+;kF&f9m1s7mqPNJDR4nt6DilSUH<)bz(J(3FNAd+d^IArmBks|<+6Ik1< zh)|`>_mxXbqA>olJ$uocg>6-7pr~LNp+98)hvT9hH+C(MNE^oX%9lRSO*qyzqyu*Xpf~ zDkxm)64%?Qf}R2fy^?Z}V;i;C8{z)&b@m7lr~=Lg-YsTF z*!uGxG%P^-Xaj1Yx`P(-RHgTSjx`JlVdh{49qx107d#*zqVW`Bh%m0@l}4GyskW9W zJ%uzwU{!m`S=lPVAxJ}vA{&<}j@@oz$b;AF5^~Ri4LfS9xgklof(L6|8r7tK6zh?GWV0@HC?i7Q+D)dMS8g)%%DWn2O-}hN5j-Ob{ac;Fjd9GP zh-~yn`Az~d^csdVr^Iqa=?h`qy1Qo;3&rtkRG?_Xn%&`Alxbm?Ws`mRBkk3p?-a#{ z83w&>MI?aVY#2NoAQ~t+tR#9^*H{Nu+hRoWSS5!LB5ZxERKJcT z3R!|0RE-gNob`K$3pPxUPnub66GXh1gzqq`3}6|UhB-LYt@rCZNudvGeHjUhaGVfH zZW)5)3V1xmZ;aN$TyJyf?6@gaDKVRJUWzz<$8Oc1~Bkj6$~!Uj5$@s{zCJFiG@G2E$(^k|>a 
zL1V3q%`&ow6$?x#f$%{;*RtQdc@w|DdE*f!7XM-?`#F$h`(&W%NNrb53q-w))B!2673I{mUTsDttA8Y=zbhq-H(E}JmT7?}4n4k(85_{o`MFwQgrW1kwQpHqV*MdmCqm)OaOC<)Xs%qB^X zfJjQ>5n<&9uojM?!bBvM=8H#KhJ6I;D>rZ84U-DYi7|{JQbH%vb2Nc;i%{zn(-NZ@ zASpo>4KifKwxSk{#@WBB?7LqEK;9c*88U62&@@=W#CI>_kZ;V4(K6Ev8}ORSB9co! z`7pte!6{~6oCw53S2)-HKuztiFl9m*1FmvOEEf=LM*>P9N9I@HD##Qr#C$;WUFE21T+ufGS>-3e0!pERe9%7Xwi2NaQIR8jyccl%9wvntO;6q!WTX zFgLOZDHVfy?ODW&E|$lESh+~KsDH~brV5(0hnaOUWIA7X@@5wa#yP^?ZJ3p7UCtk5 zrpG*0_U)Sda6wg#`JQOrezZZtjfqEqk>zn4;hG@XrbnbAHXeH9QE!IRL)A%(Z*!ZB zc*TW8C*z$UUVys* z3RJ}~a4Q0N0edql#2HDT3O}JQW8=W*(08Ow6VDI>kxrZ+C(WZzBOaS}=6IU<@)mT# zU0iqbFam{pVSr`|7rxi2fh0(IZ=XD1NTEgo+<~oU6J*Ws-=Dm?BP+qa#j4|@fF5LN zhB>_QItU5sWO=}rQ3w~AjNtdqEiN4l;}?ewAW|f;=%mgpE5vX#iCzy5h9*uefi;Cu zPq?u_J)JrnmjmsPqR53;C?d%(CpD%hiNq2dtjZ{&@*R6;=?Z-l#Zj+*2;ZRCUeRur zuo#=@J`_L}R8%-Orc%))X(KGs(6ij4qGMT_ zi!9^O1e@R)1#z_M_OHlk01#E#?Bz*)|u*MjNk>-^(Hl+uE7lrAUZjLiEt`EQ^|1nq&HMDB&8cilVU&oZ8}c zSjXDgO0(9EGz764PFYR@krWZaPjmp2aOXlzX=tTDFaCg%RNd2&3V+CqR7&YMF|QLd z3s4NZe4cVOa0xw#-VAFL=o$6b3s(iLxvaC>-AaTxgF#~8FpjpJ`vux3xk|#hyI>>w zI_~VL1MZNwSQ~;CXaYOD1-CW8f~MFQCSQ%}UmOn;aAbS979PAqBv5aXl`uGkS>kdU z#tSz_L?i}SAwVKAsyr%DKXt1(&_^VOO)<8=1fe(419tsDQ`8cI&qE1BB5AUajh%rc zLOL^NOTq&v6h}Wez=kXGWi+xD;jm>y9geHOSUrd=fh=fPbASK_V8kv54S>OuSN7wb!|1m2^miCa!8Uitss2LQE) zAOj;ukl|p#i}axF#k3G66wal+C7}rHlwI~HI@Kg6tWqOQ;%a%-Rwh*w;B)K_1ww~+ z@812xAO3Lh;>DL=ewhR`hC2;K zrx+AtR!m+>piB~T_mVXQ>@BjTRl%dx&Y;X};`;UL=gyrY#&TKhO(nGUkp|FmE)=YI z>Sr!Pm&9n-Gbkp3wU#ukt=bBte6Lu8+m3kT6=3lo!y}hvB(NQi6i#A*Y_<3y>1DL+ zBr`ldeE9G;zxfU6jS^^>M+ROCy#Q}1XGc7OO7!gW^YdP-{NL!6a2$C0AyDs!Pk%#? 
zTi6Z=&~IS}%qkITVH3tmcBu#%n%%YNb(z6&s09}Np{i0kB39%jF`( z(NEnfomsJjW)|2H{H6JjW)MlA6No~QxDHhj8GsRa6s%TpkEl79vw@`?=Icxb$R-qk zeI1;L$2wYo%e#sjlS-*nLgXqjMk)Z_vU40-5(VbVaa0Cu6h*CwkA-3h?E||Yiv>0l zb?A_KZ+@^jGz$(k{@aIe_b>klu2j@gvry&cR5(8c4#XD=YzPG-B*wm)A7>2$bDc0L zryTT_m_P(6&?_`mQ~yF{<5w=^6A@;K+7|3)lk_i67?%>ckjvAVxnvk74Yj7$QWZ0) zE>&VIFK{S1Mw3OlQW-Nl`f^Max#fxqYftFQL~VRHm!|1Llk{<1h%E7B3XMZSCT%Da zC|p{IJuY&st*x1UQ!Jt(M(>g3K(A;jmzRaGP4+x4Y60)jOZhYh?27;$%_dBM)-pPH z*2GWPct12eg7-%@l`poPXQzKSkw`)hFlQ(e(qh4~fEp341nopW;kY-wM&pU}HLL@n zy$i8UKn&^m5c@PTt4A1ZL(g;>Sx#z`G~`M=7dH1a1A92?qAW26H#dM}ajAnNSGWz1 z5M&oDg!5|qMW2%hR_fK_d{107o~*IOCw6CIsk0d~v=2Kr4R2FpTS7b+KutTg9yX_l}` z`9d<5#Cw506wsiXg@3-|GPm z)#Qhe0AJ$=Ni$rbqm-N>eyBC+a18WPlR*+z<9LzQp(!4gMOCyMHVPePpg?vfh#sV& zmg1PTGCJ>h~ zGuH9A9KB0dS}z~q1%f(-49H7AD2k^KI!!vLal(O)B<0XK**dWS1z^)P>4PwA(82VZ z#F7{p8p@SKERGDxiMilh(g-eaV+`o9eWNAy4wyDQlD9&&>$U2`v=Vi)=ImNll$ugY z#o8;TZAs-ILtB+&(6n=zX6YOX(GD*A;S$Idr5q!p8iww%;0{K>_KN<+{%h8*om4_Q z&m+Ccm3U~JtVtH{cOIm4m}Q=AN3=)X+`gq;;?%)nHV301P%R_#i`}?o05C{@$DWQI2?a``{cK>6oP8k*cYMF!89!(e%CY$}3}IV?6R|=TaP}vMk~QbbEJM4388`1OZ~az>Y=Hk~iLX z1CEELm;_|xvWz>Xw=_&QG%hq0k(LFkWCWO@p&^>YMH74*|L^ZKOX-&U5UT zn8viAx%Euo;QgsoS1w=TwE0+qVNbqFvFku~Omg--Mz>|ovdnR)JKoGBc{9sEKULv? 
zpd_>LjwJdftZK4M%|lowFoTeB82X_?wUl9AMG?%k-2{bW!L@Uio#t#zMSRDqRp;CX zD=}xuAoXIUoJuB`mor04#+j2K1s9pUV^UelSCA3NVTuq`` zE>sHHe7w8U<*IdebThGMx`G))3XzkRU~0?6Qo7_C-UV^ki`o(MNvW+KcNtVkg^CL6 z!p9|V&3h^}OUB0*hv^n37Nyn&LJ=>sx-1#56w-Jk+Nlt)t>K&XaB_*DZy z3`xFJ$TQ!L1;a7Ut3ega9#cdMtW5Dyfi6b#JZ+;_;;eJ_cIFNTC&$NWOdJOFggs=&y%@L5ux@2SHBHrST z41=`CY=W#|)&S9xX2{HgnSx>0BO~Z|G9k@+Fu;_o0IgSWBpoRf^CV;TMKU~rrxJ8X z`7}juB_aoZwJL*jF}nf0fPyULaK{0u<1`Q>NBlrkC@dqxVCkTNBKt*=;iRUMITfnj zN-+{FcO=5a0!u_iO}0T|tq^xc&D4&Bq3JruR8k|VWw0jGtP&b*Fot#rbami}6z?uL zdZEh`j{=%_XRJq!%ruJ?_KBm#W17eDnAjLH`KuN=;wBbb+t`5Zpmk2BkcOFyLVvrl zD?t&YGudN>oPa^Kg?F!;du4($TdHY#)%574e`*8PU=L_Kq6#!^yS14wwKPVh2l^1p zYZ+k_)Z$gNb2-?Eg`=rx3Z@-{Fl*IBB$7aEWz#?{l*9ohq@6EfnMQyCM7xzCA@vkd zu$GTdBF5N|o-fym=%W~y=>4b{={W4Q7nQ=6(B_~#sG+f9;WLDLRZgJ6*dEI|T&lVe zgmn(s2O7p;Cw{jnt(4Wck3b`c5yH;z-L@bwCXH3*Fc5u7| zOc$lGtT;=>Y^9uo?2}TSsY}vtOQjr+S#j_R!=i`CWxjP`XG!V0vw;e#-hr?&?>z#p zWds0wCtx#Si&4uqIVulsMF+NVsCkFnO$MK&<6+(t?oy-dC<-RhVK)E)b5RgRTTq=)RGySnb}&h2+5T@g&>? z={|BDItd(dbv?}>Pp@)T^bTpRIM+|XmKeI&b4XfUI$DBd8*Rt}251%;z&XFH2mvD| zv51(@7UB_9_Hc2OyTOiR3I)AZBB2Fmbc!$Pay~~t%59{R2$T3YU=cUVxDSRQc6vt@ z9Xvt^c%cw%$>|?54#4CXq0WgMbP>FU;rm&vNuolL<9DPM6n&LaC^BWUUSMIFArESy zP*x}t@lc_d5#IS!)cwRVVAY6+*3f6O&PvA@pJCWx{+lb%9#(U-;YsLV*;Pvi4v&Cq z3(zG$5CJnmo?-I**a`NQ+=qHF3h~z4tPG2+^VJ&65O+>2mQ`YjaITO|q&V0U{VH1~ z^a^DTYGDFAmdbe?NU*el)Q4Rp3Ro%A%fX(`9+?%_@|E&>sFu~zGpoIs9P0!ef-4ap z#>JvOAcSctrB?D#vKr)BA3(@waUe{uDbeqQ5jrnOK|Y~v(&vg-OG)i=RaxYZ1IrR- z!mRoiQ&D&W80KIuWS?BIgzN{P;n7K&ia^538jBPtk=ZOIsROx1D@?)<3`IwtfY4|t z$vKU#u2Llr)w(XPNsxB|I)ffcMn-clexCvQOC9VkgAH?g=e0C&Z1xbqNcT>B{B}PO zFf1VZ9ldD*I{we$qf@6zHf`bWYU_dAWR! zNUTBMdFLI>d-1M|{tlwrTRmq4Z$(ixMR_syMfS)mFR;CZ@H)N@4I{ngPk;Iom14z3 zW45QX7=Qb%7V9{uVu9QDzV|&!fPe5KW{+}qEB_Gj1_2sFUUwHBPn|q{^|LQn6pP2? 
zz=UI#XpAY24^hSTEL7yaRH!7tz(kAkAM7Q`pzexd_orZ z(jMtaK;a(ftQHCZM4x{8DR}fMg*-Iu9$(KO1a|Gl&)~NlH~C-?@H#&|nW+Lwu^rek z5~aJMY>5p9`}DiX{RW57=l@yiZ6J3!$`g-lM+U?IzDZzNJT8zV9d)}cMan$lpRE)wMPkC2!rvIQ*$G{27@C*Lzs)A z{y@jVln;e3Q+X+2B#=u5tTo44ZFM8t-P5OOTO{1s+qbr!Q8ZIVH;t!WT>zTtHHD&H zu2wcO>AwB}H|4^D4&{6mT_MAa)QWD>g))Icko-Z1jy}<>HK>j71A|gYghQQOJy0*J z77VmNp2VzbFn7X45B#}FB^!S`*D}S9&OYj9Ug8IlA7138J6OF`VN(Ul^rvrA*i7@N ziwhvxohdJ2S|}51>Dg=1u5Rqxm@hL!Mk}Tr)fhS{AR4DT0`D9oUa735GszPp_14w` zc!|p9sQ)RLz>n|FQ>1MzNA>yLbz!$$wsY{CS<8UsYD+6*GBC<>>OKIZ5i*ncR{ z)6U z7+hUl^&Y)esVo}VXm-!Q&{{?Z{Q-1=NcK2&JsOZH6j6~#m9xCMGC1r$7PiKa-pR6b z-l-`y1TZ*Z>LUf9G;t+V%D|3#C^VBUq6!us@6c1>g>%U&^R{5vM}2+Q1FuysG?BImkJmrGbUxSIjNPQ6A25U z{^1eGk}(HCB95tJrRcWgFjPTYVN4)tKqMH;tmXRq2F2B|NZ#e*>!^#D5Wvx3&}#@7 zL||+1FfUpG`U7{Ah_Gk`xxS$hEJ^t&L?gDYup~*BVqrt7LYCDbV>Xna@N@n#r7}fh z=N8Wx%^3-RC)!>x2w&Frphzg?mKpfuB5RcS;=teughL3L+PXSZ`CN`F5KHKK7=ZJ` zM4u4~EK{FU^W{n=Uruy(sjg6{cW`K7W!1@ttOICS(o9J~m*f-rdz{TE;8JXBXC{DmfT>@uxP5|+bNI$_j;Ga`D0(Ju@&g zM8#B?Ix(Kf$&4Hm0PqgmWye@Wo0ft}=SdkUOzU%{^7=+L+1n>Lkfx(^V*@=hi*`~q z(}|poBY1Wze^7wDRI7tX6i51~)VhX8SW=ToG|PCbjytyxh&Qq$k49Nw$A*FJ0^Amt zRwCU!hAQ3(PR>U**x8OCq<7{8YL&aTCuqFc@)xi`%v+*%d9VVZG_n}JKq6$bOY-CB zgF%3S8$`%RpMU;&9J4~Gw1ig-JoHUELaUlX6?Rb|0q)cSEwQ|T zn_%pSk@l&PdaXz7RJb@52mv;l7e9$|ZF`~k{@_{n16 zvR71xju@96e*OO5<@n2d@-PiB405*?;o8!x{2B2vMZUw%nrxSEoAk9**8baa%}ZW35xO6CK-jK_gz z2m-An_h;y{kDCevjAa-okWpYvgpt4Nz^V6cJuy$vgs&3p8Xd2O;|r_lY=L9nl9gZ- zyBRiZYWk!-nqe%jaP@fgtFOjq=9yp19s&E!O6b)1^!-Ne?SOahRotD*I7}S% zQWxqlgp$YS?;hyt8@zt=Mk0ZCW|WCKx;=~Tzbf?>pNvO=Xe{*J2Y;NNnM3_3EIBKn z>FMd)-`rvkCXzkot5|TkW8AUqKoN|H5GMPhhYJg9s3ymprl#6?avUmoxVS2#A|(D$ zxXfY#bA3!j85TA7jMSpB_dmWeK0SvhubEq_Yvk1Fo44ZD>3X?R zkr`b!os)i8f@~Z+?_9R@^{s`zi8D?`Pv1?BPt4r><_8o4!it6KdkCdN5%z1P&Bt40#DLco5;jeGsOXExc<)dwK z?(Rx<_g}wtPii*#z6|NaJRFZ;uoS~}IIB+)l7D^c{^ax-6c*B93my$-wG9P8522im zoTlGwxqz}AM}{wu2qPuTWohibt$|N(gbBfR=PPI^n%ov@9{r%BSQ~eC1&3Aw%KZ}) ztTwEytkEis11J#~jaUxJWw7*=NDu3-1+IN{bBwV-y&L9Ej|q>CO@4dt0h%Crvt^;M 
zzLIHU5Hzl_;ZGlbe)80GAlfBn5(u1_I=i%%&7?WLl4)iNr52kXWGaUq0%cb)gkCV6 zt6aZ%`;^8Vt<8kH2O`Pt`wJ_fDEvnnDYTNThdAAglr$)Q(KNmH;g!=2i>Or4f`o&o zCMTEIGb`&k+_PgniB82SvSWo(Ae}BolO2q(>)F!vukW0iVlb5^Oh54kyJ)Q%Y&fzg z#vMRp6SjJ)w*8?`Q8gGurQ)l^-z81F#`4w?X00*ds*?+=17r=2`MLYqn za`nrv0DVA$zo*V$kOGL$WK2qQOwOLY@%3F)Rcu5GN94GM(JB;4bl^FPApwu-rRt|w zzUUhqWmmfJ9!(CNp1r@gzP6r?a4aG8!7?)@hZsp}%TSpykKbA@fA!0+CNEx6mRclv zaxBr)`_-+x3{Os_EL9-uaBQ3xZ;Z!LBIBv_qbpy|oqdi)IS|3DZes4t!tx5cQ6WW^ ztRivjfN~fuLM9AWsH0iETUcEF>g#WY#>TYpR(<>ct(6IPc?`q{+jay)iDZWx`c=3V zB`g>Izi~a1>I@`0C6Y;<8iCO*t*&JXI8e!j;^9m&pQG0?A~XJD z433ZZYM6DJYd3F?PRz+sJGm;M$(i$C-M9rVSWu8zNxq1WI5h7Y8*C(EhnSqUzyJP+ zLnp=q3Ct1Dt&^;o+M*4=3QgkpvuE!h;}!xVHs~*5eFr zEPa_p|A9~jQv>1D| zPGQv%v)jdxMIn4CmHyxdKfvJt#O#%}Js0U|7{zk&mXySB=HC7L_tCJ!V3}?Z$pz=$ zMZFFe79(!$1s)lKDU;8kNldFblMp;23*y31+oM~3Q46>O4typ6w0hfz$Gw{m`hdBi z1}`G0$Z>q(5%BiBtiHo@9U8ab5!cw4FJGo8zRu_nL0acp9beBN1j2v&w}1N#Hv72A zPYMA#F#L#8;0W|&cmZJ~6*MoVEOU~jYGI}TcNBxv6g_fcc<}9aev8{+Miv-pp6y$> zi;bnym@@>!2$T07EWP^rPm?34jdTnhHVdezx_Y};78mZ^xff5xnRT=Emd(UyC0X;u zH83U~3rp!_=fHD6{Bxvm&LUCsV)4@_PXGR`e~!jdm{%1`97t7VwubnQI9n`LbA|Hl z2TMQxmwz7ZKdD23A|2?H`}?~;zjB4yniK_$+$Cl*nwPR~ok=BA@`dH}#LU^z3oqi~ zfI1LW@j@^*I{e$;{x+FRC6XyzpXvx26jkik2}ZKT^2$d3!Nb)*`}4mF^e|7*ZqOYs z52OH=F%m%&)#{EZTOLV`=!iigzNieXyH-&v42_)l^rH{4(&A&8Dj@44J!{4-m2fSc zDLhzQn?C!(iFrVTgpEKi1D{A|sw*6Q@4XK?yL+)1XEKdm17kfVcxY%=Gr84uyfRkb z{Hy;J!JWA_hsiK4IeBXA?O*+44EG?+7MMM2PR{Vp=FtE%4K%sm+*`W*(rf+WXLY0u zl0l`?dt!L`{{4Fo9&u_}Bo5vyc+;aOw)jE-E9o32=N(-G&%OEQ91sl>rDv>`M@NU> zed}F_3a9c%5*)K$K*5L*Ec-={L8gK2m8Dm z@|x55?}Qb;nC)*DE?ltH8Xk8_b@({1zgrRZNUtZrG0jA_yvD<~b>BIgK1zgFt+M-! 
z^Q;MbU*rY$U2r|2SH8_^m>4kna18fu1!`>wIt+zJhlbyM`)yQp;SgtAg-}pPZy3YM zFAEv3tYz;#Tz~zIKTD0yBAT*95n&v`40&+q!L8d1j~*oxc)i3b4Q7R_8!ZA#2UM&r zEU%?{hAzDEbMYTeCxbUdqZ21j{^r-e!jc#MgA9xsP&y{b&!K!Zz-F4;_ZNQhvwsur z8)F~~L~)rN8X4~Y@Q)wnO0`%l#gK~Ssg^p5)gpV^=(Z=SrMEVJ^4@n{Lk`U zs;|JsGafeLu#`NjQzf=OHqedU!_uA>^{*xcq|v$z@o(`I0+`G z6&5dl_Lu)duwx*=nO$(mQfYW>_`?t0M>&O_3+!k)vlK-U1=mBloXr*Q-CsB}|NQWo z=L5kMD{L}Gk0wXDyWW21op=&?1_^`DigGVHcN*zzp8YH7Y~{_L{zaf?NNt0_90moD zZI^WcFo=@TFt}T%*p~J15v(AxyXWSOn`2`Wa*7SsAY6w%a43DN21v0^#K2@K*%c3c z{K;pSPIGEmx=@ODbZu~~MKqR|IdEWUEtkpHe*9N|jSK)|XR^=Y487yT=*T<2{T+zY zQn5P|m3}5`(`p2#4w*vb*1d(xFa97kIadp#U1iC?813wcmdhV~{Bf$ID-um0i_tDA zA)xzUIGxTftx|vOjeqsOY0$wO0WJ}aj-NdB>wo@71RKT>)XDTGs%Pf_neh>w$rc_h zXXv@XUKSN}tOYV`vbVRK&fmECE#q1!*^w`>3W~ohj$ex+8s#godk+?(@zg6n{Yw#8 zC=Niti%~U@0rUcnqj81Z*o}KlRJVxJHs3xxdLEaP>co{op(|Ie;7OB!mxFlM9!LM! zY9PP|1D(CS%L@-~BZntDS!F@Cko|oHS&seGxJR$txxX0i75{uOKr=|E70Vq=lh zqkX^m)qhKM^&sZ6F$-^SREq^~hSl71U}LSYu#$fLCqE4jun#FI(~xkyySH!Y?(MbJ z6%3Cvg(5q};I}ZwSR%DiC}awyM~iI!7(4sMzobL4kqKWt!H%xJc>Iqayq`#Qpb=)7 zAEqsjCgt)*CWjkvr1xw-@RPs#pS7wd5k(%U`^Qdyd;P05%#cw-AiZO)j_AM&1wK-0 zQPkTDi>wy+&c5<`Ac!9cd~RT9qS=c&ySo~&5;pB($U}NJC-1y9I4j4V7d;mNV0?U> zRF6$;X-v{1F^g^J`Z2-VvxeiByF$Rw#A^a*sgU|iTe@q87QFz2ppP1n`uuwc@UHvi zmtXK|aBu+4Bl=>v!G5S>pY1|$LO`}+$%4#`?I!}&Yp=a#L}hoN!L?hxk=(<3_wM!e z^^!m+5$VtF!p3tS(L!NN<$u`ibZ2zUVAJaw|q5@fdcvU2X}AvIy|xh>hNf@ zT8BY(3R}Pkt*@^44UNWohL}(TW7jT+aVMv-%?doGl9)%Z#u^0jj4UiX7&NwjL_=9r zH-+}_IDh6W)Y8bs-{AfG59pHgh3$nL$2{c_I93xsl+Bp#oin`C@iL8VkEZE(0-skXF^(kYA}7fbd(cj*Vo6ElGzDm+&5 zG0;F@l7rciv6-$|?}NqrC=;`p%6g`>k}qa)6O7SssdjQ~X8QTp0-pIp1<@;eCRY%W+2>IatEPABFV}53z(KH zu%|s=SzgDSv}184gGO>AS1ScO#wO2?o_{G2N|gc~%sBC6zz))2GBrIhy^$>Es1g&5b&pR!KQebYP)p+c6CFDfEmY}D#u9yl zV<*NJS98m&={3@4nFHt9^|_9fPc=R?I&H1R%X_R0+jP> zFI>FbbrKzM0#z~}Bx=^UXrczNsgJv|Q+JaA6#dNDQQEdpCfEb#REo9*Id5`ul7Lq! z@!0V)L#7YXNSZE(kKypC$!WIBE-tTZtY_$Z*z)Al? 
zIRC?#0kt!S6mr_j?D*75y12N?Os%w@2CD4$4y>VxuSVj@-iw#t2=t()Nn+Vpl!s~d zmnJ68%%d$xvzD`p8lbe6E^QQR`9g3V4P7mAYGQ8Ug&zchU6oo~LKIWAP_(meaJYYP zX?ZQ3W~X)~TR?qKTutZJGxboPv#yZmY(-Wv#YvH~D)l$@nk4NT8pj8(%dR4gqn zXIYV#!hEIBtP37Qlgq{(| zSA(hEx#^j74x%kV!0BQDU#4i(&^2YV)kJFW-19$-jZD{SDXc$H;$d0OsDldR)a=>r zXkzivav@*cpo64~`7}-rI5-V=5VhfnxtYta1p+A;2s0#is$4jj?3kIEMniG;0Ua!d zQWDGeY`%t_RKwk9h@5dT7OS{{NSru5-PhB1 z`|dr|?yKp{dak;P7*K$#fTHk;lT%YKzX4mQhS-3D{@raBiNxmS&MdEDRmSR96%?#( zWY)9!Y!MUkWH6k1{_?9KErww-nUn%jcE_Rl^A{?aVlG$2dkO1R*!6Ct%dCWDa>WpX z=Iq6x*-L>MxjX zWM?IuWMht|zq?9s%8VIGCn;&{v3Jc6xH+(q%b^4|O3<$=y9Juc1zfRGZK;OaWN*HCL}*Wq}#M+G}3<0lt~1 z5wf?Qnt1<6H|3O-fAi)|1o%dg$A1q10fWEY0VYll%y+^55pjUZwj~9Lo|>4j*Nl@V zN8ftuty8B?+Ij+yeA0~oYOua~#C(LIe?tbvW2k3~cb|P&a!J`FstR`Gqx_{?R zcXyWqlsG*BE^8+e8UdzP+))#24Wnyp$8w02-L&RvDg;Y>T)YgasKGAi#uO?bTg!i#_cy#U0+ zcaa7)F-qrw0U6L7U(Xf7C;vIHjEe^%>o);fh^LQSW5KuU&zLY z)v{-*)kc@vhc?*`LL z_)yexQlEkyP?@Mju)3A+0YKzzFLd%-Y-D17WK3JPL(=cb^hjDuu!*?}d1XIN0f0~$ zsY$k4iIeAFIH|qAN+4Vqcs2c`-d@308x;cu97FK8f`MM=@XXm^xfdWYhp?dyW>;xk z&hqesE#ko>4NGSgh7MHI7#fi5A3t|_{KADmAnj0`!ozJKoD;QBF4-|K#xqJ()J5IC z89Ps0G*4}|;TbitIj5t8xVLS;Q4s(G{4?d=V4+MiSbfP`x3DY7A= zOVKr`S$HvO-O~|T6yFPIk8;WDVL zFd`S}?T#GGDor|9Iqvd$Xd|LEf58-DkQGjixCr+usw1&A^&C+tp5#h>EC-@#Ga1qP zg})KoWBhM-3fG_I81v;X6&*15E z9j7n8q!Ny^<0Z)+S zcd4nA?YRu~pb`vXYh!I7veB^3S_L2Az{PB~XW(>q*J&Whz+tcx2jFL4CQib%w2V`7 zKm;mlxXTCef$meYFOIS13h-P?V8XD)fp~h-=u=~xIY5+}pL)WvXh)uCc-q3>zL@K( zOCUK+4#GCcMHU7rXp9}UZ0{yI7r8dF6yf?;yU%Q^)BgnM{XVgvGy>01?Ph{N<1 z4OcZViYNff7gV_zrX3yXYXBYbjK6_oQZm4>zJ|Aa6^L-@+zGM9sO4Qq=y3UX8-w9T zI!4buKYI3}LkE%r!#XzGK#Wld3{r^TPZn4@*@GCRvcf+)es1KXNWt1;Dr@IiK+vuu zQSF8DSW*bY@KanKf9VI~LI-0e6vc-?ClWARm+59mVt~@8&_a5Cv=dKhz2j$L5%c0F z^l^xj4*``LU~+C?YDRrf;<_OTxf+ZCM*s)`%cTg~JL= zNDWS22JbwR5gjr%^SILK0*FrhgUG?Mq%tyaN7%wKcmM49{`s@YRmAldtW4e6^>?Hc zmXJ7gki~wb=fXWQT{3lY{0wI*ur?;DpndShoS1Nx6&gGV$lf1|U|z7d0|&{tD#Mm? 
zczS*q9cwu+ywgH3j?BPIliv_-Q7E(i1RANi%;_iUrXpUEu=}GR(qaV}43q&(9=$&3 zPva8qa5I?i@M4!J3a29L`w$Eg%Pt9zqYnlFbCb0nlpU_#B4OSt>KLbw-7>8X&4ODK8L%} zq}N&=x%dRfBN};qf5}37NRx;sP!#V>wA zBzbAS*K7Q}`7zxSlXP{_uxCj3?OTB1_wSmHW6v)CE+z z;5nx8N4*Z`c@jK2rLe4>fFTzFF2_jW_qu@jZtWta;Ze$B=Z+aXzWeUG*h}r0h4d82 zn7AE}yt0Z6n{0PRR@||Ww-=ZzO5gU$DkFiomqRtD0*dGqBBC>6qHrxzptVzzb|a7#Q5@419JmYE(2!JsKztS@l40DZGj%#;3Y$dUDlft5_}!tuqD%FRs!kn9Q6>=+0K&H%4Qy;|<&;wGa@ zT_*bx06z`7ZG4?euz4eg8*CdfxNF#Y)!CR2B34Od zBevcz2&uWw?o3)jcQw%f(T+yzBM8HRvRFH)%k|u*k7|@uFTP%`{xlMz>~^v4j^w~X zFVk$gA+a9YbSYYP^LVe7Z;F1*{k*K2GeMwp3$rqq=SO?ldD#FEJ9kLW>EvFt|KAwmscc(>4fM|SIy%v&e zy&Ca`G8QIu$v{GC6fY@{FjD#gJErXwZw;jWWM2BF016S9Fp*8gHYM{CSTZn30Ga`b z`0(+>&ZcDB3pt7yzr0b0(=NQxTC)kuqiCI12iWmoj$#oro!#soy(}#Wc=dvM)M{2H zEg=9};iYqP^Gi!BAN}bHXS+d21tjTOF&bD8l+%II>iy5JJX*M4EpsGzg_Bhw9!_e< zbvWm*j-DJIoSq4!o|lJi4)tL#5!R+S1z;7k##%^v^~NVF_ikjC9!bJLPUGm>MDogi z{yi?dy9Y0$_b|Te(`tJR^vlAoLQZ^on z6!THMjdLVq0BvtL&k`DEBs^H)5Vu6IZw^w0t1*$l;b4W$8%$3bQP$cdAeUXahmK)l$|Q(&zXdy+yijzNDY#D1OkKlu3j zZ@h{BE_$-VV_gm;4AAHrJ0WW;N7yOF{^P6<(rV_>empwI`5F}GX6G21KmPQi@4xag z$>b=#R*u#-w2GWwxOe@FM-T6DY!7k-Vp*wDU{696RuV~_7@h2&y&Q;hm|Lt8Wj~7a zurXH9U_Tgb1s-mE@%iGzuU1zUDna;kOe_7d*wz2%kFeIBL3UD|4fTzOYpF`Cr^KP2 z4avm%8JZb!V3yP3eX)V5Z|*J3OpaEH*%&9q7PB1qTmsipO|UFm3PzI;7uJ@lsc`=U zi*QIoxN?-EGh~=bK+Ei`(iQLIuiaR_emlRuo`YbbvIQH<6~FvHZsLNid!%pl#mk5b z;ISO(7lfWA#iV~(ttdFNml)3jIKeV=8vA{k`q{q`jj>untf$9e&unkedSRi(>XTLzyERHB|S+(0L6%7;|^hF$Van_ z#Ul@9ai*d=LX;gAU?{UZ?V%Gem_74z>OPjUm^Zhb&+pN6`Wh2d6dXtnrmHm|?~$b? 
zuc(C%PB*vezH2--^+z6ez8XE&pS}^diRGR1wl5YK!p-OrQ~^S{9}PUsn%F9RI`Fs+ z#>U$HcT3{unf35CMdY@^+8V*iDDXT4;>cpae{A!taH9|sTP~~wsG6_|oCHV)gFQq` z9@~KZCh%iNdA?)RJhs^Y4G|jjJvL=)6M#xEBX9@K7_dR;F_})MKL#33%?x>1~6x5Eml+jiy?P|LH z#Z^425i>n^W@2h`YG&rl{5kUCH*$P@lHubY|LZTyD@%BSsIVOqE@Uz?`b1gi`>tr> z+zT%~SXjPw?@=WbD`@`&8v>#gj=2s-s#x?#lN?!p_rb!OKly1Ou5%aoOzwOy%WaUm z40MX6hadk5C$<}F>#2@}?S@1QwhQ6p;e!Xi{7?TUMC+R!<1c^t%OZG8#0WR>0dTrWaFN(^FTC(*ap}(8hbaHqpvg&|m0(NYvUihjER`O7{PEQ*SJu~8IYS~5i=f4KNQ`5* zj>LPa=+HRY@Zrqo%{KCW1|3>51}J=kJ?6O z+av8xaqg||jmuv}Bd$e%8D<#!(S7W)1lpo%8pZfIFAN&hm_MeGs?C2jV0cvswsJh6 zI0YIwbqO4X>UgxUi?#w(MjhRT^g7{en6|8+&C~?IiFw z!+YC?fCs3p0S}~G0}a}C4Y7gApU)+BJ)+(n+R)(UB5e|7;||+SnUz^gbGnv3d!3E| zb8^#ew6uoZ3iE?|dw5%}t>|+?z}8&y+sbZR?l{xum0erY(54`ibx4b%kfe3hDkj-RtNt$s4#6#^npvASx1O~e|BnRQb6#Cn9HTt889F4nEhL}o3 zcegrVcmIdCDa;6tBxZ#l6%z|kBZjC}-H}!5zR~rRU3*7!KavOsMs#n;c#`M+KUZq~ ztqZsF=I>(ORLH}iACms?ZY`qQ(usC^yJNh+STs%d@87p%{`eg;9&Mx_LH@97g@-K2 z;Jdy+C>9<^mYG#X*gZ)WrQfzE;Z8d*ZtpwK%#}T1L5E zv%CNuip1Vyy93R56b4g?u_WDX>UBwRVsNHiXeu)Uz?+}X2#`29Q8 zjkPGyso}z}K6EzVKm?;6UYj@~wDVv8+y5oj(R1b6H#crCe06)_e*Mn1TOjfF z)f;y?%;Tqj@voyj101$1kDeiHzA>HAF|+)^J=gu)w?Fyt1JFksFgQSxjK!nXQVBe= z;+aY%I6{4NXz;E7@?Y8ii!*yHqJ>BFzk%>uzk7$J4lWwz7a=@8hKd}?w9)M2zt0i` zj*Y;dC2X~@tuSblimXf1nNg{r5ty2qvW`uH7P`f7FaQ8R07*naR95!{MIn@Izlhw* zam>pY;-~niWvSoYXGnv*!?knl4hbI{1<=f-3%-eWp`#a5XdzRD_uqg2u}yu-5^TY* zn;$ke(4V5+@&cWL9>F|;?m!rvHn*ld&(_^gnW3IOV!`MDA7m4}Mg}|-Hg&_TebOhL zHnOz*?~ZJW&WOB)w=bk^<7QT`A-CmiXeto7kbTD>5#pit9b4FxO-Eh3(Oj#gv1HBf+VBj- zY$FS^P{y63A#y4;d@vJ z#lRrkl8G+e&_+G*_#L9s@W9;}dhY^7_gd=4UGU|X8T>`GTYdm;2oH8{D)k;27*Px{ zu?LSwW1Zj!=>ft*me$X>?Cye1QiF>Vn-vasL9eB0+1)?T07KkW!P5>@x*?jbMCUzF zMoVnjksLny=p&OHh$r%Bn82P5_V*;hO~`6`+_R)Mzcd(vO6FhR``-8bIBj#yDjr=h zl;c$d;s|^CG8An%oj@geu2McTImXejHx-QINDx2j9P)@-ws`y2SC=lHEf(_~F$_hkg-SJ=NajoE{_q;W2_@Ja zqp6Jb_mv*p|8`{d1fClpC9iD?=u{AOup(X>gB(NCeg5S)&wmdcbWQ%^<4Fq9`(he< zWciLS2RUj!%rTw;IlkcQuqtLY5W%FlSh;!q+CWct4BrKKQQ)9#?aL8shLUwWjocO& 
zQ}N__p|E!Ao1QZl3n9D&;_a#WAOGXuojG#`@3%6P_WKUmC*F*(Lg%K$}UFSi6qWCa`I*rjq=r)tbs1PF+`)`L@IgWMC#hD8&hY_2jiTv zfvTc{fjDZ}yWd>9_}uwoDW8hR@zswM7fr+q#UcO%$e3+Mi=W@@0+~Cpk*sO1W5b zhc}3>3Z<{XV|Q-?H-lyL_;Mjck!X-*bzJ3hKxs9}0zjp<^7YNWxeFx@rHzQ=86G+2 z+K(ygj~*sXM)e+#1J4iyo+2*)3_5L1|LsYN?& zqIu#GXtde+eZnEE1N%;)er4P*s!m=SGF67H`$S4YZGVt zsJbOdpmxSH408;Fl-Xi5&Vily@U;4Yb35Z%B?z4uV!R*a|$e;QO^ z-p|b$K$)NMRn$^G z;O`j<`?0eF&_QsYg5&JbZrlBuhgGr``f1{ z#;D!Q7){x9WOxJ!&<~r*%&GcO*1I?S4R>48)T1_e;P%mKOM3Tklr+HFBy zf=xv zPx=wk<31bhN03#45g!sDn-M9&U-doq(=NB^|*0v9-<$v;K zH=!ObYK+vT4}=*dUwiGf|NMXd{g+oiTV7tof&mn>hXadE=&VpJ%fyuKfk|;9j?NFe zTgvQ=S1bATYCeM!FpL5gZDO^!zOopLhGOvu3gRjr1_GgMsl?gbIMzjbi?y#KT{OYf z_0=UdZgF5|;{y$eb&(iHxA-5(SAvyLio+l)I0=t*2Vxy;q2vXs!a(P6nKR)zWHOkn zhU3*pjL+esd&(a&x3nT#7WO9oJGO9LJL*;5IldBYp zQiPYw_$at=@#4#`zK;1cWK`ye%bf^_l%_;(d_sHwHsMhQv&M%6Lj2Z62Kt4^zyJIH z>-zOCmsgg$QYcO%;1LuDBVmk!an{ZcN5Jt!SBTSFF(8YElJQ8fkg4XfVU+Sf1nop6 zpI%wOuY)YFOYF;xa?mKGO9LpXh(l`O5gI{}Us+#YL?)F2zCOPO@QtlA?uc-xAz$HO z&luL*HI8YB;FG!wm3}Rhtb)PLVR?Y(kZ_I+;3Us*6uMy{!?7TnAfXZtWmZ-Oy1I&a z+zpqoa0idoQjyjQ(OePfsKH$&moMNZu26tri-q#rZ@qi*;>A~AebrVd83A|1&=ITq zcWlFPYP8k8?f45O4!YeY7A9Pv@K>hFChY_?=I;@*RAnwaXjJq=(w}tH1SH z*O)of^ZE!|@~JmWdg)sn@I%bortL+xxUyY}gH1D4V0m+w_eT=#;jS4$kFA}-bMxAU z?>Yr+rf6G;ahg+uQ4`Y<7%92C(X?&#H43&`Jgr$H`iTDefxkzUP92O!*!ykbg%a$y zNA@rmIEUc@&ZLUT}oVBnB()=oe7-wGmdnqODRR zZwyhXx`jmItEX*2!@Cvl(Lmd3+fTu8494yw!(jc>8&=`e%QAx(xt|7qXK9R^dOWoe z@ONT=->i2@*EieUW>oD6(l%jkcU?{8!>A2|N5M9m>fZk&$dYDhQ&ccaQGkas!u|yN zFZ<9b4Ptf&hCq^{{0xooexS;5X5;>TT0()_tH6Nt?%lgy?W8eIyPpEvnFi!=*}a|h zJXzVm3W)_)W&_ic6?U-PW`){>P>QhW1L{A*NQVFW-~9Wpu3b%Utd%Q8w1+4b@!}v~ zKm3VAu?&@-5wqb?#OAE2Xs}w$V;pLfP_5$g4FzGjBOb%OFLGs&!_cBpeyEj6Lq@cX zs1wVjqRe~o4p%EFm#m4=PCBO-((%?WPB24Ji3JH zNW&245r)8H1$ySgYL(AE`wXhMbm=?Bm_mHtOwB;<2gr%gnkPMtbsQs&M{TN%qx)A$(* z>Wy%Jwn2L<9ye(~xYp0@QZm$V_kK2E@E%Fo<_iom*ke(GnwWtN382jtZWeFd5alK+ zPGXOYeyD3p2lUWNz+uO5XARv%jUmerWF^#5=@7d4S>Hhj#9>X$jElNGFdBH<-5RTf 
ziHgTasj;P1`}&UKZ2q3*$JCR2sWlkTG3lZsw6OhF6n6^eqLGNC7%YP?1C&KRN#hTe z*I3Fn()B~t8?aoUUc4^>>s#K>5x-a3!J2Z&)bM|RhJK2T=Q?TR$S*KR)sRXH>14N$ z@PU0(m*hp(a~b^Wd~CBMA6neVrD3b?X1(rj9k1)|@=|(%%~HHjy%z8JNdt5NR(TDB zj5|lu5vT?FVaQ|LVR@Txu*u$&e#{>E>u_rb51Ye{@_4B90y~xD=Ygd_kSQ*RtN)x$ zo>}JA_bM@jd%vAZYh@hLm$y7=TbW#eWV}c>vre9<3`OKpv=>UM-dj=SzrxG{Y;zYoQl&q=C9( z(P6|@Tt}11O#C@k1u)@M)a&nSZGy%5V?Sf20`( zf{>h8u#yF%maubr5DNW&?7i8SUB_|mdFE4v!bA$31+f5d5J`yuG?d9bMxBr-a^ICrNL-$(!(%14#S(7zS97IW^IEW-b5S&4f0D&6L+`lirz4Pok zb!s|wssQ>{;$TevjjXOW2~Vx0_u<4kCN|Jr0cJuv(5p zURrF=&ddR>$|(~iPMK(9LB?84oR7QmMKcOxzLXPcxb&!Nv@L((0pG3T_=3JHiRvci zZ~nBq*u49w2iU1+n~nD3B1cki?!k|K@I$_}{<+V6j*~G&6>zo>J~Cld*yUZ=I6j6U zTfHqe8yvfVwpTfH;NX4t-S@;3zsAmrcOA8`_G%>^V1sox7>|Xp6j$mEc^K;pa@djaHD`_MS8J7iS-&EYd<1`#t+m?S zT77153G4I0Hb-AnXS##hQm0p`%~tCB^esE!&g{qJqvsI0<%dHhcxRR~uVv1z!^eJy z6c|~>;*BG?zN||J#n#ZFy(Y&tf$Y_{gq%b9D%>#8PT z$eLpS&N83mH zRFs71UWDtx)twH(ri-(>XzD}jB4S_HX+3c~~ zOeKrxrNl`i8!y?OI=~-tb}Nl})X@tn;pdA5a4WSpC<`eJnTeK)^68$j>&|X)ZCy!a z3|G+8qn(U2>_;N16BA)F>buFUT;Uf~WqXWu!?fK>?Fq}8eI6>X34vK0@Dl>vrD{5- zC0|pBpzer;zU~qV=_Tw zbE?0Lmh9S?VEsSwcLuS-*7YM?g)#Cm5#KLvM_gIknyVMP(ULP+L( z%Ybm}-$tFCdcG-!{6zag2LryRMv;=m_*evxie9s*JGMYd90A3xtMGuCGskc~^)2a4 z%v9?NiqOB&E?P^!JUAwLqDWYK-yJ+syNUIwUB(i;^C)s+7}5H3S0BFTo_k0qlQKLP zLj0*Xm>QN$RRsac&w;qkPF%-tp5PX_r zmXnLMb)u2%@n@hB4#mVm^Qo0}j;+SnpaLeCEiou_k{Lm+!I-1*m8sOh94lwNg82|^ zWG<8?5f~breB`W!jTi(Vf*Qj*9NHsZsg&e~lK4L3JJniT`pNgc$AKI7-*-QPKy#fF zFL83M*M#Zlh!VaOu(F|>9SgK#*yH$!ofmquPzp!Nb%UN)UwiF6ci)Zo^h~pf>#%o= zwK}J3bb*Fbu~|iJ+mIb$g7tZ} zSucx780Cjgk47lrwgjqNY0mEJ!a)qmdhk%H*P2+IJDf&Rt>fbiYeY{DFNWT^HZG$u z14Vq~Aps+h144AXgt?3rSgy_Z9P^-}KiD@j%LySr{PuTn@3{Z|`{|L9Az~toJYZr>9W7;e?x6X@rPE~gJVykuzTyqRWJb_y->x0@vF4X0x_1lG6kwjiD zS_N33cp6&dpv%WKC~u~#EVr%&yW__|NMHqA!yuSUi;N1zrySgzsRRvy{_3l*Vz*+! zBknn}0;MUppMi5jpn)z$;LS%;c47R83o%I056l8hwlp!JKU{`QN)ek^YAWRBvsIjm zUlz%yW&n>yg0*6=PC|FeRIZ6TMBpHdd-25=G1QuyreP{Gl@`|lLPMU0JT7vhMq`~S zW3y5%1L5k3=g#m7A=8{n@n9d!LWLa*e8WT-L@yYGY?$o`vtgCZ7x2nLd9#MKhIb$! 
z7q@OVWqDF;9`gDn0iMyYP;Mxxit7#|IcTU!k>-MC0TO|?J}roNQ;_@EH<4ovo)rHC zt;H*(g{F2Y>Q32>G0}FX8q48&3ez0ygH6^P$aH#MN`F_=yqLS?INF002qcfH7Mr6X z@r!h`Am&B3yrgv|Z;!n$zu*?IbFP4;O0V!?!!MGACdB?vl8H_Isl3*rYa^(Jg~3rz?D zgtu_4NI7IUY+vD|<3$7;aS9ZTA>%QVlD8AlV2c0{{=!PG9KAw>SIZM*CddevQ`U4N zu#C+-rzN0&0-0OznQjwfp+WtugW^ZDs7U^cd0L!BqI)0@*D6pdzlwPd6 z+wmk7nRxtiW~IoW&oW1^F)8l&dY49ZikKQ6gbaSz$D|{Wz6(6=^pd2pDeVOOz zEgZn;C=SVLS-~q5Lu7a#YYY=&)k}UsOFI^BaB^5xPqFxKh z3hn3CgP8K}ARs6#8#3;(l+oS*)1bN^_|&jvpFVx+=+OX%KzYBL?)k*sEVxXUI37cb zuaGMW?4ST?5Qn;UR#llD3+4sj;Ujo*OS{~HnLg&`kYR|cwDyLOd2Vi&uLa^Ot#8Ot zoYRDR_~g=iZZQ_)+)bK@3wh-P%o3&p95A>J@bM;Hp1de69OEF{viPV!A|t1fsFga^ zVFY!tg8)}dgl&wLq-thMB+Vi9X&2XH{=!6Esoj?^uwZiHbY%cMjuZy@uzUAtI(JXz zkPwz(jX;f2^HJ{YWu1e;;|kg(xm0CPzI5uv56s_u&%O7m&sCmAM5lFjG2AAe1wfgw zNJZ#*SI1yicqAjGDK0rEb zQpwW~bOQP4LpJ?&0#kMREvLyLED(K=H+7`iVGBxVOt}>wt5TlRN?9f#9<|a zhNyZt1fgpd0fJKTb|>OSBYI!PHs?bIccP&Ym|6GrvD}z|x@Q(ig9&W`o&=wQR;tT4 z9i%0Or3|XTvAkZ41o4qVC_!iza(aMgv1KiG&|`80lZkH=WFWvn201oI>{goi(R#X0 z+{ET9(pd%)9AqH*TQo*v^AKFpwPJq7xQ~&cEbjb^xVXhUv#@l{bY(-a^)uZdCQX?^ zJTHNaY9}U30=%6FpbRZV*aL8yCr`F`e{JeADxnG`0Z!N49k)(mCaFl(8=o1&C z$SZbBQX%*hcqkqM1==t+7O{-g+roRavKa@_0^b2XjA&mRI>D{y{xpd;+W7FERtS(0 zRRJSo@u-|(p`C~Yk7CBf#7RwPep#Yn9uW&ck5L3o2-qDMNn!^-g0|b{=J1LM1+HOn zL{4hhsRA_Bq?}^#2;mBDfQ5j>PoF-W1SYp`E=8u;`dCm93f*tlG>3$PIvDdCqldIM z^&0^g5!AR|+aw|t48;j-CsHsV-HMe3IB7b=@+h1d*{O)Rx7f;a)W71P6`?C{-G)P+t4!vNlU( zvW#i!>5QFWEgKMr_v^)EN6tgOlVduY<(UM_(>IWyAdyM}lDKb$E$d5(t!U}^@#7!3 z?bf_O{dM!|MAlj{Q_9TlFj4J?#5Nz(t*{YtBO!1d?u{sutuAB`!)PqR=G;^YU#?Rg zR@R#A{>VVdLxU9!illQYnT154IeFDJ9|X?VNN~Gx)t3X~ms6VR8ZEq2KRPA?A3iwQ=qu2v13QkL{&l- zATH^bg~Aj&7@}D4!n7g9P~us8OAA=FNZh~BBRQv^pN)ok-i4Ur$LaqPzOhnDFe+YV zpGO3-iA3h-lG-m=94pU1OH!osaE;t%A5F#=UU&g5lbthSu}Ii$0$;y+P%K+;X!PjO zqj*!2$oi_Cm3?pm#B@;tKI)5LYDXR0G4X@AcGvvu+NG9x%*Z;LFQqHG!{f?W)?gE+ z|I8aqqrw|IM{1=q$0~7-m7n6S05I7N^r0e(9er@z4{~{DjqN%C4my1@_>t=DYRb=gD9yH zp#Wluo++|bY{!;|gQTY!hqaM|&+v?Mi*DN2vDK%ZXvul=%{QMqb!uYZbcec{VbMD% 
z*6VQ!T_==K3%(({#0r9F96<`ATu+jonrCeMH*tyMMkRpnCVX{qv`_4e z829OQ@*F7&u8p8>oS&NW*b@?AmB^Nkuo#jRA?z1Se^`)-)qJOn?p&#}yy(XnbL<5O z0yIQ`VNW{#J)8u`_g{t?I}@XE06+KKbBKKp6{qY>YvZB9HJZkW5+kFhp_!Obc)ClW zSDKZGNPbDYWKt~rvSPVGf@z6QKUK2|fHWh5$qF&2Rf~NSZ+R*|FRXpI%+5hzWlSYN z6UG>zJi%>b=hm77baT4P>?SK|i&v32E)!e-RC>A@AfY5^%@;*Ni(o&UD`})^CK5cb z70G9sDrV~r%eQ!zU5NyPWnvazb*s@4i7a&jH;rHh0SVj^n=lj4&(D)+Cit;|4IyBcAN?DxMAf8&p_3n;E^Gl?zOGl^d5voBNBSYwb37JV*00Sj<&X zsr}N=nS&T?OM{wzT^w3>e7&Qq4DaIR6pC3sAh=Ncl9k`HV8=<5H-HXfZ(@(7JbCqB z>Cs6Hg|3{H35|J0d5ZBITZJL77!V7TbUT<(*iV)1I?Cj!$jLMhB!?4lR!fplUZ`2N zn+t28jwVP`56ID|noEgMOA2KUJNv72feon}g&dEl%D@~nDMkzIU|uwv4Zd-<<^&l9 zuMfB-g>adRrJF z?uO(u4=6APNh8*#7wNb3G3h zh)y|i;sn+W;!b1%E|$z|Y~ybTMwy*LXl8(9X+)ACu(UPvug}t=O`NoV*jr@?54UZh z{IjhHdv=JvE^%%6XoMeG+H@4<;0nY;u&+p&{Kf*5cQzs58Mow+CPMKZ8JvbQViI0>Fo!!1+R|4JD5MEYQasQhE*&!` zY(P~TCM>tp3L1Z)9rDUBd2f^>K0%4Lj6Au7X|=c+I5dr!*gL&#Hf@U05vDeQ*CZe+ zAIgx`SU%krVxh~XK6)&1n^HD|bfFBxkFPk=91g1BZ!#A0rxtbZXyTSzZow7E4Usst z268~fkx=Xob_%}Mp`n_uI~vlA=W6)Z$E1uWnv8G3N=_Egm_hGRl@*8^rL%*JtXQCO z40h%(q=Q*JRbt&C4(W=$+nF|pP(90x103Y>8=>kXi^UjEWo{XcdGdhvN=-VS@KJq3y~o+ZbS5O@cwt1k#x@TnW4`eeBpV zB4!^~N45%ltL0Ne;6uPh=5r^u3TMmZnJ1zFfk(*VlLZ+Bka;5Zv&cMKqew}-#+@II zV7}+=DX!8S$PRwOKE|4L2>@opj&@|1(J0iwm{Kr4@XPRspmebl=5beIV#?YDR=QCv zj1bgkUrec^8-t-~1(O$2vqZs5)_NaN0Dv$OWz;~maDnVzMhG~l!+(PJ#C03Fo3=t2 z)+EXJ0<{Fro2HOM>(XqGr(jFYV?Y7%^@@MbV?I1G&{-afbADXPZ!O3~0 zk*yPJnT9gg>O|a(VN;u1qbL;bX%zaTl;X^K`st@py2t`0h(@tuZmO~q5wOBcocPY| zx~(Mqz2rbY8TDe+OIBfLOw}2f9P~hU$PZ^J<uj#DY+{zy> z31<*hdWd0Gmp{8~CN>{!?jIsCHWutEpd9mrPRJLEpGlKld1 z3TtdA@Jc1fFuTghBUG9~doG%;QeD(~N^lHp1i6q-0UFeE1tM9YnBA#nATS&x!Bd_$ zAR()9&Ix~mp*&*G4&NwL&7C-g_8tlLAcOOmfAl12k?~ zjd)!ej}jD=lsjcLC6+rHm%E3SAH5!En-umX`wTlm7QZU*0F$)Cd72ya>-77M4!0)WOLvTjjJ=hE*{X4%&{w!MPzKKAwO< z5Kl?@IIDO%=(J0G+7;bIk1^4Tw4m|J8y{BRP7L};$1RM1zI(>YzxGFPmPxmrRyJSP zZE;1x#i1DgvZ6w~c{p1u8B8&InTBlem@3k7#0hgy1TswY0%?=5ReY5(#w^Vya1_kv zW~eN1Fsvc&8Y9qy(ATZuxlw6gGTPxOE?w-hii{)6Xlw*yI{D@x9~1%~pF1%D+{Ah! 
zA^F4;1PdQe#_b-DRef<Eu=4f^{zQJACLhm#ZrYEf+DnSDa3%+6bZE@WZf>9%+JpwKX(BQsSd#)UBmOrQxE5W zcx=P-Fded~6(**iSP|1vT+djGD`S{cn1Jz;FH06b7^XD}Z8kUYTU!iGaZ6NLudvmF zR07g*PykO#gk%gIN4V|+G2Iq85J713sCxuG(swABN65nAM?s_zYJ#y8DN!YmoV=L? zr!$t?%z&s2Z9ytuL|{>uIK~J~b4I|?T%N!QlHQCOc!AAvYY^%mV8SBn?wD)Fn#XO?)(@lH-Me)lXANJg~USO~Ys*BuWTG zFuWCQxuwE-mgQP#GC8Aqn0>?C$*nhL6i~pj$@U~PpSZV@pAdIMFArotM7yxm15pkc zX1427bur_Bm;Dd|vlyK( zTdv4bEysJeh(j$}VkG6YkmWq*UIfVpLy$x*$}d5NegQ$_SJE)3h90wtqr)F}=?6gh zn3Ef2xQu`xKU^PxAKV01;GlbjU<+cr+iT!bq-h_I=aJW23A@k7tzfrTb^{9JgJFYW zd)fa9DPUP_fk+%_P9SpZ*s{v*>+7Bl5cN~G+?e7-d|0(!g772Shh zwK{`*k9TnEW)i)Z%J2uIEZwrcBIm12bfs{-Gvb8(;xOw z38Jx2Fd4J9LM@&e>DC2AyX2!GaRrTMo_U6?g58ImMS5ZJTNlsj`G9k zs`V*0>TPHriww6Nw97m$*u^EL$2XljsnlI8aSZ_iF-=bmnH>lQ&S>XN2%ggr zXvmaIG~A)4M|uM$wCjxu=K!!{pSazGIZaq|1u{J8mD)O>B?M%*)b7e~tm&oew#jQs za%K3jSsm%!_HUtv3F3wT=v%r*b6$6tbo8C{RA&3_8bWFs$cz>m^-#|SdB`Z@410ON zFcG|8h_Q}#EFiK>A>@ctm7%^AgPd``R4Ol*kYlB;^z6(+9*#(_!x2Kh64q%AccWc z#2K85pOzj}2-Y2#xGX?28DkhxF@}L#%ZO;ulu5@5Rx zuUj8Z!7p0J85RdFUSwIf_!Kf}k=~{KcyLc(A$rhVYdvca+~OyDFOislp>n`}zS0w` zRqs-%&u>995hk(*^-u*-D)`a>qd@9k0N2Wdq5TKIs%{2)R2Rz>1NNEAhJSb0b?VFGB| zuhM5HRp|`MNZF-9snstnw@M4B=x89}6fWJPjIT$`F$CvM2h)dH=7^;hB>L5_elYV3 z)osUIY2}P;t#^9lF8W|b#|d52KIFN;aODB+9NyYC_=^Z z(&sobxnK{w;FusskzhpzEq~4e4dHrlO{FtCdoaa`6cEHB*p)5hTySHajf$sI@n9#o zCpkavt*EM@3SN!OuFo;i)b(i1bS0x`q-hop&@)U;%14T&F#A)=H; zgg4vyh^w2|TZ($1N-CvrI}H_+9Xq>pc6vRom>2O%z`2;sI^{-XSwLpw`!|hFl}h&B zy*4YiYEfEjZ=xGnF1qRUK`k|9P^`?Ed3*U36!zJPpr^? 
zamtM2%5vyiJW4`?DuR$klKmTz$F}0-TJk*pDT*OyLhf`ex*4m zr6hO?i{T(Frhw*5ylyX!^x2J5yjALAR)ABBVq%F=$M1Zp@-9D%5$n(i+bPo1gP*H) z7hZeE;F92*%cr1qU$U4Fks2WbgwNGYLr98WSz`(efpV=+p1 zZ&`9r41{Zl%!m#5=UYVpGIG?cKZ!QNyRCv6Ti#T1R$_|3@Q%D9y;xu@gzY3URNN`I z1t?1`h{k9p&afBV^^g){7RfOYLS)H!!2>M}Qr(bINUAbp9zzvlo-1+;d+`!`B5K-c zPStlT;D`#XIEmXmGBJ0QdaJwQM!DjPXU!oY;K($G5YmVJu*?KvkTzYRLYQA%eXH>g z`{e#M5?FUwu{h(qtLInsZlMfI~x)*Zi`Lw{QNvGvXGv#wu8YKy2Tm-Oj_jOER@x}3)^DM zG_?i~__gB!0Jls-mse3(%9}=d7Y*eDt$+#0?^0=i=D-g_e>e)Xh!?CnU(}) zdO^aN;TFmb0u#Gl(eW)&Zx@*8J{c2y(h=QbkAIn7*N2dxEK>wRWRjDdn3(((-Gigh zUDRMa!*8CVQCyw@p|RV_PaLtC6h9)E{>4xhwVe$h27{P-xDvBoC$3H0Dl@1!ktc)0 z!t~(}e;DO~!*&^F+sA4$G^o`tUFwuBytDM`%V(cG_4;pLef#-0-#K;a^`}q0{?u#d zPMu$Ty*;>CX2Zp+T)lRAH;yJ&S3IZE^36^MTKOF^Xax%>!(-rGn|d_I?DIfM=n)wg zV#U%S!buusVo&Q*l7LZ@jdHKs9w5PRisDrXDc+ROV@%CE%eDARag=l>ShlO;R^Y=R z=ygghQcK>!g6(UpmK3C%9)9!VLvvvwRC8*i=X4ct;Xo3^vLJM9FD8MmWx;(3Ya^Pl z66LYvePEbkm881*8;})Q9oRnb$Ygc;PNaC2!AHVcQ#kc|4FtpjKL|5}M75wlV^uKy z>>0a11OgV?Iwp{nK)F?l7!-ulA4n=xpvf`!!)50s77BEA`5d7wwZ$I_SGd%t6=(QchSM0Aj+_qA=@v8mD zjvc%D>RCOEldz0c1@#kQ*lEA=(y242o?lvgH{NUQ@W`v%<9T4MRI4Ak=7wXp-rhWT zHNsSmU@Ko>lN_E_e*z9(RG2xm79zcawun^8)*TM3+p;K!cX(E!YZg(OA}ifBt2%Jw zt+&pfI{gygf9qG~G>~{Mjis#HruWxgbJhF@K5+eYSF!#;jsoSiAe}#Rl4BX)dHXHm zXcej}_wp((dDmWdykKK6v zHQKIc<#8hF=}_=N_rja6F$dpy`z%L=FeiE1T-LFk{d%K{bH@!I`XfBlpsG6vZHW*P z0^5(6S+EkTI`Q=JIzy%{SWG$-VY=;(#asgz@VjnZadaGR46I{+3rbf)mH1{*_nW zICc8;#fuAUpuvGek)kh40djz&DvunV@w%hF!$2)R+GU@7`pLK7eC^$f=dmPffcLxJ z^R%khA#y)3f4n(2mp$s#GwG?VJ5U4MOcPH8USY8-vD3ldbfY^iAk23-n0?-p<0bIW zLl1rIV;|e$HZ`GO&Eit~^h;-OfL&Z})AxS48Qs_HsW&R~Hyxcn_Wpeh?T}^c)jLMn z3bcIT0_)?Y*B+xhvk%UrbMS$K0q3RGW^TRxj%#nYsnpn)*(NY$YE(>f-LXL6iYxKS zlPACOm9K!KXJV0bq)c}T6oIF>#|*c$#iMSVD<@lY62MJHp!oUo3#ZP!{N|f)wNL;s zF6lTbRQFP+)jE9Wz-`BGzUiiG*+oEqW|3?ixC~1u_7={)ffPM=_RW45t1*a6TC(O~ z4?B49(EI0)F%>t7#>By(0**V#+{)1#=RbJcUA0mp9`wNBoi!<#KE5Iz zp%v>mrxLUI;vtQ>>0HORPk{{~xH*c^BJ^=&y;`e;P^>OqrnqWu$VeB(aKn^-!ddy1 
zMpiF0%+>H^JCu}E$nA(k$E{q_Oo~VdXKmS3oe51IOQaEyXEAbDwoVxkhtLv+s;e*r zsH_zF1H>_o=X8WLuV^}V!D>;~N-8}X@k~q<9?jmFdrN@Tu#4SVm77y02o(e!7J)S8 zBj;HRI7cdM}lnmE0ZX>rUIE2Rf}d2e~gCUXD;M0FVkLVRZkl9Pfr>~8WM~Y z(1cY$>vIOV-;teH2LjpNts%rf97Q)lP`wakPGNaky)qUH@5QfV3hb;*zmj|YKRIq` z5gmzmZlhv8`skzVvV2>d@X(E0uBqbIC^BEPQRA)JG zki0rN2vUf{n2@9=FD?A+TR-}*|K-bvnx%cLU=lYd`|Z;5%jceW_^r2Jzv=3$Ylmy7 z2+QpbCl*%wJm|t$+JN%&L_(ylCoRYHtVj^+ z1c*P(4ZNrMna_NtNSWOci{?VHEw99E71-=?P0G`l&TKwO3r1)WQ6=;Mr7ZG%v|2j- zR`0RLPac??ow;UyP_3a%)+%*0xbC3KZsFC%g-5^t=!2iT@7VRTGlLS!4nsgrdHu5& zo__q~>EB#+)%$LE{{aAVNHtepa$`GKuMXaN>*P-!`u-n$<-e4U950pTN|jmd6E~0Z zx+US131vkDGGZG0>Q}#NN;=E&B5l_Nx!p{&lBQWx*(cy%#>>dO%8GMPQCso|+-_re zeMpLX>bt$tzyJK1*#ie=58X72^^pmVIk(C;e>;s@{X%E)p`SnV!8^(yx$~OCut}U} zmi0>AbEUUl`rXfd$Wx_jZ`fZN>8oNi|3>) zWT|%qctR#)uY)0=RQmM`Z=X8-#=*k}YWu`2nV>LmP-38$RbRNU{L^nf{3l;~@W@Q5 z$=+98~ol9>$`>kL6^H=|m|FhJ* zq12l#)u$t|Oo}vY%omcpTqs6;R;d9p={IS6q`Pjs6i(KbnTx6W?R$G;%Ej4jng zrS0>vFgJ<9K|Juk^YF=Xzjg4yjrBtsw+;M+3CPZV8SBWSr{8}3^trEm@#Fg|eBl;E zSe7_k;_cGnJ5PM?p@Rqa-+UdKgFLO|SEW~5VihTMo_+Z5&%N)4J3jlRQg5!*yqeds z2UTTnbv72K(g+bi)AYegg~H6^x+0xDFPvh+7IUW_Possl5Z$27`)x#M$VmF?c8l>< zm{?kh)&+I}L}$nZH0A)KHzVci-O_(N`~s5a;DPry@DwIg&fy^NTk*Ps3V z!}~sd&w)EXU%~`g-A{ukp@`{txBzZm*w7h}*_ydoSbIXU-oX^}rN~xhBANpp6+%Ax z=xYvfQ-2dl83wk96`R-;h$v*S00IEy`QNv`^)2|w!)7uQR%sR3JlJ8$jHw~ShN&=H z9F5gAPWZ+xqrAj>OJD!q|9$x2p+oOKh*mc!>-hUJBLQ2NYzJ6&2veyR76$Bp2qkc7pdV|s4FC8e)J@M$1ts@tX-gei~o36zpx3t`w zDc7q#J`ZkVZX3DD#Nz2FCxq~fn{}`};0)mY?BHE3m(^Am#>on-kHQCHF_D=#EN0r- z!F?W%OEfY%IwcJ_s$~+;5MWE1!tIPFv zc~GkiY8X-~d;ldnfPn4Pbtq(o4?FjJb^dVJ%z%ftYtDDiizEYirIOjP3w#W+>|Lfz zft8bT%E0%)z4s`2H7bGrpEO37%`zq2uI$WKrpS?6Jr0y6Y~Qc9yG7NS`&@-D)!DtU(-1pGL!E*(#8A z%ENwgYt_=iatT8i>ORW=t-v7m3|xu(mctX+c0T;@BMYrewb#>4>c8>EPk;JTm}_rS z3X6j?O)$4Xm)IF?wqiIGBgbiw2O+ym1#9UKhJxtXI|6b zk72GkSX#RD&_90N9HdHwMwxe#A^?hj-S**!A0Ax7AdqUVwUtK5JmlM@_P4+NZF&bg08RjM zx)K2zmgq+wc_faWP_Jo3z}KEKhy}RBu7<*b7(>puAy;v4qzTHvL4n)(00K0Jd67su 
zTa&oyMqkvO5WFDf_*Ri}?^r|#uf5Ttn~WAnN1&k*l9Gk|dHCTcb@XjiF)=T_@WSJd zKMtPA3d9t`s$OeCNnmm%g>UgOS)N8}LEC)LZWYzl<=tygC5!IE7rFx`*z(A7qHB>( zcUD8#ofI?&XySL?VJN{kPp$RnEE>dXuP-p*p*HIab{UA^K-Xyzk#*IA&_lYqCjZ~MugA!MS$qO&Mz`0dM5>8a#YBFw5s9yy62AiZuv69hNP6{hfr z6sG*cfVHWPcjpMUe4=o?H@xW-%%A9Ydi?HzaA@y_{kZ*fv~SeL0j?B`anPg9AJ zTqJ@;5GkA);%{eB(urFEKh3hKbui&w5GhU(Jl;|+X`nqp!$k-dFr9Ac8{c>YQ0Aa1 zPwUW%Mqwo>=ze++=H*KkylNTMvD^LU|M5SVytw>>3|ylYa)Vm1mGK7OKk|`}@T$b6 z*Ir4SqtR7}XM#@lj&%dd9<9(;ntc>^j}TTQ+9zgR7@NwZBW_|-rB@XKNn-`7P>;=s z9*qD1KmbWZK~!x8>y*cy)44)e*K^7$7?UPsE3B#`oY-%@^2RG#;Nn#<5@{~ANH>Xt z$$^7&h~L*Qyw0mMlQz7CdZsN`XaWMT74`D+I$S60p$1r`+chGLR0b{PSIq82G=Q_w z8@{B(F+j#u>AYC`=}&+1wiCCUJo)@Lzwz(Ceey|`FnTQ;F$+kXZX4Bk55vY?HH&Cf z;ut^EB2uxF4Z(;tH2cZ6nc{*`xYa}~rq~dfx2j_eC`^y27WPWY>alwuMts;!-9*PD zq8kNnR@^HMMl;eB+pWO7+Z-PQAs8UqUi4Z7F8Q%_TZZ3w6*!wOlj7XhRr%u^4z+wPE z2Ouee7>ounwUEL&UHA{!w8!nqlP5p^@sFcWf(dF$udC&RNUhnxu?wk$zJ(6TR)-I= zNHLvKpgAaD_$CgzEEu8<(}Ia(UBU4wHM?anw2prSNp0BC?#L~>f1Y1qb9S*cYb6l}|B2iO|?iwi3eoCAG7f1mpDneGq(DSEG z@)V5}0>B-}&7-m=Ku^a1xpR0ua<&SpwuHqCFFbe8y?2vKMRK7YEN-{EjrxpU#SLgT z-E_mnix=8U9IqL*RCBul#8USi!Pbd|l!p+2_Q2q}-Db93A3S-s#}P~*149W!2-^mn z1HknLn{QbPfHeok47QBKILbjEmI@@#!2@#$`LkyiEic)YF(FT%KF!L=Y@(#cc7`GQ zb_~k#XoM9Fk<#N-apcJW&$_9Ne~%nmWT;>xrXgN+2l>2vyZzU!0T~)jV~j`aj4PPv zL`FDh6Q;;VBnn1&Y^;7%!|FyW9xb5Zc|?l{D3k{d;O5I&5J_}GqquN^GFbD;l_ROr zv)~8OzL}q&NB>bXv1X}<6S2{~1~&i@M{aYai6BXzM}UYdKVY6BeQ0Y-fu zTp4&QH$Gy;oE=|(qtpbMVQe~YRAbG2SND}MjQWJH0Hw`eW6rJdUlYn!GvKH!k6AVA z9N5PL zb;cMw(2d&0-iK=^zXm?M;W4a%i|U{sUd1x4%I1rCk8^@+b?kV(qx0{-|Mx%h+0TBK zG5noxJhaeS?6VApMN&3dTuLzz%3etv^<35Cr!92iDHBfvKX%m=Qmd4*QP{=PtrN#( zfw63saRpHc-#8(cmIA(v#YD)7u2pd0$8xDs?B_usa`DE^jmz;jP%%Oy)PtU0sYlYM zep?(oMLvRBpoq+r*wo<5f}$Mk5d6}|1EY_0Q=d%L500n+A`e-HG~<8o`uU@u`OK#u z`0RbX&eFI3{a=@sFX74Cy%rIc;c$)L$yg!7O&AlQ#C~`cY zn&mitUeaz=^;@d&k&IHyKSW(wamFXBfCMp(de~_EAi-(6RBLuC4e>07kw%lO-i$O% z&p`qD#CtxZ1c!54to1ihm2qqFzE;V-^edSHg<*dsqq&FMZc%_`*ovCnZJvGhSz1n- zCDR_wz_@t9-YQ;sEe$;Wzw^$8ef#G~lt4iwDd;J&17%R%$VCh1?R#!BSyS#~|3yKO 
z?#?O|jwh&1tSByzu0~;G6I=cB=lPn})W$i8(Ojm;Rw}%p&G(2u`N>a`2mL=vI$uCW#}whZGN8xCZ-rWMMnNn9fcU8w@5G zzBSLSx5wZ{trX~jk&px_ zeUlMLL3Ijy@FBNZ8WN~P&(x@7)?mPh!WB*idlrGFF3{0UIq65ST zq<9h?I55ZT$gC-wL~-3>;NxNAc6rSf60z=3i54MFstqMUDc$aV1iL3u zWZpji?!kkHJ^i74K=zUWDRhD!0XmSyTgh;cfTJlaqzdRkPqM%ba&fiJVTO9Ek4Ti} z5c7y9iY62uK5B7%`A#7EG%b(x>~((;Q|jxC}|cG$wItBL0o@(J-AF<-@D`U$Fc5A z)&moSUI<#qZL^G9203KmafSOhT@sC@+d7G~Zn{y1F^1`rMF#>BNOzXU-a0_m$}$A! z^1d+<{>x0Si3`h6p~^I)o{aAM^UpueI*#IJicPbbEy98w)Icp@46k2R1DDYo)>4(! z!3ES!bSFRa3%1#@H|SRNKE$9|Zyq{u_+$6{-WUJuzlGLquT?L6sZDM(P+`8ryqBHf zhPlxe;k;;SF;5u&g~jPNOdW{ro;T!UpaM~4DVDKV-BM>|PL)M93!((_3`HA0 z$|x~tG@Azw9{Tvb_kQ`${){trvRY)Qr0Vl0;0#%5@`|7w3!z*c77+Hh%y|Mt6JGFkE?DZa^eJT$DA2L0xVNHO$D>bHz=jqMlFEj!-(|bX`G)Y zjW4~IdSz3<=oL%UKdAX!RFA!i&*9f7X|_n z8JMXqkRxOT(7MQ?j8!7%v1>sy;M2O?k{fEuB0!QBMIu?Cjm{^DKRxvYgx#o{z=X$6 zkCn{Lkw`Gk6A{M%rgH@WJQ-1>Fdjh%t5(wi4HT*JTGC)8!0CQIY=~$TA>E0j6DR|l zqF!)z!1ro6zqX}E-?E35x_qXA(3ai_uT4)X27F%r^~21mp${Mrb&i|BcM_kb%g!(N zjV?Q40%CK)&WOVc=rQJ;E z5$3xm;06$PCL6>64ADRjWQY)WubiD8h9TnQlI3E)jzxqyA%|k=XZ==dX=xdcNNvnq zIH2m1>XHQ`C__rU!ifYaXUzy`B>qwe2v8EM4MPsd&e}Cfnw!3Q-C>VPcM`3@(EKg- zddy1`d8#aQi0^JhzWeS`zs70(pcrKq+{5nKv-k_FBbBPCz~ zEq*zi?fj)FAaf=Fj{teJQ*3%WU?;1ah6?=!6M!(%xH616w*mDAYB+r|`0Lby7qrC{ z_+Hu|H7O=O72Pzq6iCyM!9*6U=PI0eO*S^_6eFm)ScQPoYs*?E-A@oWAZzu)qx|&) zO?!S3A-D;TXROtz$G`vkzwc=XPo2I#z#YmpVji&~M7fOV6Rn0v;LtXuLq@Xb$W+i^ z^ADZPHTn)K0ZS8!P~0LUVU{?RwJDE%1aW(5nMCSx<;)4H07@SMm5qY$=IW^!=Z&Kk z!e@RQ5i0ItO|n61iyffX)Swm6^Nc2foe|N&V^NdE18M$@NX+l=c&Hs|y zvPOi0yikeDvZ;o2?gY{3h*AQd+$gD@GaG%PY!Sj zZgUDX$5r!uQ|Z7#SW>HuJj9Wlp9T<34}T(jK*Rf3hg9%YNjV^gGcxVUqb>>vUE#nV z;RgUlAlnuhxbrh7WRxaDWvHW6jzr1Wsum;NFtX^lKWTPc7Cw%{XfJIN3KYluB=z^| z{O6;9wKw{OFBN-H>}Xa6SfMSxx2v6m)d~2>8X)5sg099bG9?e0I&Ruh2K5N6xJ93H zVyD|i{iQrQC`y%e1yaC`k_lj@BFCze33J!yI(nZnHi*zDC_#!J{NM+?49>{_+>%H& zBCc#~owxzG9m}S5SjM*<@R;ysY`csrq6+j-+#xmIA~a+om@@7-Uq)srqf)G9yrV5H zCk+5AKl^c`g#D2!I;0?W9XKYC-fAzyZl0Wrn`ol>(p1D^J8@cM8%&GrhmA(UZ98Bm 
zu1o6#lMWz69A}fjcGTYVU`ry!CT?YQ#d^JIe;*(-ZrLWWD$qn|OBjMPe*~S;nj6~k zBKn$vr&xv(xG_oDF@$3mZ808ZW;iW@=Kz`-SV*wotaJ?uj5g|!Ns*A})-2;V3%uU^ z0Tbf@+Ns#?N4&{2dJq#Nxq=e=8C;&3h7g2AlNxTW!qAEt>b90<_nKM2d%IleASuP* zGNiDFWTw!T51n9ciau+P6F`}O)O7@>OI}h&;FDskfJU5>L{{NB# z|NQ548QjP+XWiL}8;+i;He-z*jazppGVD7gy`bnA{h$Dt@F1&R$Bhe>nm1Y&SRz?~ zRE%$a=%XbR>}vk7TVq$Sa)U83=B@~C5CqJB^rIhP%ID!0x6UHbV?@j2JPBfeFneTJiM#$3On@r$7B^#uB-fT}6C0guD|4KmbgPEeP|x%||Cg zSTHBu*iJx)6;-ekK#*Wlnm)jXBi6$yOklBNN#aWDsBre^hkdL}Fa)i@>6|QWgstQQ zu)FWSo zD+>xI7OYifopfjdCP{|@ZAizPot)(J&2N4aVp5j^Tq(H@oGoX?N3r7RnySWe?%CTI za8|a*AYfcG&4hGtU>4$7V3fAp=+-NkHO#*6fB*X|F+2znwz<&WI=pGtU;vvz8vsR6 zL11(P4hkUd*ZFy(DJ!8ZakizxX{JKGT&;1YA~0jt)l1cx7BWpC5;YdW!e}d=bP$r1 z1w8DS$|i9r8c8UIKt|=WU+mZfg#hDIx+BtXqJd~_I1iIaF@X*t;o`aq8Yt!SRygJ; zDKGBwgt#YVcfE<-DkGfPw>}@$i6O38m*^wbVI2gVS%<@_n{|b6-XUY^B|L2qe_|LP znVwl{WqO5JuOEDr;Wj>4Ofqfb(t9PYFbbI2R~S-z z=v+PuplDhl00koR^Yct^JcGPElaeA6Y6d*FicJ^q7b2_J9Z=;N)!1IW)1yT|`nqk{ z69Rxx9z3pZN)l&;GtBthAM68d%9x}RHnXx?n@=zIID~UCZ6M(qO>oQ7ZX9UTtmIZK z;6+~cqA|MK9X`UV*=m+00?MY0l7X*RRmcCS&3ct1jb*)I40wJkb+f^kB>3EdHa23M z@;?9h&wB!biAQ)Uwpr*?EoU46X*5hv?0OuhfZ@!jijeCHCA^6(Co!im0I+Q_{?W}0 zle#I8LSTymor!Sh1S|!1aV)LwAJxUg2q28u{NWFOh>aM#CuyVr3e1U9(!_V+ox8d@ z+NHVq`FTblGZYza91>MGkAS9hApn^>9yu}kfX0}oYfdEH^i0J)z_`ZXm)N2J$RLaQ z4CB~KfH1jyGz+gc0t64sRAyCY5TQX* zrqt|~!GIEYQjM>*Ks}B;B4z!dxYagN-+1Sp4`$MeDd4`-7C}UTEC`@Vjp>t_hJsMl z8fIT92~_0j2uKq2a0V}*zTRjd-5mkFs+@qP6Tk`s2i6VVl{Q3L0h8%K-#QdgDHlXU z67`@4h;bSwp9bZG6S4?f2ftaS4YYZI6hbBNLt^nS0_~CuVi_#yb=dtoyywpA zFL|l_Z#JsDi!2PPiwpcQ^x5OTeDF-&g679J}_7Sk_LkN&Xq(ez&;v+R;C75`0Ey7 z$MD%ODqy*C@xr?lfV_Gj(AIpUF1JK*CC0vt{9h&=$l{uGL;h-ht{3t_vPPMND(Ag? 
z?|a|-%x6Bs+cRo0{5)tK%Fq|na19t6B)iue6k0o*)*&PLCMmc<2WW^m<<-D{`?r5X z*`^6nTwrZT8|OgQ74xC6YRCg0+x!5hNC=+kDyAOoI&e!HrWh$|FKo>lH@uA!^c`{Z zcYpVH=tu2)ZCQ7j;)qvhPV?}LfmMoVZCFfgt0O#vlP)ImS9eZZEeqn44XI^ncKD;Vk?%_yAq=Ij+4tFl_QJ&zyjEcAb zV@Zg%3f&B<_DU5r*G{d+Lj#`7)cKYBamcmK74a`j%{KW4U&DF~uQM}p47q?i;0SEF z8B?@YCzg0}r$U}5B)wLkcS zKY(R4N)w>1hAvYmb}hvdRXP0-TR#>e53p2V`^fSoNubs9=jUc$u!E=BveZKsW)KWD ziQFRTOhvAgWDZh`bd*RxE=v9F% zn&XLZCsVPJq2=>%g=qEmdk`&W;DMih8jb9njXmlJwZ!H2Gj8RhEioW7e%Ll8er zw2>yrW!=e!0Sih?CVmn<&N-ffTj~-zvcqRb+1Q_mrH6?Mz%-{ufKPDC%j2|;52Ga& z@fa<`aXJCa?Np)r1R#J3SCAn%g)3timpw$5pXEcP?X*>_OKWhA6j5Ghw&)m26gCaPfybh;4Dm!quO>-muQB#yZgWVH zJfZLx_yi~5M+^tQ_O-8J<)=jwX_Q>zgjB3hdkazb!WAX}1<{yv#fpiWI23ch9j}t` zc+A7TSZoU@jxAcwbQR4t8N4tSiBsUin;O)>NJMiE+(Ib0=9x=GqkwuLNydVoRw5tL zKCy^e#xB!qfmE_PG)=!|p zRced%Ht?dyCre~ts9cQ&mB`^M;vZAGSLIAzZP_l6XQ@+J z6*V2dloA|_=K(VAYcSsRGSupFs*=QdilnU7ksK_Gfik)E%Ifq~-hzolw3GxEglIjLOrpn{ znU@m^qcpe+EM2U!p=B22Txw(8NNSW)BXLYBc%ecQvaFy`AYoO63L3@RU5?wdXWXzX zfHOYD;Nb2g%$+i#X;CIifpTjT4<{W;5&!}P;2O2)wfk+FIF|xW%?K3-N1_l(E02I8 zW9D*4VF#*scvywV)RxGjp<>Bmoc!W#iaWyS<-k6UrTfD_{6mi!S|N*gDw#FEi+@ZO z#H0lk5?}>k0y0=7m{2wXLl4px?Ci9z>-Q z4T-C@)L6KH{mtL}4Ng^9vB@?5suKawd30z0rgjrEJB9;ds#@)up5dXX*tt7^rKYl6sIu#E+aK zEBX@oPN4v29O1@@gJ+tUG?#Gj_)oW^ZS2;)DFC)$0tfNegM%DqLi@$875kgwo57J; z;veu0(A3}-I0PcLYX} zyi-OJ*Xk5ayCFskL^CFs@re@*EtO%slfo@mF9ej(J~oGr7D1qCvrmyQM!Sl$^0HY; z-@2`lEUKr96aYisc83&rg7Z)<6*m6drot&}HzS~7OgzZlG-z{p2=-85F=$9)a0cM|fg#A$tGt_`0*1^LP%0Us z!x9z!T^1;}npvP!d{{~KhY+|^LLP`44loH!I-Wc7V%8VF@C970!PXR`IJqKUguM17 zf3cGLxu!cZ35Y-jd|cPujBX9^DZcgATQU0k2r?&brio!m(Q|wao->nC+_EGbQUy|+ z$V|i$1l$tju%V@b8%JCb7Nnx2IG!TT^(|MlKDIFIWf;#ciPpLuI;W<$8DZ1_0VRsZ zX$ko?U{Vs}UVTuT>6fbVe&JOW&Y;DOQ3F3;nEY>qtsR9QaS+L*!3psI;Okd-!mHulO$sJ44}_c!{~Lvt+i zyrfYoH#>Zqq|}t>tG42xSf?14T>N!Zx!dQY0(r#>E#bxF$57hkyC#yycs8`_4O;DK zQ_*QRq%6q;Oz#v@(u^*QLp*J{1EFHzNj&`Z7%QglL_BegA;%LemT9*llPp2VWzELl zIN!)m2RTXt`?pQyyaGt)=xV9fA5<~&%6t)P8kJc-V5>ODm?zT3eH$Wj1|6?2wIC1N 
z=E&hvwb2{YqdLd#H?R$N%hgJw-WkANIMyju7z{Y^;36RRZAOTHJVvO)jcy-L?wXK* z=(SRZ#|BuP*&%fBAFgobJ8eqCQT0Bb2$JtPFuSE%w^wOn+@V;Rsm&qFVkC21J!U{5 zJmu70u7U@yUBSfR%F(q&rev_&3BjtqWR=)(QCov2}`Ulob*!wO1%jUs89R%q@MPBkqg) zMIyU&Ui-pY$L^N}jKbF83c#+ED42sGI3wJN<8g_70hjQ<|NFmBD7u^YbkIS#%q@Y! zR=mcF^=w%f$E@)yhTcMw;t(OU6|*2?L{!R@n1t6_5fuhEXB$1^%+>s&v&1RDts^d+ z645+RG}-Lo>4sKEXU$w!blg?0kgNM^#Q0hcSn$bR9jAfi5t%kQcMc}|nXiL+T_L{~N-Juum6cIuZe?~QDZDZxfM(mU6 z1nY_qFaqvic4Qi|Q?(!!1>$vAH0^v>sHZ9JuhJqFDiJ~I62^5@DnpR{qJ%ZN{KKLLamoK&%b9zBvh!aT&r<7gAb=}6; zO06HjKXumZpuBtMDW7slKY#xGzx>O;@J%WXpF!a!#a&*%iOZD>{tR{Z;_%_aXb%7M zPyh6*U;PSvoR|<}_$4Nin7V9xRw_0@q&SozsK1RxeB5o>@JL(xY^NGTGe*yi@M+@ji0gu3bYSXr8$4S+pe31HpuEhPb# z1v52zLEe0%?|kEdr^_tCI9ZdJCh4Y8*|;Fh?Zl*ZM`-@ov153$V=3lkIPg!MC5s*< z4!Zl$fj}a~2q|~tZtP?U&;UK7WEL1(QrxT~=<3ep78ib=bDTPL3Vpz5O(X;%c&eBZ zFG~cG&KTPO8N^h%JUe&r{JV>#3hty@&3PzBPay&?IANMA4>~$$4~t%T@a{s3vvR}L zDcp&%wbpC1`}T3Z;$ka4Ac+aEGN62?wS>cl&-LaoziOqvu-qvDpFtvH=h#`qiX8qi z{dS|&X>bBY>wKfUT<>0-tM_W{3yuC#quQ(Dg22WC?qGT3gxyLwE{KUS^RWFko7H*^ z%jax!rr*`K&h|B$t)*oir31gwn334QG+L?FW;g{%=L#kU(NW!ioqxGpB8^(!Wv9YB z7ctkk65wM|0}qjZD{NkmB z8YX+TI_hm0i)+os+yTC^wA@;R?HFUR=5uZcAFO6XhZxb`s9K-9xX`IoXK*3cGznbn z?H}7}A)_(VZ*h1;slQY!^I_P|pmVWS?Nqv#>J6;-i^* zC>!Y{BF8a&So!dZz9mIuxNn#a$%o< z!p*M9#VBK1Q8HD$LLz1d$bg$Wh@}40KmF71eeZkdTNp{{lu^a*K<_A%0_oP|QkkuE zY7O$|rs5n_ws{C`UsYIi^8I{XWZd7>FET+m74dLQjNq2wG8A*11L<(&Ja+6Dd-YV& z5qe57dR>G+Qs%PUf|zHSTgOt&Ok^6e)k%)DU~W!%tWt8M7w-O8{D&icZ1&2;f-QGt z26yO3BP{m5eb_*zslqe!Ekk=)Kr@ip>t4VMI^}6dNk?9L?KR{NPy6XUPd+F^)+$u4 z!~{6velZQd@|Caf>L{NEg_~%YXi*qM={gD+M@PUc73Nq##x3e!e1;^vLv;EC&+Nu_ zk6N)GWDcz$2tWuP*p#uxW(Yjw2M2}|(twmJaPTvod6x4RfAJSsXLy|jhk6feqiU&@ zQs`!)OmR%5#HmE$2suXZCV{PKA~D)gQx8NqmCfc@29{S`i#V2c~3JLocWr&yS{sj*JKAUwY{$vV={!qa7KX*Ddy z4voZ40FT#7v~em{h5@nucqq~lA_U7}GL;{4nEqnWNt%QE=v=NYPerqP1pUi<3#uT` z72I&&`}05la~^K7bn?2lnP{9XnT;xii#XH4k(6TY8gb~)Z?hZ@o(x;n?snbD38_IT zg(40la!c2^5~p_r^AzW@C!To1p5T1?lJsK5D>-gX!J`j<_`~!CLCT_oBQ0HkJ6zps 
zy5L*&Wk}9gkXNvDq_3VYTj5s$8dGxYY+Ts}O^m0uY$z;_fo;qnoF^p@-Q@(t;}hPo z*s+?h_@ePf`SD;=#VtiV)VZ=DLSdtBBJ7sntX0?xx8d`&iD%?=2ZfeLyqvy5Tka7N z$`H3;@fDaPa-Hh9g?jLEytG{13lK;I@z?`Nqh#ESWWp0BqTg~0`*@N~;MokG)bz-p z49)S?&FyAN8cU2TOxqhYjZv;#sx>%!dw*-O%Mn-@6)`Nby!83RH!h{I7I5a+QfHZ) z%7G)LxkDIY1s305eLJ`IzUyv0f9~aLj~rxgC#8_%7R#l%8I06REgVoA`#SyV(sK8m zON;Y&-c;hS_a@L+Pc@Byb`Jest5RNGyzu7PGbg)Cm-ytFJW?x_rEa^ie_yv;yZXkP zue$yiU;k+|_s6#lF-FH)%GBd;xqAKkkDh(*xx=+er;QhC73AB?-P!uSUb{P}H92p- zhv#v%acOzs`kRl1-*AkbOcdlp2m}i_LRF6N(z#E&71CH@3iBvb#Bn#lt01)^ib0m_ zO1U{#YwllI>NV@-I&8#n%musA=v+Vd5Xu4VB^od%h)xC6n>DJrdU+UGW^(N~v#(F%@ zLn_7$!rQpi!B$b~Ug8~__aDdIJNtJi}`PeLJy^lm=ecS*h{*CDKO2I@(S+e9%@^o05jZ^yjTq(_@ys>34M!4**>cdc}fA& z2`*WT7wfJU-<%uBN(hDZWMu|-zLSdnXUcy_?-~RTuai!s) ziDV;2kD&~?gaqM=ih!ZJfQo6vNcsHdKaYpkH@@)=URz@&CW|XwfN|zHw*+oUBvS0r ziko7_Xk2Sp*nC&kPsL*Qo>tz_I8t5~8Rw3tF=ix-Tyq8YlsJ7PP%QO`EWZ1JaU2Wd z0}nia<0QolfikY*h=Cj66>>#;V3xT@;0&HTs6bt2K%O~shTOy>%9v&Dn)^tcu!(+U z^EdDHa8l$cD^#VFI2sv0m)X3Jz|ZTUX)pn4EgA$*2BaqxJ*8_*F;eIPJ>^}|M<0C@ zOB1FFY{Vp*YdNed{V)BZFZ2;B7}Jp59nc;ouj(@C9oQW-i2` z<5cvZrQ`@Im;Kg!0Z(uPXZXnJ6UYkQ5iM+pZWzM^``c7ZSvZ9WA$MNIGu@rL%jE5*$ zi3r3qe#LSSw_LQ25ER=uAk&I6aIFd(uyU*Sd)M7RZEf)N&Sc_5JjW+DUiyWjmTuVSF`adrwt zJY3xjaYwF@D?`i5>U#J<22Pro*?-3!AAa%Fs~8&jY8gJ7tyTwD-A=dPsPmzhQoGl! 
z)*DYhck;H6eyUVIh)uB*Cq_dT%)~dIxc!ZD7jQCdclsFbXcuxV^L<^+#`Rfj#LJz* znb+T{&K#V(=0<#7`Bn^+TDc?6aIE&12Hl0z&pq+WpM7`n!pqIUC4T#A%Z<{dt7iM9 z<#XlkJHLMT`%gah!{$u6*S?4!D<62(he9!I!#))FdTsvJ5AtO-EY#)NY^AYpX}O0% zd2zW{tM6MXSC`B6L1XT%i;Ew)?T*si0eJ%rRiiaI2(K{{qvx+j%&F$3*~%1VIY_rF z8v)_KSsz6$mu3#!cIQVzMRCTI6FF^q4CUfFW&j#yGzY| z7>*HFFao1FXL{Up%Uy4tUAVN+?zH98D?ZkAsy@bK9sV)Ev$*r>D`y)s2j;FhTISo- zEm>xPCfLndC@Jc&lH; z@4N55r=EHWtqXG%Y6>FNSGX&JZjuEksr;G^C^gh4xW&Wy4xo9ubSQ%k^U` zD0G3~I2(1Gxp=hb)99>bv4pT5Q^0D9OBPzXBBFG`nifd1Vxhip^$v&K!Zp`igFho1 z*Sqh&o9?&^sb6%HS|m;))`uEE&>^!Mt|U7-6k~N-S=jETMdoQOTtQC3p=(9NilffG zb{R8~b3*V#MA5_*kv$SLp?6$?Kmke;@rWG(yQ104cjKNKL*vYKetw=e(AlwL?1O`- z7x2zAu{auCmws)gYmhplwaoU}8o!NxCm==E7vc~f!ICQzsUc}sF~2+l!JI9S9V+>R z3qF;MVFohk5~#T82FDa+=d?+q;h4&6CxygfG6v%?GR7(dwyxnB>RbBD zr#%ZcRgiHy5&A-$h`DA4dFrv;qthZkd40j@#d(+Nx_U8dY@@r7Xx4yy zk^ZaN4mlQD~6lNIsmt3UmRfBgM_{0GilZY?ZgExvTA z#lg52FDx%}CT$7Fh|=}PZn@>&Plj7`{{1~hf$#}r6v>94hX)L0B;z7L#x66L<`53F zj?xkY%Y;#3oP#QdO6U~d!`N2a%TN5~v74^D4r70Zy7^mf{REFLW@N4Qc!EQ#f?4KCjU2XH&o1+E6;gW% z6H;JX>iC3h?p3>jDS(U~nardRyqt3-j>YZRv17cy&&#l=ChTMBP}=RVz#wEtfb2kz z3rpPw(7g_gsJYrmvIe+Eh%*p2&UBEz;6j2c5OSH$VhyJS(zzuY#yKk>6D?cyO|ie8T45}vjtTkpsmi>V)@F0 z+`IF5w&)ITHxtW2dEs#lP^E(&ncm}ZYqYuIY`sCOoC7GZx!`Ky?vO>qs*yg&M*KeEaQ?>w04<|v$K z;EKJle_-1~zrdXC!y``FJMyJ3J@MNo@d@JVG`HO{FBc88PlzEUOnj7XpAYgFw*_LZ z!8R(|z6_axK#LyO3?zRMj`_~Zs-zP+&Xx)7NYJ^5wx4 z&@+ax1|iI|I!tfB_{A?UGP1T}c1(St7?{#W0uk5R2JVc6O-|=1IhKVhR-g4yNndlRmx z?sS%}St7wl=wK#pUe2tE+PDnb0x=q5IC&O~E4Ae`^Ue|y_^Ce4@zjkc=txu!@#h3; z;&fKTi9pE|NwvmBMkd!6%Rw-_}!Ln1QPVtnK?FlLh24bL4 zh5!XzhKkk)+yHUXS2VV}H{fF=i7=HgimhiR_EScVj~LXC-1?Ev-t>X9ufN>uw^&>7 zBptMQP^zV&+MGFX%{3*CT;Qu?tU47=G*GW#Jf}*8zfENw_TYiT$FDvzd*E=1V;{=(z79U-NP;mT*F7}?aWIr9>3*y<=`QIA1^$KP}b|ML9f{#R1O_{;7|YC^4Zsy z7ngL1vJT67_Qb8nXWn;+1DFpSI9R&oTKtZ+4-{19;7~sTH&HO3+X+#;PrLU5 zzC)2>EAWw}+(#vhZ%)|K2aLXz>u>we^*4RsjaSbM`0!sv)(=!577C2P^+xmH`) zW(H=wY2VwM`@FeoXIHyHcTack-Q8Ij?9QsnO7pI?Z?0T<^iLnWf8xZcvC+flhmRKf 
zN6=ZO7}Of|bUq7w#LMY;ZgSnC*wE}rj;Z!L|Mk|hPd$~vE_D@K#;H6EZ=;UUXvBe0 z3v8RTsaNMrCm=Ys3xPlrZx@pL zB%EYhNtU>^>TMmfe+%F7o$q`HI}9i$aGg7M&WI}davP968V=fz?V8{{I--?tSB+(D zkYV}4o(3CZ(IDoCGx!MfW$Z*<8=`>&7hO2NZYT|jQ#A{v&@U^?WWRB$CVhSCC8Vpp8)gaQ#BVj-AiJmphwnwt`T$cg)f1m z(|bjT^v(Pt6gHr>0NvzC3t`;*7;dsdS}4BW>5zz^YII znW8gcptw>d*>8wvGG$5iS+hWPKWmnZE&S4E+E-+UWLriME)(?&Ph5oY|6l*>j~C9K zJ9FU)22~8d_>XBYiXfC;PPhO8q#w7NHrYrdf>(OUun$d@&U4Pk7wtUM&+T=te}1D2F+lgHa{zkTM+8D_=2A`2Vo zceW!-%Zjn)36Hu~cwX{q*&&;xoE;B$@7`rm#012&AL`BILdKE4WuITZcQX*2a`UY@ zmcs3}i+oX}En!}WmPk``gJ(oBoB|1tLz!pT0GG28C?)H5w3$(B4tL@h+U{|uXWh5zSJ{M#kw=L382U($H%)qUU zb+OxE7JjX?yLzh8qt(`>S5&e&$Xh+mk16CT6S>zYPb~WW4Nq5*e{g@z>`(} zF#?W1wF>B~BpI>h)N+IYE83hFUU&hH6~kOGT+GPm>rJL4#)__YENe5liQB)`$z1ov zYOn6eN-=qFzP@;I*ejmXJX+&P)k9tk`N)V3QIeg4<>dhw3@u?12|llL=VZM^v@-0W2AfSRNUZ~R z$x%532B0IqB#qe|s|D*JsSIrEm5hiAg&dtsNuc9HJ8{KHMd_-6J}}@hz^+FFV@IAC zR;JPgCk}+d$#B?YMw)ys(;~{3TYCcqU;%S-+s+K8`uhh?4ubhvI1-y^tzb-jYrMAj{Von5TF;yHO(1Z8*B1p&@yg476H;MGm8j2P`ja{BxpU{pJ`yw$ zZ(llXfD(@nY?P1)yz@5kXicYOgje2y1Sx?GF&S%OG8+k!1vH+L15NTmmdPEz_AQ^A z>~H{uA;S|N?3%If{zIS0HN}B&ks!Phd@&9c>n%G8W1RN=s#;l5Y39bg9#T>>os9*1mvZunlUMEu*j(N8{hczTBU?O z2(}{;;>pt|0AX@`!i}66zlq7^mxcc|V*v^`vDeViRIkvnT3lGTcKriX{{WC_Gv&lj zn=Ki%V^j+JQh|4xxHY5L(TA3X)lvB8=HQof8cRiKf##s!!E^@wj(J*ev%Mx$7uqsBBmUnw-Zj-E%w;lQ2O0L%b#!_hSlWFFOe+9+Q9tc$b zsD_w-r2@kEBU!&5wtn#4e1mjMKonH!AnTFHkz7y*$pVXZM)6w`@#&>Qo5@2eAWyoP zfz8Bi>9_U-!71m~Pqbyylsy>;5#{&a{qA>UiZXoq%Lw^;h(vl|F;e01#S@gv`LcK^N9$gjDrV*2d!xi87vk=&&)h=P!p z?zhQBF08?~93ej17D}Vt@|Guznh+>MVz_daQE^b184a@T$ePE7DkeocM*T~=uuitT zjK0J_DdZEwpMg3PE=0zjKdKe%Dv4we%RtCH;M3>VBC-*SFT>`|7i>eOlce4IOCo)O ziIt9wR%OM*WNO&jpOK~4C`U&k4No{<0HiQ8YQqqKV-?*nOlPP?k*tS64GRmXL21uR zxma)b5r3?A&OfzfaN@+-^A~^r`}baY<*V3#fGKW3{k2;;6iYi&>io4pqDHAqv9T!W?ro>;k|M*69En|# zt=-b6u^o}Ym zRPwWfy>?% zYz}7WP>uBBOnO<(X7>}`m@x1hu1cD5G>JP6g28e#J(Z?$U4B55Yo&*Ev89TBN)Gm;(_cH~`4 zN)w#HzWn^mJX7?=`Gx7JdrVV{{e29JnI15@BEgm;1#ZncO>)PPM8oFlJX!N?(r4G* 
zCNfLL3c>Nv-XbGR+_HubAdK6XxAO{2xR^W9Wzv;td09)K>bA86j5q+ET}L`^x}9TF z3{P3LjIeu8XXaKZ-6)@q0>QBm*-95R2;f`fmqt?cRBN$NA8vDyY6?qOsD-eXhH+9p z(}LB-OB%!Zn1D(}HCgC)Lnb<2rgoB`8)!1tBnwQ=Nhh;(#>764M8)NA&Eao(m6V-5 zPd;&9cmKyH$s8-v?}dwum?3Zh2)dSNlrYKx4bv!LAV@!*90~c1F=#<)Km!Qr4y5Y9 zp2A43Nszr{9J$6&)@dQMs7eg@stAvDbb5|deICxLof>BBo7Ho^;xWYn2ZN<1RSp`G zvw9h%Xf2wf8MHwvwOFbk!}+vwz! zQ5xK3$Rq`{L999f?CRguh03Sg96o~-OAeI+9H=_DD$I%SEln1DF?}(!Q}dZ@xG3wI z(PYq(ksvq=zAjaJIthMSIp)Mjnqzj5HT!KDMWidtWNDSC8VD3A=A-%rCkMxnhx9CRt;9VmWt@n(Rf0*RdzaSBdh8lHe(q=G&I6%=UW*BL;J`K znL5;J_Oq83^NtKX*mm2fvmu%hh1x@g3dkDWEkb4>!SF+*3uS@2O}i<{ay(R>5zF+$ z8D9q}cvM5uiDoqG%1FVk6~OS&p7?U>pRDlNo3}o-Nx}Jt)HoKXxLECA0RQIieoHZ? zMS`|FG@-Gdsxt_BHK>00Xti(XY1K!8uF-~%CP)dW;KjDi~LfwoeU&UpNq1Y7H5`LqgJ0za7t$u z7i;$)+^SX7ic%jNwf0~?yYt7MfA-SY#3-y*yz)$W>qw1g&7FK=ZB!Bvylu-xEUb0G z)Bs&F#>Wpl8Jf9$+rq6qAr>4|Knyf-LC~N)!4e1+6S3l-rj@s2V-x)53JlBmPyL&8 z9$nLl$t(K6CYaOuAzJhpQ!FHFsWQVuHnKCIXDd`XUvJdtF`=K>?i4ksBL%jvtAZ}S zh?k276Qx3i?i4dd@1#WC^ynz&;M7VK938rxa`ic~@T~T&DY7*mE>Fcy-T6dX8lbu9 z+*!0? 
zd8jq=q>|Pc61hqh?{kaRvTKpatAA~#%hmc@zkBETOP5CnMwMI_iySoE_J?1~sctEO z@g*cW7x988WrpPqo+%lL>fG_QBY!Pw$|PmgMd}l1sFQT1o`#02Q`&Pf0KM#ry~R20 zhZLe&=Wavx1;1hcH9!Y6PMo%K( zTX5<>6Fe^F?F__O%|^WudlYzJ62*MjCVr!%!x%rJU1A3|14+6r&5fC_FT3IiK7*Hf zy~Z92ri9beQ&+FSOmTE-rr2Dd>CeC zmHEY5JCz$79vdDU>K`aFKPOd8RF=i;ZVqN!hAd2>hQ5c99OMh*OSFVQ0~QaghDS%3 zRt*jg7Yju<-q3gj`65AeS9DrzFUpCNQ3vJb6&-Bp9&(H2^2Zd1i+O5WN;|+Xw9T#AY!v(Q zi*xfIz5V+K^G$db_SdB|xzW*aCMxI7pC2C^>+ffaoD~?W_>$v6yWBdOGM&rjkP!x9 zcyfPwc6RahXLrg2t!%Eyc@&&~-5GMquerfaqiU9M9zL9gYa8i5divz)uc14fm8LKk zUa1S_wp)F@moKZ@6iA9Oj0P7|V)~uiw{BqGck1_V^L%Io>R@bSbcg~!F*b^4mOmH| zGuEXCbQ6nyOFFRToMXmd8lL z!06=g=p>HlXwWR?f_tUPFnEA3rsVe*`;Z_P z;$zq~G%(22l)-{iZK8L8k6>k3%jSi2{@1VAi%#|fgubl69WT7BSYh|H(`AzOo%pT$IU>u0Je6( zvfr&V5HHCf4Rk0rDdUy~jk?K70B!82pMFl=WXP?t^iW@4aR9cRfqh^9AjfCr`WVk; zs;J!vSSljY&=|wA;}%`h6pRdva*MXq@llLEqUPdav$8nFD+ZjDA}&^sh?ho{K`CQS z3KHZsU7>O4gillH>mavQCN1S%L~r9!TblhZeU^3i8xLW7eF7j>xqk7BUm$ww2{NMlm-$VVMJ9iFD@VVtSmC#hQ zw*388VYL{^9YuGmVQkNL*ycixr049p(++I}=jABIDq5ZFT-hXykBFkvNsYQQrQ~_ zSaV=$p{=1Ipow8LfNH8eq_t&Lkj4LN*RC;4nVEgi*FOwhJ2ZNzI54tUUKr0FVWh~g z9{+x>?!INF=LU`RK^_Zed#PkqOVCp{Cq4UbPuo;o%(a-`4~H(AMoXM>eE zHe%ZKv4ut53%}MAQxlm3pK}+fqPKS1H!{sZHDnNv2S(~ly0Sx)_wP+-3WFT7Tw~DQ zs8e;M-70$6UpQgY5K(J>eleZRmr>Y99jMW&p$IN_Rtg1t;fAEnx4tEiR+Gg56jHQS zbokWKLjPE9V1SKI&Z{dK_~QfBLnaBE9_#od?oo$Gd3RA}(RXxO1!SG{yGAN`(_%YUO!&cYs%cWglk-h6)u%X2{TyI`bQ~4|Fo> zVvJjZ!-I#99lv<#BtvA<=J6*E0w&pp`ay22Kww4oO~ni*o#z}C09lMu&-LrqaU%tA z8y*=O93IOKj98mb6Vz;J-s`ok)aG!!igq)oJ%Y4qH0ulV9^E69U8Y&j4-BPAe z!}`Jr-Z5LS$>>kfs9|NWP&j(x)VYhII17E6QKK(T!N8?HPUJE(D9wLX)Tu2@tCWDf zeEBQ`nrEiycVLR>dbn6}#zX*|5gkqc@K|wVVzJ(;*Qz<}C$i*1cONy2`79d0BK-vg z8MNz!fs?<18o|E9&`|2dZFai;31n5Q%`cz$2iv0up8ypy!IB~3R z^l;z6pgJPGcD;>r_#Z2W_Uj++Z-*6HYnKuQ80xTZa1egr48GJPE9{#pV@<{ zX`qE!1l7Uep$8A9Qt1ix`_zTyosl64;Oq#~sJ_`MFP7S=>SB7aQlG8V%Z;>lp4Ab( zHvGXB12!drKRjXNASDn~AuF{yrv6OD5*ja7KSMTff-!= zN)%RVk>;iKj`$fNdX$!D;G%AD$m{HV=T7nd+Nb{tgqi%v02wC?c4_Da zbxkD$l@hK7;NKnB2^pp+I0Z`!0#K<)(~?L?yWbZ1G>{t=8qz5lHUKMG 
z%-4d@dj#K#@;I`L^SFN=HmqywPop3~wm=Y`i1vj6A8%GY>7)?7-GKsm5niIdyjbv9 zfLVx1b6f)^u~Drz9i16j>McQi+f1WSNDsOpor|q1b)sagRp(89pu5GB5@NgqV!v!P z$VctUfBX=cW@!MzG1{>9+umVkxLSiRb8rDzA#^QG^|UZrSOsb0Ii3xB^Yew~v#e_w z8OC8@)=xHnaF4)83VeOfh}B`XMD?Z|1$;xYJICnpe)S5Bf};Apy}|hSjIL!yxXW&q z#ivj=pS=z%#iSF$2+CBv>Q!ji5DQsJ<2pj}xt8P)`MlnXUajuc%xmGlwdw~!h>x%9@*~6e+ulp!-~t*O;wfdT-r*+Du79AsuA$pO zi4&@A=xT2c(?rF|rGr3&O69T@CW~CF0NCYP)x+mPF)bj*oVY@P2`w%A^9B)?N_WPS z9esHdd4HjD5>fs$XEw=+N&k;}6_d0xx=5=&GlZQ6)p^9vd9(9_A}bmaFzW zk8gg`qtES#KQ+*iXXHv*rj7E3OQAwi|9d7GoO-cj14qH1^*#`X(Y<}EXLX3bAm}taD{Aa4)pH<0av`Zs>FZyqsB`LU!wIQVRN@(r)yW@pMmJ z|7}ubDff>2XC%pm2)%bHPXWSr+Iwn6Z`sQ5vj5bYcr9)Za(KqmRCf|&vUvV_vj0ZE zK(YBzEO}+JSVScOsUY>C1pOshttCjDDQj4+v+pb=*Q`dSA*8@MWx{|XsKj5 zbQ{1kFkjQ~QHWO@EW9ONG#yI*aH%&W_xZCWLL$rMpK%{4LX1A#NB#|6>q+pl>3{Bt z_T&v|vN(T2!RtL0a%&1?bscYoW4Fiz;HR6YsauMx^L;h_E^IY&VU;?Hg&@t?v(y@u z!FMj3`g^X}!GH5K*a?sYi@e#x)EV&E)!hIrv=1(2&tRMN{w8%sI8Fm<vT%$lEqRd2x}#=Cqg<5wo>#iHX?=z>*siq zQxb}qkD?ys+WO(p?g7g0T}=T7ekE;i4Oqp*5p26cHOD2i=mP3YY)gu|U8d}8HUWze zBDh0}hK=D>!Mj%WnKAWzYlgGOMPR7-K@@6q@_%JKjVBMdlZJQo8QbU5X))`|i+X zFg)_o^HEq8!%G}5>t4mZA?54>f%<4p0KrO}t8knU@fqNoTrC_?Pu3(C8@yP&r#e;) zc)~ihlTXq!vL#z4gwkY7e%DHP%a=2%1wOzmrXezpd%MJLR<$e`%_CA{M((lqGOD{C z^=X2mmIiP&yl=aSvm#@QlH(O6S{SPg!y1l*sTFLtEa=1h=f5b@r;QtGW19ImsCW}? 
znvjK9L3)CQ(V{sQ*bQBjrAFYdu9JQooqLK`s0w)xp%J8ec{we5wL`vue4H}AmLE&HcKFGgH{uwI*S$BYi{USAQyT}h7GfgPo z)evnAgWOxK$eC@11RK<^2$eN4DCYS~VY|FXQhplc_25=|{ED;|kcb_8rgaWbi?&1# zv7*6L7s3sIN|IxR4t|YuKDLBLMkC%2ad6!?A`NKAOYtaMvVhIY5C}hS*1+Xjwqvr6=2A0T9_>X0bWv4c#2N%-q< za9H)kU(?^Tyg>E@`X;B;ai7Z8i(z3j$?e1fX;i#2$2-D0%)1;usQc(|wTOftW5UJG z#IeG!@v$33jR)_;e(8Lx#CIyM=|YPRTLUao@)|ngu9K8ydSSxndK@$p#>=qLi2RCv{`9w@pC2!Dw#=rdnf4-NdAnXKK7LAe`)TJ{Q^LBnUs|8MJ zri7CLtxV>+Li@b_Y1Man?};FN9h!iCfkBeKq!H(F{XmM^Np0xV`=4hB>y}(^WL}7t zd;tFcAXk#oGU1rX`(lDlzL*FtqOhtNvJ==t1LSHW#YB4AO+|k=4*K}@(he+<&c>M7RR*R@u{HOR#OV!Je&fPXiSG&XJ}AcMS>;I!5mmR%X{B4ushe2e3IxQ{c8q!|+Kc>hJA>68@o= zsUw`~uh?)eyDM%)I0O5wFY{qi4`>fPlXHE;EID#4H62i&D=Kt;B{oOA4LZmhpLeo- zNfUTY#@!+XZi8cd*xgDX%;Qcbwt^l}Ef>t2tcT2~RJ7z`m0Pnyr+_z!QJ{p%DyrHi z^c1a1CkZ;5hQ|K8Ujdj}E02BIAHu1J5b3u@LC7B#YVz{&GN7Bswrat}KIHnUOpzv5 zMk!gx^Tre#Y~XxK9i}Z79&29)`Qhi18OikAW^DxbgnxBl%#`}_bOm2-eO+xk#pz4c z9u1g>-CH=5-tQxY_!bl*?@7O=u%}A$Dp{hHr>+2(x|Sn#kWo>`*d#4F63M~L1ihg) z`@Sq(Y6WmQQvucr{0G$vxMSCvvP0IH7Nm7jBoI608~qY)z3Q?A1Vj6Q9W`e9u^?xc zwc63QKXzSBb>X3XIpQ*Tg@~;kJ57(9rexdpf?&fl0(fY^XvK@`Ae5A4Gz>~bmBk0i z?lCmA_6&@~JOsTz&V|{#>WE1Zn+y}WD@&y!v4!XYA3>V6d3hx;8l0Pj;bGhEq_6wI zGGe*GV6_9R4bK@QU$KgNyeJqj^ zZG+Tdip1#@=hZ~Sq^bsd6B)0h%pZ|l3z*%&WdK8&-oV|cVHg)9m| zZpSJd45sL)PfZl)atg=+>r&0i_DNQJBqV$PKTaDA!{)O& zM-2E)C1ozV_T^~$UE?YnZ;(U>yBO52H{H+phFxE87l36|dLRT;P+T4Bl(3i~XxS;f z0y{K_LnsknhW2ir3r z9@ha5&K9ldqx&PJ~jXa$&#>N{ld^N3xJE34hhyG2mO5ILmw}LMweIId33wodmzkQ7DPa z?cNEjY;l2`t_EuoL$Aa+^TOKknG0U*PHZn^$*8TaOx#B>@sY91yIK&7K|LXZs!A{BQY?X zyZM(6 znL*Emm)wWkn(Ef&T0mbPitpqDQWXStmBC1tEXi*})s@ha>}94~qT)hC@(;tC3OSpm z|3J(e905C5&HQR=JNR4w6mcAm)aISx zsJ%8$<6^`u;O7%%%TcAt*FUkHVS>!U8{AUzL$#TfMoxwhyZ=Lryer9})!h&}YDu&( zr>GJ~(Tg4}Fhf`&2VJ2$S<}7HRR&xN_nFJOY%p}SGJ{&zblm3VWn?sLD4hwtMDY(O z0*N6-o$MZg93T<>4hmLSaX4a-hbYA`JRqgu1io5qlIS0OGLo&J61OlAEMu+hv_;L3LFF=ox#juwz zSfu!4zZyQZ%BSXO=|~p8hQo(Ub724b(W)`EkZ*n&^WE~XYy+}3Z3eZN zJC6)M=Glag*wJz+UrG4$U@D$wZ>%C=X7vx>mS;;s^`9(_a8mJ4Bkxow>((r)h8mH1 
z7Q-3q)&gC2e0mXj{W>)~)*`MD{u=OXZBlt%x5hUWXNZOsM20n_)E6!{cpcD$ z#X%%+jy(y{6{){c{a!I>gvPFWtwYOePBHA+;!WcA;h)9vcl^F%=c4F)UiN>c2%V@Y z;;aFZg;64nY6+yC)`Zq|5xZ%>hD+eQ->8Kl-OY3O50%SK-fw56jOZULexGViJl{px zI|4LSl^ws|j){x5u5K+*2Zj_cWww}H!x##llOreVj4KBDEp%N-e~`t? zeY8oB*_pivqjBgF|0>iar?B}_+Jbh33%i0T6(x#zM)Cm+P!m90)1Pp-1~c)BQ?eW) z&K}@ssc=&s>mqRfD`0^7nGHQs{+kbZ0iq#(ss}^!-p~m_f&jFe_+D+$r9-79`zRfv zE*rY8IJB6MrNu8peekYO{%uMCE&^$$bR%q29*6_Hud z(H?pNJT|&3ZED(Cy3RvEVsbPO#=BR4oaTVM)4-^1_jnw};Wi)TBF%r?w-U7(lROXO z%nFrQV8XvFpT}7g12BzwHy{%DiX%UNON3ek0*WX3y*WNdM2kYS)@4)-Arslk-B^$0)s}(4$xuqNb4vh-93J!m1PDoMp@N)b%RnZ~U@h8O!LG9A> z0&qoGLB7r6IW80@eDbYh^I;Tx{NPC#Do$`xUtr*{&kWGkEvZV>Yjsx8>>-FGI$Z>W zZ7qtxo--xz0MR7O$KHr2fD{h)MiTHwmE6AqE5=KODLA~Q_&}hugEiDzJb5*tDzG@A zS0`%VV?U2VQ6_im-kF2`x4ZpD8)X7j>P?0K3N=R7uEjjr<_PJ)#OQL2ssoGFC^c+V zxL}ho(8uhay5UloFx2>?WG)%QA0^W@Le6vU7LcOfs2a@$t%jKSc{&jdDRF-fO=}b6 zpSraiz9sYU{TX+o9B2X}0H2iwS|Hi+V#rPtLBatYJ}Db$_Z9PmSR$t@ zjC+B~cEsGF0_6rE35Kp^3Op;I3oVh8P+1uN^O7PALZJyN+?_Qf@jPZTs;qELkd+3| zuZShH*^>$DyWS|d;!3V|lTw-`8Z*Iz$aWg;tJgrSW!JGqkbJ+l~G50d?_I7q}^DIg-@hcyikefPyu`2WLc`2qvl9c76db(m}I3tjhF$}|K@1Q zT4uK%xFUNWigk^OI4{-clmx!Ezu$1xWkm8;HU}r9gHx2)@%Zsf{~tLI5b#J~hFkuI z;OKx_Vft`}3>@>cFaumXa&=*3I`f=rn4|FL!x++zr&_@6JWq+W%C*P=X99=qx zQ%rUcm9ZZ{BvI__Q>;}^umuBBu%XN@`o|SLBq(NJBIM(RtHR!L>~oMGaLn7K(^PQOvx-) zYgw~fvH;7xhEh9~;zPMqIu~HDMP<@priPm{wA`@SJGiwYIDi>V_0-t&^V=kMzS=t8 zUByjN-fR-M6bw5WhL_Kx4$oufha*}m6dE+PiQizXR!oL?r{Gd05?Y0IlKMcc)D92* zHaZVFVq_;N-f)qMVUwP)0GZ5J<4=y9kv@sEUi*q0WIG%H+XHp_7vIR_wJ82CB*9AlsD_Yzg>s%7n z^J?wZ%N87H50fkbvugA|#5GChjb(f+H8QD9i<7j>iypGka+{QXj9eLoIZ8lImYfZo zBVLze_UQMZYguxoFfniYr}>PlNegJMmqAs@k#bm3JJoH?r97L$2Z<)=Q!(XWaOCc? 
z8v1Zu#RQK5Y=&elyd{QzfOH{aau7Z7 zvjn5Dv6D(OL+G6Nyq-(^ep!93PggLUW|n5DhZ>!}S15-Ppq}2|7Ch?2*(oZ6JLB<3 z=(1H7an734mkj78PTnsw%i|~~j1*Ugr{u>G{Zj{%@e;IHi;IgtKF&-MSK}*rKP&;{ z08lQ@8{&N;DuTAuqNa|H2FIUAjsu(+ z+fC2aq4}D~z}J5^e5Hm6i+_+KPLa)}lc`{$&usRdw!8`t5+i@-d<=umTWA?RaJvPVB;qeSFo1-xm z>J#`r9ba_ApUSHjJqbu+C3EoVHXTK$<&iKt4ru>4VYkEMp6G|dwNj6TY{QXqxNRZ= zD}(y+F`Pg4tk73JnL%83?bfkIUScj-ke%LX| zM7ceVor!_hkWFI6@jx0;!7c|AxSX<7N*$#S$;wXSa(jc=8dX->An>G7@^P&aV-=j< zzDP9d9J|y0fi=SB{tUYCZL3U{ZW2rmc|8!1RkAsYs4LEyIUXgK-|S(RogKLD5<-@~ z>DMfF^eV^$Afi)d*xcEAtCRD z*X?rbmZq^LW0;SKy%VL@yOG+j%Y65ZZ5j=y4~Hp>xB;*avAOu9o!77mLC{B;o(W{E z4+TuB{O_n3BZwbg=;niMiAW$%5|=sVVc}vA1zMGU2iOMXDjGKT8rzHR2mNS$G6Q?nLl{I_!T;U3W~e zduEAH3i+om&89-y)Wq(%=zk+|YbSu?0e9lwED>4SQ5k;KN@nmiTC|HPfY{jk2)=`F zt&6ew#NtQ<1~W@#G_}7GE(3zqO5=w4QWQn3_vs%&orrR|?Rg=lB2%Y&(ulaKEo?bqr5hy#r&tGc+^+C$K?5SzM0vDKqVBbE%e3HORz z8B@R{k^r_Md=Ao`B2+-%r;4S&eIxJ>_ zTSvpfM#Sea!dqqf3Hk0Gi(~n_8JT<2rk|7Rbmm9sNKZ)38O;Amp*i~$rhm+7+jl=t z71tI)h^K3jO8!Q)+f00a-_-yGK17SM8J5O@%I4#Yyu99N-wWb~oz|DF2*jVm2%GVvmEo}-l?Akqttr9FOSFL2J zTt4$+&-Hs#pspy88@isQT*T$}W$g#Ycu*gjc8FqLAz zmCfn>Rlq&8SwWQOqhZ-O)8~8U^3jEE zs{nh>3-zHLWW?D*TPsToeVK~Pv)DLH93YFX+beAAuVBajy)S+9eReKX94W^LJK5|R zMI=eCE_3AMnJ~es{OVTg?R2)FUeH4%hE0Rq*8OZ%p74iNan6-!$BP{Xv()_~t}2%8 z!|QrsG1vQ%NI2xn2sjADBA(R>?J49)<4RuqXaQGcX&LeR<)6A?g&n zj`zs~Mfw1xH9&4B)A^Rcv=2J;EsT$f4gTwKeECHGt71byu6r?*`FQY$o#7-H$PnrF zUXD>}9$65-_vK6x>+AbTLgR_}c$B&PiBC6n}>m>mPU@h?cx3i_O36thYAOc zf*c}DY}@x_%gB#_hF_@|eI>Ig_vdx^zJQ}V43S{_NsNp!RhSE zMPs?jx^HI8{ouP>@B7AuaJwS5BGrjjK7NupDiG!H-kxETLN&un*2CuZ8%5`(K;8!l znp~~^pY~e&-N&;=40^qx8W7efO-#vx?RYuu!#+bD1_YkC5aBQcO}e&b3hsDVp^=Rs z2&HRc7~-vn@D=LLd+(uRFMCi+n$ z0P+R~k;vzfm{knLwH{vf25WWrqG*lJieUs@q7`c@SqQd&0C!KbKbx}rKgL$^cIpQ4 zIEwmw+GTWXQp*$N%%yU?58g@Qm9U`Zp`QyJBW7^ezM&V2P^6qjn+O63X^X&68hc4R ziiDqc@_s>d?ll?$JBmE{$8+Jowj^5UMMG>Zn(ce0HW9Bn<-+u1&tVk{9|^wizv1BE z=HoSH#ZN|=m`1{xUpEJgLSp{X`+jOG`a=DK{BbD!fxaY%&=%VixI9}_z=);UIW*8kQkb+5|Z|NIDj z6Ztf3E0NjIWRh7wSA^!e|3UlYGU?15F57Y1+Eg0BJJsSuvUKv45+sSPQ 
z-m5fwERoOogqs#%rYk#G=EHsgTT>+oq>lg(g-AU<=zmbD;)3vZ>KK=BLX(`DH9I|y z<@0;J5)Y@NoRvYMTH5Yf&$8ife-D3u-QAI(fc?H8D8~lwNSCQlvP+tXNj%Br`P`&d zw1RSU5A+}q+KQoJ%oLT?F;dVwGn6(1S0|)+Hyhw7@fS&UgdrFk+?eS^A-tYNoZmbR+@rP`y z$j~a{nne(I~#%*s*_AnRBJpgD1UMC-kayP++ zZFHvZy`TO%IMIMuzLmeCO2+RW50zc~LrE%p^8R$N9eIBm7}aqzS1 zXLXSSj+3R6OFD&O!997uho#Tvpoub;m@R)Z9%;c|KHhm7mRGKpVk-r`YFmSkn|)fo zn6@F(uU^q=9*NR__hfm6+o^{T_#{_pf^ItZ4-NcV)^Ek^^ydKMtHU>uqr(MDgix6A zXU*XEvFi7^iZ|Dhl-FFSi*4)zVfa!w(pJfgPjW=70<3@zFth)GW9-JNS1b>E6DgO| z%}tn@_GSloR>t9ZU$3Xe#^fkATOD253^o_?V#>$!Z4#FN^?ZNcBpOo#eeAE)5!Z<5 z4icyPulYQ;-hA#oiv#55Koj(+nzeM%#gpB!WvG70o<}*S$1(4i_0u`N%bpsO!c2X( zQ_C?p>^1tmfDNXI^?XYFc&%UwXRy`p$se)#F#eDHAft$*n-*Nl0IZX?6%C~Y79uoN zt>;=jk9YPkGVv9~KPo|qg`{I>lPSN}CN{e~9wN;3&$xYlOEKmjJJH8CcAkCIXO4b$ zJD*Lwef|O2@ZsdMJ0E<;152pVInjKDyhb11TFtz6nbQ)|8#y=lg;PL!)hgC zl9adtEPR~tSU66j`j5>@;3R7Mn)r9^5)oxyg%jC9?%Kkxx{8qGnWRX*Qznw4^p%Ud zXXW@Z$sDBlQy;(=0UP--veeU?p<(Y7Bcy`Hhgc4`@+00pjhUl{n8@w?oxa3ykWog9@y$&}&SKs(~G_O8Ia`CpQ2j^E`fDR6)BTNqG z#J`CBuzMBB0tF6E^3@tu0@V(LEFq}^Uq_b|lD;ie z%sgS=N2ykUl18CH7U8(uo>fTyiVKKK$3SZyN%0K=+5WE<0P{kn@;^=u3|)yXOw+jz zp<&@R=vJQoS9L`d%erc!3iiTTJ2`0iuvYI1@q$_lS%5qo{DN$G*TVX3R3v;SNl_x` zj!JIl^YBUAx<(9)b#cG1>wMDZYrx+8_~j|s6zcJAI5CR`x8hqIp^?A22m`zQY@N@R zZGz}oL_P4b-~%PczAy?ny5y;w3gSy>BO4BySwA*s5(c`t*{PdFGsmpIK6>t-I@@}8 zlMjUsKMpBj@4Q<5=*1ft%OzO!%$gS zRvESdqLV8kc4PXV%>op8x|=iu#b*dZ+N1!-+P{5APE3RNJ;sXEkgDaBsbx2(B@#UR|BgW zR%Z5C;`$21>}kWaT2qUk8JU2ePf>twnlkpIP93N}R*{i(>fHRt;UUd~!MRqglJEkX zg;5wGTDpiCmy(HzDRjeq2-)>kHN9(@UysC^Y?ieOj3T~b=g^+r(rA~k_Yow?a3k06 zW7IF0{5XXg+8D7dwN6V8g+z1V9O8^_e7fb*tf?TQM7QC0HEx4#sQ$=1%^gDpsPn4u z!1{&on?`jBUe{!>-K$yh#eZm0vA&Na`6n=-v#Q!1QYhB(oyPNC>E3qV$JzWJUrT4k zQ`J_@RKo63wL|C8-xdY5sswEZmofC$yswAK!w)<4ml}YUZg=gvA5#ioF`NHbcQ0Hr z>ZDxawbhr=x*bM=)vZ0K`~6dw>`%|tm0md4p-U{ZM#=V{i0LE{M#)XWW&Wo%UK%&f zV#@?kBFjvXNGH&r+y0W=|3Gh`9&|ah2vy;YyK&CZ`VySS|9XfLszKlT(IEaO$}nWM zBhnuy?hh+C(S1;ee`ZDEkydxgDx;}O&W5=A*CQArR>ZCRWNOJRw_DE33H`^gw 
zjzzsRLy)Q^=@CSPIK{hm;^(H5B}o}hQzMP7lZdC%zx^(&QDmv(%PTR!tMH(>SXeA0 zt{}FZc~YQTkbISokWeA^cwbVvXedG~3A`@Yo_Qvc!8*7x5zuU(pHmY9r;&U+LUsq=LioNaVY(7SPAI_WuV_ni5DR} z6FQ#A`f8x5*AZ-Vx_AVT=kyLxu%C|3a5@e#5`}U279ugmg%reJ<3Y$16*8e|+U4^A zSlI$ge#JCZYs)=6O1N<+ok;IY`EhV&>l;(IOp>HEJou=109_6~Bwq3pf&>KR6yx9R zGhvI}bKJv$9BsKprx+|K^ZTH%S^nyDrey!&U&$7SK{s7p;i$sJ+UT%C&ddFOyuLP? zAO`1Xzz6Y(n;S<>pHv!Ljnr`t#ZWy787(xiZAGa<>_qXD+?W*g?FjjK9D+9n_sZNw z!^f7f2~JJGGK^XON`Wu>{4|VdFT1+9>9~6vARuserzA?6aG1@7^}t^-{fzl7wJXxn ztS~5?iauG35#i?{xQpN`U`m>6gJ!;$WA*jIo};5KS}!4W32@EVB$~w>mjHWG4fd=} zA*DeIp}q$}6@ljBj>jxVHnRcputk&~1u-BiW=r*I1SOK!((7CzvBDA(3XRC|NY7xC zDO9nB^!@oeV{8pV4p z=cob5%xFhzJ7F6`!+?W-;f`3;9nDDW4``Ysz&hR{moyqiUmzsIo8l|BlL(&G(7oU_b7BB@ms4AYl6i zR1<{;Z|{WQ_1%C27^2QVPJjG4#85R#huvv$>v2NL*aKG-1}{e_;UKow6x=-5j4J7A zjbSdzu+AWgFIa6>@5x6w8kei`>m^7mS%}S{3>Q|ekjSw|D$r0-pu`{CtX1JXTLmJO z_rk0Pp*UKG4~3tXCXUa%*1j%UzQ_G3NIXa5%lsAulp0(YTo8UI7?B{k#;0CO$}Q1* z8$tLOdNa%Kf(F~)Q*hvs1*_u=ujdU-X#U2Lv$<^(1gnG6J-KwRKvm|Z6O0bht`cV3 zc(s8+>k6D%GX)M^lrch1Tf*`d__Wp}oRE5YQ+t4s#a6M+21^GY_!hV2*taSKd@6_@ z&|{=-?U}!F-&lTg{aTkdOJGb93%>1FMF>?tAghrobge*S&>ppIFnSF6@hEZ=&?cU zez~hqxN1@GB9)6|;1`NSO02tZ z9r(Z>(csbRT3=MfwjNwys&oy3xF-E-NhUdk#N!mrqKRq+79nR^%o|on*A5VyOotSu zAFMCaTQIl;&CO|$ONyuXnw=72_ZWp9IZI*-cSs>`v({iHTNsh@FPZ&&yqamEUGi5e ziNxIIi(;LcUUK~3HOO2mi;6ufit}G*=s2d>T|`}6V*QNZ+cvGu>@tOJl)gpfG17AK zI&OAx5*ISnO;pt!2jQs{d5M^4(SXiLKj26oima`sFbgA+OgIii2OfIoUx&$7WhP|- z#MJq?My2@i7+R6aKfuCT5;E9x!VhA*0~uX#WU|q8T_h8U9d&fCBYQUj^aE|~v!pbm z-`t5qPg$jp6(xFoD8{6;_&YWoYQMlkxo5Ju#eq^LSV^vi znuGlEITS-f$jPx%e4?s?wh+X+_2%$6JD%l;rJ|SInnDl2=i^B(DjN9q(o7;|Cy@nf zX25@ZR^3}~vxa2k<#RcRy3wWA&M{k&Y+g*Bk=JI7>fg%|8Lv6PY^35Qk2!iWfCWe`Da>!? 
z&X*n#XsQn~3s0MjHlGMw^n)!HT4>ZVb}=Icp38TJG9h{Hl<4@YOb4)R>F5Y0%ApUq zyb5kZEjEN|;6YBODMuc>9Yv6qDwgTG7>?I;JaByY@PKhNjo-C2^g^$dy|+m=XHA#I zmg`oIx9cRJb&9Yu#-WzUeh33=;?`d?APurG7U~}kE$J{rcykAe(KJ9%$FP;A8{?u| zCs)$Ro+Db}HNVy2{u)uGx@xkU!?gnoC&C-;5!NN9^nDF;e+iQm81sS)uxO5!*Pub+ z{(VQIntRlLbHTmWE~aU;U1(#?7(5=w1C6gCFwx~&NAp>DT1huY)i3JAm!0LP4c-yR zfsji^he2-Tq_sO%a$1B+hriUtDW-)u(5wr^u(&IP`v$sS#q6Y4qD+KV8ocF_w24WZ zhGi(J)|$X%oQ{NC~#0Lbpgk`GBRNnD&HKmWh8e#|x~a&LG4;U2K=4cx#=jLRGTZpEmu;M4MdVz1n;>uP%4e5F0QVXqxXeWwh2~bFo zMa-9WveAhr1Enfcj9$reTlvbN#+U)&wL>wxf^z!0C+}%rjE?e1(TE#OqjHKE>H_A+TR=I`9%HZRve7 zcY(@j?DlWOQear`gMc~(Z}Z5yT#o~*9%*3tclX;x=vKB=K}Qphs|Vt$D^VPh_@=`! zk+t6u%r0j?4L+F0c=-EoEgUtiOJ<%j$wi!3M5p>)fE+u;H{{(4%I>i zy5UBd^Vx{|=#0zawWjyO4eay>hD5*AeaN!UE47x zZP*paH-&w(S^*lg;WKWXAFEK;%cVrYGtL=RxX#0-`|3dEv4J30rmCu<>lqf<}>aEi(E7>OgE*}ZKTGrnSa$9 z^~$-jL7f!IREK+TBX3_!NFYA&8|+!JUvbSzoSE90xdY1*ZU13&YPP~_4&GFVI}C*g z0sD9xF#>f4;=~h={=D$!@cV6--c2%H^Ilg4kzwoBBnr=BQMBw(H@766W2=Hma>2Ia zp4P{MPRd=$fZ8W5==9X~|KixUUdDXjpyf(GRx#j;%hB4h1lb&<{Z`ohE$VH@^{;5? 
z(!hqZ@753l*}dv*bJ!)U3={-7z+XUskPrz83JMGih0j5gw7;tc=ttPbdJSHqjsR;&n>W5jjrPM#6+f5s!$?h=Y(P9H9p@|*k_@!9o0U@Sf|kELU|ffX5`Oz`!{2J%;v#*@30 z=+A%85)#syFW8!uZn;Ve|0a^r4bko^{VZt#Tol=gR<)SX6>V)->-GdNOlLYSX`ytv z#AsEOV9EO%#2gRVdRbut8jS*r)|Gd@Unz=w^x6f3r{k(hgu2NDY-MdN;*oam_9D(N z5iyMuH?{%Vt-4{2yVMaowp98KWrGHdEW&0TpJC!!hV8>cjad_?Q}h#vl zD>rbYST|VN=w*Zf24|GKH}}1e+Ro4)hGi#&PdLZ__h0*;6g?4xhy`M@3cnGhO&mEH zh&>fhvS*snyS&duGrM=9Y;HW@)4~-dE{j#DST5nn0^X|Bs7)YIT9cJHVN26yQ!0wT z`U9MrTQn@qLR|+}T1qkcV}@+haJ3(9F6{4}@e34eTq#^>$;QE|_Cg8GhWqh`Xe^>V zWkp}>?>rlu+cXv=SF4u>E(Z+q`4^>F(J>;+%Yat(>6FQ-la-3Kurcvh!p8lO0$@1% z`D&4$kMRYcV`F2rU!R)|oCU#lx&!`0AQeU+Li_Kjs$)kM$i&QWw=dx>@^pr(8vmA) zlXH4oKL$CEym{s|8UJ`m@8Wz4TZp29#*MbNj;%yG;7wGC}z}|^L*|`gcL^* z|5QTQnvc>7+Jha_EHstPcNaHq>Qje`*EVKD#N#?VI{kEsXBv5e{MJ%L7fSWJe&jse zhAUO)cf2c?x8Ia8Z78cZZU!Z7JagfTrHyC1bfAh-bN}3;_QS=+i+bB??j4$Na6ktnK|c` z{=)jzhyq3EHjA^*V1|N#2cfaYTCdlFB(#p-nr$o(6mTK0t zH=JWjtPx!=Z+|VMnCZPeM=UV2qX3c#g{FuahOU&NK37@zOV>sVkAi}d>wO;~AHkWx z>Vu(_5DgNzX4jqahkGLO%(gU^=XGjn>65?9?`X69;3&PkY~1kAv{rSnU)zBgbw+xx z-{n#{M$f-$sJ}y8!w4gqI*q+`+BO^GU*jowWx@OK^!t~Q)aw}x)WP%MsacZl_n z|63maH)8)jd|?A{zocn1M(FePV&0VWxJ_ANgRcCu+zc-Vhq%RAs1XY$DWTZ=;c{HK zgNL1u?Wq%j?Ox|@rC4<77?r|el6k9U7knIuVPo@If_4W^tD^!L!^A$DjrXZ3xG5T# z;kAY4SYQT^&NC$TCt#3)!tt5WsKVY}KRde_b(5Pljk3Qtlhz+XHwi`>y11vm#4RI^ z)A_yrtwo@;+I#EKuEMr|Y`Tyo&K9m^P8T_YAG#lpq0FA%cE}*(ENSFDoiVSvIu3#T z99LpC)t%SDyzc|2>*lG}saj0G*UBo*3R|F#Y-QRN!>B2D zGuiEXKJF(WFz^YZuH0}AbeSom9RE>I>BkAdaTu&&+<73^KDBepD1# zCNEeX-nz@|nfcrhAKHau{I)O#ZVqp>6K2QN9H0Pm57{FfpmqPt>+csBHnPnlwP)?J z6`I+KhG?xq*Jr&Qhlq%nrvjXK_1aKs)iDl&84hnoGE|yTMEEkK$Z=8V88ls$WE1Y+ z3u715T(6VhHd3RTLF9>GLlvj}JBX~jz262T;qllwzbRia$w#hIfj#SQ?FobWVWEbk zVSuCl+g<)Y@vub!o&RwlzLgTpcW<1#!udu8V%ys3bc@CH$ez%f!UHS#bT)~DTjBBo zlVexCPU0YbM*L#5#XVv>98^)DCEPqt%g&zeoD?^pDeB$wfpp?=Ivm^Tbn_wZHhxAa z#Qf`{h!dipC?B}$Hq-JLmEFTYHTX0eA$PJ=?6Qsb-M8^Ie160Y zFnC0TDv3H@QZ(-1IKp4ARagD9wC_s3$so9#B*1tGA2SwG3?gsqR%#q%^w~F%sVB0lI9!IYh_itD-wz3JKn-!+p`bxq+wkfC;p;7c>gtwt 
z(Le|e0T%AMaCZsr8Z^P(-95NFB-lcN1b26LcXxLQ4gucE{`a}}o}KJhHH#{!8q6_! zbocn$p+bDYNeCoAZAh8;`J*c#IX0d|uZiVMv`DDg^0@Yb2kLw^}g z>uq(K!DCRA7aN8W`k6xfDI@3A&Jl+`qFXsiGh&yyoiwgdEBfj+Ac*vnaZVoZ(XsE+1|=V0`jNfYy775fOWBf8@m_E;`Mb64)C_TMUWHS;9|xQ z7Am!&9||oV@Lx6z195hu>Fn)?9q8K|Jwl1t4zv21#E&Jz@C3!hHp==8G z8p5$kYGmGEW_gMpyZy~Nu)8zV93CXk?2;Bkz$;#ZUB5d|i+cyzI$$1ncfMZFv!<;m zybwmb7tbZj7EKq;ZP2T0|)2B83l$joJi{Te^>_pXcJ@>$P9>1ecU)~e5!BKdQ~3tzm6zjaGIV|UEL}N z2M0ax&PZ$$+#Kk$Bpjl|%a!@$@=0d|5Yi~?Wl`C`B)hfA9db}v3H4rV{%Qgbq(Et* z&eq`5-VdcuH#9U{=;@eZE8Gf`as6 zFxm(Mo~RUwA9YFTp146MYwDU{WR2T&Wir&*90pfu%lNxLs}7%t*?)n9uc#MLmDG3! zU`&YZ8qW*;uH{{-+iOzr*iYY-TCsz*qcfp_r=-2fqBj+S-$hys9%+ z3u=~M8ZBy<;I&4U!$US*GD&>D#_?Y-9!}>^0y#KF=ic9{%h#t7d=}H|L}M-?Wn&h+ z(VM;S7oqh6N0)<$dT@6dGA5X15i{G?mM^?wNKwqTKF~D8p8Wk5I>3eOqf);XC(rt* z#gNpT_#MUm4uuI49m$|ag7^74`}_sJ8w=tD~E$CbO>1>x+!TeaUKV6g{x z3mF>Xv$C>+f`S-;)S|0mP$L%%h-8!=+RZLeU+=3XdsS{EXG(IgQ($)=!6OCJ1$+qY zJQ8%Sl{~k?D9dzOm&o4SO0uA&NFn9j2fXL!^1QW*A*UlZ!~?^`OAdb=|NnA_1^D|i z19#@GktTsUT3S1E$-vj!?;ZBOSLWbxC>%H*VyFBrxLtbs^Fq<|baDv6sVX zL#Nt@el`4HW#XF6sWcBLe1pkwy-NuMR^U$o%;Ccp)PAGHQoG=e|Km9Lzu*bX1RHw9 zdif4QU*fcr8CiKM{a^i+xUuyDwcsJbA<%rlK#Zx>cXQtu-c9%iW!+WXjHCjHu zF8dYuA9ilVafOdjp@d^*$^2ml|CdMO|8A@`&{wge543T1d~B%DIxhIWN{`>Vvl0ON z5$!YU1{S?ab(tW}Mxt&GZrF^CE9tEc?DWrL>J6i{LgIq&V5`kw4%2qUL)L?2HxU|b zL~QBkh9``?Hb{7Lu2i#Sm!y3l>620Cex)l~dlYr00irr0cz7vrCkf$88fDtVz!+Sl zgOy<Tk|sQ(48yoJb-_tI#@Xq~YEt%hOPw$^Vfl*1C+ zGdeAYz4wNu^|;y_U0htmXk6eYs1nBEZ49{CExQKbp3M@6E+D z7rsjxHbxI`td&*YilV&XUh+X6rh6c52mY1~D8td@HE61}$8^9G<2eksgddQ9VCud5 zf4L*%$n>1zpK((R&cMpDv60(J#S3{r^ zv9tE1HwDR;GhSuDw)wpB-*PhOwvEsp1?_)s4Uy}wOdxV2L4$J;O_e--*w6GiF0Gg3 z14q3|=gy6UJt(ZklMRL5scwDJaqo@5&R%Ih{k9YA#K!^o)E?(y6)ODZ@45{I<#Vv_ zept)fTPRgt^!Jyh@1qnh2@}1EY;I8@P2%1N;aQq+XvV$#E`SBU)v-dq3Xy#E3HGgV zZjF}9@L z&z&$tX}qq{-xqb_gh+T6TAEC}BHJ4M5Uo^2I>>Lb?b2}aqLPXoyS8eQCo=9b zK-z+mcaIo`3pV^yUx7}eLr`tP@A24Gw(W#JcDf1eDT9RLTPoUaYs;U(LxTIA#C3Hu z$LR>|94|52_Z9=H#iLQInlXMHE7E)lpF*PSUrd?n(h4ibi!)-F%wJ0J0_qNA;K;nC 
zcx92Km*&)z(=nmww3rbUYekcqV zba66-LnVoAYn^?WjWO=|&csPM4>my0e`o*Q4QpQMwZFLAoj60Ocy?p_UE07r)L}@< z#KiQG<{KSK&L=|Fj*Ev3@i#xQhU++OR@`2n?@l)urGq9zV=cw$aCJvYnR}0SvFNo5 zK2dF&@Tw|^+5YNpu4o!{g4pB2zz}_|#Vd7r3&bwf_G*1OEyt|hN@-NO=#LGQlN86l zSp?0myB{Y>^gV%Y95_HR8igL!iQ{{Eu^`v;b?>KEQ^wbaokG*>U*B^u=OyD%J1tx6 zT&$wr?O{0rCL+q)UmjEW?urb#9&!mH~F@L|u4rm~|ySuNhuKM2My3fq+ z1P(Mu`ciAOT~EmfJS~pPXYg@za6n@rMoGd|enLmXpRhAY&~M!Vd(oW@uOp{_VLk8k zzsAv;^a1-cs2j8;hafvc1*|yrJoLj&^G(<8#|6ChXUi{CEDA7ffLOIyxYc()PkZgU z9$QBI5#WkM{!eCXuub2>lt#rrd>e}-9MX2b8y#-^b#REF$kw70?}j{oxUOMN`CIjq zN%n!$Nt4Rk9%+_()|LsKLo==GGr?(NJFu5XH?|6h5Gz_nQ7s_k+SRi3MKy$APU-#o z4a(PF;&rhEM42OEIF;Qf-S8pr0>lB|E>()J^^QK-wmd)9>xiBN?)h7rUd&mEvQFE+ z?ECOP@1$rYF1J}RubRzVew6}OmWea03$XCKtah-{r6UXs~(=3g$1vNB8ur4N@X<8}%J@=Ajej`zV*|#VBsN_p-9`d1-JE z2!Z4Fzg|yT;gXH!)$`o%LPtu5mRx7Er`kMFIT9nSTl5SAI$f%p6A@)<|M5`PAV6z`Bj7T z1f1{Eb(cLlQ!6W}!a-a>Ju&R5sN(O@(*?8gOQjcGqn78Zm##vq!w~}Ch`cN9zcB47 zzi$U?Fm3SoP;j%`548^leA|m_cHKz4E+=l_)(6k%eAW#1rq89TMhKyqi}^kv_<$ zHg$gLv4Lsqlfu>iJV`iQXS3#X_hFq)HI)b(x*apk#!h4(C&s6oC4JeZIZC&^tYHiO zwUR?crgxH@IK_?ayx4>Sl9c#=KXzwu-_iHE$10aZ^%6URk?XQS9_qQL@g#dW9w?&?XAp-X}1?eT$al z5vM`)&+*K$u;?{X+hH}XSLQHI%M(-20ZQa)!Awt6rWUbIh9^e9+g~%NUgj#_%+yYmKMY*oI?}q^>e|_a1M-QIW z_xZBszIeL95D42enXuAtOzYbX=LGg+%f(kThdE(=$4Xl1GjrHI>d$l=ip#@#8{IDH zeQl@hOqoPK7-6oHtl)Sj?UgquRLA+*YwLcKsX#-Wtuc3Xu^vW!v9(oF($%B-ojoH0 z=Xf8wTap7r0#a4iLtMbm>UOdHux#asgpO)&H2N_d^O^soDVW3;EGUBOBv;6A<`ebQ z{7Yk>Ak9L_k*^5#xm*~f1&`=v=h zu?=zU+4h+K^dC(j?HkA*xeI<)+)4g9+$|P_#1ZdbC4+ENWW0ldzuOWJ_}bq%LH|- zh`~E~8?FBUY^#WmIFF9r_zwdBhM(azyWFrbS8VAZXgy#Ky(VGQ{US>u%=Nvq0uII;Z!)Xwd&1ADchhCICyoMGc& z@~r2|Cp8WGj75nON>NHzF8wn!%G~D)HN0rBZ;9$x`pTj>ln`2VhPLXr#?|z^p{?BM zL*Jz}fW0&aOY2pcUX7f!_$<3YW$%{S?Nz9@S+qiHvy+W#y6mqo1uy@P>o?l#yVKz? 
zwx8xTniYX|OZDee3L1_82vn5+c>Jp6-Uy*g!@W#?@PlcZT09fI#6b)!!RQ}5HeL?C~yWVbtF`jFPcjd(4-er8{o;E5|e80p8{c_ zp9o!B>a!o3r5J3K)CW5%CYI_bovgOhS4>P&Iy*C#OP8wVnJsP=Ih_GG^Qt(Ei=qvH z#ARNrExEX@t8YEcj$qgtRp6pwI(~qpYjd=_y}A)GmExTG(HIkgOt5w9j{CyTKfKZq zvj*K00-sRR>+Q5uMkxi=ZePybUF9OC?lPSH?cwRbCReq0uAwX~2<6%4S z#+EzKsD#pvR{kU}N8%1kfIVF)u(xh{c_Xe z{c7vwx=(FCzpeC35V{Al@{9b+E%c8@Qafo3oZ;)}gLBRBoYQPY$yz@n#Z$FM7n-7Y z$7j)?&E|63%Rtar2)CX6pbHx+hmvs1Jv{!dpzq=?ouz2v3sr^h%LjJsEWF8$0cer& z9SNVu=}d6T51;27{+I>Vo(%ZvT2)+aNV1>ZV+U)*qq0z9@^e0-dIc~AYxDhXbS8cU zk@!q?0&GZLXBCB6041D{V;-;1n%T~b!R?2*iBfZuN4+>#i{sIs34`1=bdK(lz8Qce zaz*iD=WcLIF6>ZT4G855Hb5ppEKK)8kA5d{cJ`+8M{D4EW{2iZc)(0szu(*Uj^ z#z;xdRcVpN1{No0b!?&gDvcC5$qzf%q3O(2Td4rT+$ApOiUNkL2+}?ew6zPTP(*K` zY?P&4mxfja*{7YOYo8WG6cJ9yU=IhWSeB24;5x-SkHN0C~`!>P({T^q#6xtaW&{=RH0u0;q zApF-6e~`VEOVB0I(F$xmJ7ySLU9>KmJPEPcS!seDn$fTJ-=*1R8*L6eYf%3}j*VaM zR!#hpMQ|rR@$}8tg6+p1K!LBbRIAdU_x;Ppdj}ux@e1=Zb^~4OvjHczDkyS5%t`5N z&wuimAk-KI#5H=&x3DlcbT+mm$Hm%54K1kH&DgNt1Mk*7&s*t&gDoO|*jjw)lrmQD zc9u;nP`-3i&bm)g3E{^N6q`iarjs_jf}&~~_nZq2OB|4XUb2Fv1I&iS09nugyWEJ? 
zsTmDljhr}RfiO+`#FgkGI#cqQUmg#^a`zj{=`@<0!)zZ5ocqylg^$dPr>bndg+X5N zzFWOpbtOt{lYD&(`|%`U)N(O^l)pi1K`XykUqu4eihb zSD`Z{AAa|T&yf~nxhz;yAC>vvVhEPX4K0A64oRn-g#~O7*8Bqnv14@{h=XLnfSIiXDUELnn5>9A1|mM9b~~-l zGs+73y~E|iZYYLTC-gwrUWy`h+4-LA<|@W}nKnLY&%NHBR+JWp-JyDLendeTn6g;# zFF{y31J>_=f(u>*)%Q%2;^C|v`|*=5{^wdOE>E)W>Ylh7zL6(u(lb&mmsUF&A{ZLi zH-f~bIL{NlF1Ys!2d|tKP$7H?{0`6EuSFk@pY;mXJA24g`0IIcATP()v0T21(9$r^ z>Ie9x7(R%>9x|T@cA-D-OGlwMlOZj9Rbk*bWQ)&nJMD(r z$dP^wU(4}T=IgJj*AR?DWnwkNE`e|Ui*UNF~7kqz$u7zfh0k(4-~ia_9t*L<0_+4s+v;7Ks6<;tcn*f!m@ zzV)q{2-BC8+__9#*OU@g0(|x%WqnQt+kH{s=$H5XX2>92p4T8n(#EGRKYP-bjsYQ+ zl)(23@&0nB>8-Q@K$Y#26Kfau&a$?{UK?&+PrNurL~C?yhtNg>HpcLH!(krAVKxFX zueH)>eBNKDwgPq+6Z}dUl_a-@S9j4LzQGtTc{neN@15=GNzz0z{U(~1BRl(Fh$i=D zHpa&h%JyLMMAM(~;54N}Kf#C-R3Q|!YUNBK+iaaqglkpz*?_3g6l`zw>xRCc_2 ztECG41RV4Rq|0$A-suxM+8He>Et^7Rg8fX^p9Z@6Y_I;wX-4ZyUNAJGhGm#LNFZY< zsUab44yjlpd+^N4&F5y9EVn9tJoRyKg2$ID(0Ys0p%Qc#_D4w=xO^Qla0`{V$)rN= z*JSbCl6L#=8#a$oDT)gf`fnkbUvqs&EHN7b;JzS(O|;bp^HOFKQ{I|Ddxen4XO)eF z#z3krAp9Nk<1*rFCywP{1cFL&W#UF68jf*$fwamRxeTuuznW_E9pCMYvgZRe>2Q3h zCTIKE<8c+6;l>N$WY68G7S}fC*!`6#ue%Wsq4sZ}d$(p`@+Z*UkA3qKz6;)CUF7|T zz(0?1tqwCTV=fmD49k0t?R*AgZ56^6EmAbEw^PFYtf%%PYcr1JIB2g@#PKV>;tbDZv8SH<4NKO>GEjVz%zGV^RakG|FWlIQ zT0q_4Kl!w9Wrsumlghcp!;FS9Oa7IgXtTxxmdX309j)Jnjrn+9fXIs~M8s1i_^ulP zIj`97Tjv^b~~=}zhAp`VlA*@sdgTwcJ-0s;0#OFy5u3v;@Tkm zAvxO;6ut6%UsbEgVZAh>2L$%55QxY#Y-2+#o&oD)f=^m6Mq7R%-8}i>o*|UUoQfy< z8g0e3*8T;Tbu?1?%JO~qAlI(re);4mfOyjo3rF1Hjub6~Txk0P05N%)J-935PG;SH z;y|-;EBNIx-N)xH?vN)22>et#-4N6f5>zreX&$7Qu1K!RE=-5WBQzm3Ra7En)BB4< zmdwlin7HsNuy|nx7$jpPACK6IrpQEE+|VaV8cFuDBZR4a_c(8|TwZ_gSl?;ZD#&-Y zRieX}oSxLBP{e^7@)q(-m5aoF!f<1ZRg&%(_ce5h7(#D{yX|H|pOcNsZp$Qjlg`iD zNm8GQ7kFpa*jxb~a!7kCPOCe{OpX_JuA~UXr0+=+8BbgkWIyREWnDQ;3)jEVQ4zJ* zaI3aR6D#7{)K+9u*RolvvF~O-w`#aMYZa4jY>ZflVkzG0T*W7R(9`LzH~;8(pF5Nz z0lp*-R+UF3|GXrOi8nCxe|?DS`xxHe*M}Www3W0ePq)P&M}==?LV*H>WwqAeUQN&9 zQUX|Uc=Eo7eezPU3gkXym8Pd&P~&4=WkSy3iFWz$Yaq+?Ht_*&E?35mv0!L+SLC!L 
z2zKF6cH2XCdTd0cncZd$t1dQun90R^zMwBz!Hl;Us*6Xf)$1m8w>ZCe!_3_WV`(aI z1$oNnls|#+eKaPrr2$O3>ZnL1TKn+b-3gy=DjZ7~W7V2I-~}8p zs39DlX5pvtEoIS)?zz@*GqURm=jsK#lDKwW(KPYhy8F1ctEzLlF*U;MWP#kAc^sLP z#*2mWWX^6HNAik^v)*Ff(io}V*ArVM_wMh#a=kp*R~zU^?M|Gj$|9O()a}mtcSg@k zNVaV+7~6@+@;Y46B-68<5@+qlb!W0Lmii}or>=fXmG3S`EK^Ij_90Jl?6Wpt0x-2F z7B_5`%He-OZ#2l=_kALkSGHq!IEf6PY_h$h zej=PDp4u^phGa{y#-C^x-YOVC4aEQfPA79;D=dOMoJX{nbe5yr6LX9ZG&qvYS4K}p zX(--_zs=prEuq7u1+kl?pRQ-*pj zunqke)dskQ$lxL<_yww1b;$BZw2Th{x-`0&2@K*jtoGSmb($ z;p);On|6kMtoopPAixBuspkIGKn%pSnv5$?;sjZ_?fXVcR+erwKN>?dw&x&rhb*aJ z*bT!_T(4lNp9)dO?54Imy?D~l?B#b}8qE9HHPODYKp7OS#}T|&=lc6SVvT(YKo z<#Ua1bL}&%;1{>3cR@b3h-(^Z-8>UO$b7Vk*Tu{sQPV%Mu;@k*%Ec5D%VivygxNTW z!6*V;E(hs^&-QmzLx~Z*FfF}T7wIX?C4Jd7s=1mI%LKwnJZ~3f1}HH_B~}W>1?w_Z z0#kKu3`LZD`(U<63TE!o#M~7b-5Y23VWooZyv(2@1V6p7)aK)lBgC=_;WtD5>fz<)gE&58ja!g_mbDHCNpmP&GpXTTlL$Q+Hr zx%2%S`zU8-2K0k&7gojSx0s#VWCi<{`qrMH!LjErPy5%OpgdCcAxZxD+TX&+0sH-+ zR)b!N{VHvQ-yM*Nl((;dxX*7nU28S5Xr61N+A9j1!BFy5CiZ=No zt4c0`{b33;wFGH0Q&12LIWr@cAx@G}_wpQ8gq<2kuTgy%+)7_kSYWlqA6dgD2DTp}myCm#C77R~xkKm-1ZX+pt@sF5>C1>OrR+ z!<%%7U~)|WZ0qcsY6;050*oN$@}hzwu5!|^T~)};ID%-oddO%%V0`{z7*!_Xgy{JlwxtjvX6Tb$21#hk<>hM*GfC(rdcDUWV)`b95Bt^jQESf8J6+A-W)h| zlu2SpPfayo3bkQ8kDYnSCew5OVZ|SYF2=;N{>CTz*L3!B=@4mH4vVZLp2-7uy(?TH z0}i*f@w>eqiYmj8+=GixNH7P(ivG}%G%fW_i|q)SW-@ejd?SM^09@VI9jVj_)bG(-s|i_l5fz z0|HhOIJf*Cex85*ItB?*|1r5YtHNTo2%LzBJeC%80MGmgHR0r$(xcPE%0B)Bd*9fv zE|iC>(Fjhxz_1`vEh9traQUV81Qydr1})mhdI6DePN+ zo@0-}&!|fQs-MUPJ8m3_8__Zr_8((vHN?JAsz3F};?z(8GIu%gLxS5&_yN@?_*vWzY4+vBkwf*WD3)DXTu;u60EvzW=yHeg!0KjFJngSyg*~-YxOfl4b3$#UqhW1yF~oJ+2Z!aIdMPVE=LdyqNrBHMuq+W zQF@yw#D})M;TYgvPjRv>b~Zx>_=AER8~{zqGwmAh=}D2NX61h2D)C9U<7BKy62@xk zI%v7cQ@)!X@TA0*l~d?eM70@%G^(mWggWipjqc_g^4=CMn^ZOIJ&BlHV+_pIFf|@l z)@~XXUZO<;6B&ODJWR66AU+ctR7sHnx&Ir?oCe zvJ$!2qWjTdf4$Y(=WGhcJN3?3Ed{ncuH`S2#_t7cBGz*z`~lVI)J?4Y@u4f*hiaIk zXmN?2S$q>#3b0t%qfS@QVr9K-+>1H?wEa*ie0fIpz~f-? 
z@V1Fws-q6H*h`ZFJ3eObmuQLLf-9Znw6|z04Wurb=35-@$lfQi_UDj>4l|W*(}hj} zBP+rWoQe+*yT|#iywBa9;?A(+fPUKOV##ajo2z7_>3-&Y$|0nJcKLy!l4*9WAuS~jtZq>onBm@ zFg4&0sObw!_Xe^AC#aM5?E^Tc;kD>ftHo$4JA&Rv+%5xRJ%jI!(^ZYR;~745ltop5 zgpVsd#yyaxozvs^>o)^q&;8kXMa*d83t?oe#RT2?9ARbPp=heZ%hXkN0?|Kk5-P$q z7SM%QuMS#W@zVovv|&vDQ{wsOoUd%>xZ9?ApoxF zGwi1k2U;{bE`@c_fjtP-bSi)PMN#~)WZ%?ATN$)d4n!01E}hW0sO)D(4ZNIS6*J~L zOn5nLZ5-89=sgJ`yrbBoAys5lpmwKJLcj)1izMU%nqzxcUP%Pah(^E@tcQ-4aZU## z)Q;m8Fe+N+xj|T`jJBGlPNtX9rT(>{CgIlD+9u{4xK`bh0F zy-4YkFKe*ER!UPdGa~anM`pa5P_kpr3uJ5_8=xoAgw!odv`PvjP{dukPR{tn*5Nw(( zSp5x(I???FwlMk%GFng9=0>jefE}Rw+lWxSx~9Z^t-d-lMn-UMy|zprG*3qYMH0V> zOw>-F$?lCWU7Ax6*m`c=&5co@hmgbZ&bo2 zG+1M;6Owi@nObacQ=b#-2jwEw&+a$)aF z^nZq?%pT1agNx__*-qX!vY~{w>hLMC&Kv#nDoj?ebVNLynV(U=^V zo}7>8PM7OruB>#Zb(jKX1^>NKJ!J55N4tZ+-Pk%z5Se5ngxbl(!%2*kpk#kQxaYCl z15aBQ!kHwJ*gs1)PJZin7k8S}KNMKUgG`4^$Cnq_PB%cl`O#?@H-4MP3(M5VKT9#m z!LLU6S!ed~&z(oa`i_Gm*j<{kJ@$OyzRuSNiarH=TVif9>_)#2iAxuZGx&ASYWArA z;IWH%w1OhA8@ob-z+15|<37bSVLBdS3NlBkg2(LB=?u2gL8XMYMqbx zRPuT9)CKGm=IZX7iHi*Ru@e`ZHTaJON{9QKYeq|_1piIT|K|`ex%?8Q0-4hV(RJ#q z7Qxn9i}(qxB(=`QonrUzc&*=6EZE<5b&+Q|^e6R9Gr`ombzqvLwhOjCU8)5KNPim3 zdt7Z*ueXwh{{u!lB8Z7o?!`KkmH)!@*rZI7IJf&Pn#{!u;FeVhZy%CX4B8h~Kk{Py z&#@Z=!OM-?AdYnY{P-T&dp-jJ>LJ=sWqkHqqKfEgLd1lXgBCphY&Ay+tRBo(Bu0h* zT&jBaK)FJB$j(V^z=_LVv5 z`^Fs}ht)#0Y0TaCi`ih>%pWfp;{Se^4i}i~7}L@<{!NGf7n*lmk#x|wz#!va;Pjw{ zX1-^hMg50Y`hOp^;l`2t9~<6iA@e;H&DaNb_g)M0e;9V!YVib^b^YF5MFpHBP-o#z zBInNlJ2o8T@q9VB%B@@}Ok&H#0`2z#CjZ|-tPQ!@uYYP0Omx4ZqB?Le)bq~9$Y2+h zu|PAY^Pd2;8<>l3jW65mA3)9beVTBtNMR!8=t)LCM{_#Ztp76>LKYu1mZ zKPgFi;SX!19<0=2yWU1sMQPut&_Wz;@sG)Up@bSM`%5WKRqHQ&xhW*d~Mu-BxD#%5G4Xk4SMbQ8I(=U#6V>%>dYHt)o3Vx+l`JLZEy47`Z@Pg`1k9N&&AX`;e`=&ga=|So znS}>JDXO(r&%4GGZSQ}3gTQAGUiR=&iQtbtf&;LXP!kbxVS~WDjjISb$Mh*Ui1rTY!xgwb#5)7u2Z%C=mb$B9$@q5CRA;oSVNJq>gPf&5%7|9-?I7qK^ecKRKa+1Cm{~zc;=@Fh9)y`I7-fNbxz3GhbpPHUE3#|RTYDcRj7lO zs>ES5V$W=}jeh^AFLg<5sMdK?`h^bFe5!b(xRUZKLvyd|Mjnl-(gP4#! 
zbR2)N@0pE$CCu9hO=rffNf^(&Y5=*ktb6^%H&*Dz=c1Dz{l0`*I$m}jFKL&RU>PK` z8`2dIZK+^Rh!uanFDIJAD6)}%t}0t-F0DAK$=9|4WJ`{5033xNaqW9)t}^kG*j=~t zQ~Pbm4y-@C8FyiC1*^XO;)uM@X|CRn<-DmM_nb+x;=)BTl~r zho^8pUihwkZ{e^HTskfj&HDC{AG8!M)I$p^(zD0IhJ=n$ZU1hl2KE$HfbEdo390*m zyGLaUeyF5ZP-rwDS@NI`-=V6+G0aKT@s&t#~A8o(4;R2ALLMr?tRGH1!UWDx4{88p!Yqm_EGzXO=e&h1Ux5!e+VL2or7@*wpou3_3sLD0oSYj@}{Gr5;5o! znHRAX@_BY?*&s0?v0`!(4(Xxnk_2?Qo5O%~a8CQyMrU_PFcyVW{j!)MfvxFDk{m{w#Gz-6_NU*JMA@vU*?ImITama z!&9VVPVI`*Y+{fTbzfqiWznG^_e~YG5(`d;P%J4imdWSJ^FfKb6Zh=2l%i%ad`k61 zwNAu{8@Wh&%|?Tpykc9XmHf@~t1w07UY_35MKsn%0W;wtEe))~~z&8kmHilk6n1uuD+eyx*z%)P9JB1y0cvvINieLTOw8;H4hQJeGa5 z`>lOGb3uKTYq2{E68}dzfyw-T9;Q^Ni&Vj7u_5x(h4I**oHp%_i&e&-wCv&XX~;kR zieUB+O|K4w%al)y8sZ6+n)d#6Sh#%N=6X2w*HQ zf-X+4bI$;y5w>|^5u~$0aK8zC%r>|tI=pTAJpYQT0nYZi*8x`ABznt!MPB(q!Lcj( zG}ThN!?XiJ9;cnbfX%z-gQtMGz##Z37Tb6_6JXq73+C`;7(HbJe4BUwTiw<}+6{Qt z3O0T9`f5ah=t!Nj4L2i{esQG@*Uh{$=i3SzI<@>KU7ziBzN^xZ zyjvo(I7a+nv2UD}#x4?aMulU;z7#;F!-SDpj~0he`x?k zAj<1f`STLhxWZpE1>XsFAoQFJF5m2iQOLY*620g@i7gwT597vYi-$!<tjHYO@AQ08#(LyYaJ7sOm2=Ae0* z&i-}vxjSV=mF9%^eVHvjZ;S16p0iBf6dlsCIrgkrunn%-sSD4XbP}C85w}Bu?X`^^#L2t52&s|C!b)zm@YM}QQv%cbHNf+AWBs}R)UvLYG~h z#b=p-)M91e08aYejyY|6#F!VQ$4*fY51X_LJ=3DBFnn_`&pMs?6d>TXL0jL`AH!G( zQd930XjPDb|?&_oH`J3ah zDIg!e;{9kYZ2ZebBnr7^54oGz`OWx{e8ukJr*T3;LNlS~*J_mt`yBb;FL1oYVb@ZG z^gm2*iFB%Ma!TU5nZ8H>)+kn=HHe#zsy3F6sI*NBij&m9;})*++&7_ty(QtWIO({bKfd>ziWCD z5^|Q-1IN&X*@kTMutmSjshy^#IMpLJ$;LfA(Tk%x#5B@fGU#tpZGIE{R^d$GVn*m_ z8V`tP7Y6!{q5jiRJDyFB-|m&b<0#|`M2(VE(FWLJup}pcGEb-E9kMap#RY|5YzLxv$ zH*n&cp!zqG>_R}?b7;cCjDFJ45Q|G^g6CZny|z5<7P0RZl3(iKH)Bi#nW+*IyoIUQ z?(OMma&i!Nxw5|d-d)ipQ)nJ*6wiu{KB*#wloooBvksQC_Li9tjSX5{_%YP__aj*X z&M3Ffz|-Rbr%pFJc80^6Ax-s>g^ThF@5OJx9wo?WL>+zfn0z_3LNMe*f?Y>2g2gte9)FrEP zywX2WohJGhYhr*c4~boQ3&Q`ag#SkyBYg99O}X$C5NF&5SKE1Y2P3V&+}10S7BB5i zEg36oz&KFs+9>vzo47YZpi!GQQ7~%6+Tb_NE*DG(aw(Cgmo1*A3S0^1g+yWmI%{m`oAHlYvr-PTo2d3J@+rR{OJ1bhu__DJ=t4 z7zh(7_?D2BQFJC=wcOZ-_uzzw@}BF@(iyh=bfKO&ATBmR5))@$|D0_TMK$(ZZBWs0 
zn~ii?G6CxnQ;}mVll08?p?Tt%;f7uXH#R%t6^o_EI6{vB+eHxs!@V$GylRBKXxVf@ z1(k`SqU%q1C&}@N-s|q<8yyZ!A3chH1-O|0e-abHHa-{f?;Of>URQ8CgASMk9o1Cn|T9UwZUCioIm8zaV!y(f4)>&D@rT=LMJK{3WTh<^Op`6LWm^Q zRV@|=m8^>;9@id`;u{A)OBn=zjpjA+$>>LE8ttjEcITU%(Pc!@ zTsC-A=#gYq#(NV*^vKpQjKv46rKoO)^Ck3;AfiN~{N7w=-!dyl7n`9rqEwXBPtm>5 zsZ3$>nE9Og5xZp9IZf^4*5hNwV;BU#HB(0c3icx(!8dC2ziZ*dv0u>9F`Ox*7t75F3;$2 z)HzKRRc4Q*71Uxa&8*Btok7slCO^N)cjGMm+e*V^^h@AB#yS^ebNDR*|M7g5g<|^5 z)V`Y6c$@l(P=LP{+|>taF}q-^XCwk955V=#@YNEC;Z0wg)MN=)kyv>gAK?kel$gAR zyc86^8LVFW+-dZ2MjuLjM*UWWrFFq+6M9)k8PUVrB^PHo&v>wCf`wX??qB!9b=#3Xr6y*TGGr2MC_~J_K3r_J0t>zee zUM~kM$kqojG1!_}d;WyGrgVLXV8Mp}#?4Og-M>Vd05ahH!DLFj$)EU&7&5|N(8#8Z zxvXbi*XNnQ$Hz2d-#qnJHXM~%ePR>`GY7|TEW%ea2<%RVYxv@S1-^Ato~RvY3}~}U ziw&PL_$>O(nqT;g6A)l+rplHJOekm&167Lo3T(fj=xb~5d@|+H|5O^MaOfP*fw2dV zD(VO!qEuLT>`~pGj0k$PvV3UN(nB~Ie`1hd9q+p#1LwiJt8rb0=Y_hZ6gm>ZEXX-k z8n`s}*;siWrl34aA(cl-5qeSF>S^+>qLh(hKysh3fWusO<|&6cw*m58#~Tsanf{)iKQb789UD1>D&m( zzUkBRkzJ@|Uf^*qqu+;?3qC>$2Qq<{_zGrt#Zg>yjz2j)H;)*MuTH-Nk_lu3FLV;ad`2ik8Pyx{t1! 
z39}FlGg(YGY$e({oXRTv`8vwedWMpf!DR_9?>JpL)UXY)=OfPx;v+Q#v^3MZp0%m# zwKX*NgKXfd##-gH|ENI&CA(vv^Jv!Y;}VT$OG(|L>_TB5J%MSFee8t;CZ@Tr&>6eS zB)052=OU1m7aj0dNw)T3Hu5h{vbPYX0WNf;10pHktVpiySux)^yQrIzTX({Jb=+6Rkdc#IqRDpp_SYf0Ujc} z>bBdBU~$#obTT?I$noQk?#yyBhF|{?LB9Aw%leY9=cQy0))tq-=6`=(wRL?>Q=x5t z=pC1rm1i%`l8q&IPRv-Y%aTlE2u7E4sK;9znlq}6Sm2q_$#BhwqMtDbx+-6EoChjQ zCx=-Lf>gOnO6y>?@SU_|=+pR0n>3af6cvWqr2BDSV$)m3hE|?rx`=AU>exSrqL+Ol zab@SW>Fb(891}zNB$}8O#)8y=;m$I89KllIrBmVN|tm*jf#O>)Xruw_est^%3xoWxReX5CoMny*Y-Lge} zlZwm!Sf&SoM=`ILU0$LxmUMMcL2~ZuO@8@dbaDvgG9az;*qK*@wsXY5-JN2%rm&=_ zs7O*vF8+6dOaGFzr1jg7THBQ5`DizD@`9ygn++=fM-nH~t(27=WtVmu&2V{69#iLp z#WPCjKC+rBdd+>=y7zQFyg$h~f`w5|CNe6SRkVzqL&@1rfnrOY-Fd4gOy#g-PsT0a0 zt%*HRg732e`8(pP_>RiE{yMMR`zIn%eE>@o^G*RhTz%K_=f|AA-3jv|pSltuC&yAr zryc8)@-7Ug1+W@x|x&gA9 zHi5p!b0eR%Dx5XGKIrG5lXgW%MYSGctaAfzkBG21pJ72iN9$-%EbZe{xc}^Hxp-Di zANFV%INX_oY_g^&Z|B=hR(q>p^CF>s)&$dMiHa>4i<$J`p?4CVJ$RuT4)15KnqcTQ z=@+V~ z6+ZS{%Mn#lL7krEl^lO(wtpc~NBDPCe+cVlbVg8&4E9(wPQEz$)JbX6h9=2oHsbxW zS|hsJQe6c@4JA3ElqS5;;FSxF^VG*CKfV!0ON?c(<||*Zg%&w7Jr^VVjR7Qa>yM~P zZW$#8W4GSk%E7k6;k^AHOssI2(?TJwJ)3E3gCd|EJwD12i^X{$!|1oJ*Ia?*_avqf zGd^2aT)awwTFa-0K|ySJPY%=&QkdS;8c(e{o;T8tOrNH+A*bfo)RVicpba$sEV14~ zB14l$C99;=tut)8h;s8gik08=Ac5gP;je5UmhAh?hQE4f{vXKWB-K=~DOX1vjJ2vJ zQI_fR=8AIk=dlVqx#43aOTaPlrDo{mPd zu&G~T)(YRB9r{ODnAjB~LB9#Bm3vjpEO5FvlcA&H8G($`@B+=*9i0%mv85&e5-ze5JZw4r3A7ygqJ_+GQb`tp~O zHn@X7-N+|bE~~MHuqjnJkUZxl^-&Lx_J+3Wa*5Khdtp~BTJr-J=OW5Hqv>U-Hrh8t zb&(X67ab)&I~n!)qFcUmUJi=q=4=I0&~ov$adnNQ?4=l(RX9a}^bqVxb3Is;S1bZw9L74DQz^0xw_ z$vk97c~hFG%vo@g@2cGp75%P4YNTHFCCfxL*eYF3a>kSdZR7VT-#Q$?!GSqHpGI%U z7m|hIL9x!GY?JFWI(+DNoJwo4c9O~b9dp$%O&y&6t< z2QII|CA|7X`^qs>AYy6Rra_YG?&(}1)_W2R@XAF}O5oDVf^{iZ=tQVu z@hMHo5CdFF;+_nqvmt>I8X7$2=Tc@4!l97e*wN#8fj_6u(=r=knoOpA5R`S8AW&uj z_w+>Y&I=n=b1F$he+tXIE2X%rC09_-`mZ;yOW-Pz39s_sg``;gd&@4ar6TRuKhK~6 z`3Ao;wwFrUS}L)=Dn=PlQS6{c%FjP6OQKoMgrYf#eLcLg;!zdIr-P1O@c5EtQoExk z^25o*>8Fz1C{@klkk2GFm6o~4DDKu^DNL@0BLjN@5dHG8izs#nX1V&u3L`Q@ 
z>EED5zPNsy{NrSL!GGWuzlV{)52X~OqIGPGGfta1vqODL^uN`4*+kJ=>(mNE^(}@< zmbrdgr=>6hgQ^9s1X|}Pp~cBgx5Oj$rHrM8qbJ-XYkPqZQT5-23Lu(TJshGwD#?2& z(oJ3L<&pHE29C-4N0Gz_y?*c#Z894nTM(-^I%I)bBB}BpNUA2)TijZ@#eqxvWx}x> z(s_AEeL}@$-Bi5`^RY)eez7=UOqS7Uh}a`NW70R_783t{9?HaMgKBV~nF4?B=~-tiHHIl2)08?V!`YN);WKMhAxmdndnTD@uw=&Q$i~Z;IA6Xs z8$);*U3z<1e;3<$&2VP-@5@2D%V_q+La!XunTvG+GFl2~;oW*2LUhAV+S{<@tD=$n)p=R~s@W+Lx8|&WH0k2zUVeLuy1idP1-K~zn+P$?-h&&Yn9z%|#ArdCW_kD{ zoW5a>{3~xjD3_xEf`I&7ST8>St8-d1yEX=F^cGJS8M$l`MclZ>zNxIUGu5x|*05JzT9K#@#5WKB_-jIDL5e!!5`*b8-Z+-8x$m{`x_%^!ZRGFX2QYVFnzt?oB zQ`9*$*Z{vwFvRcznfDEpy2FHBc9_zA9OHKYv${M$qeHTf$58{aNjg4l=pKB}YVTb0 zziNi4j+6VI;!&CF|HFOwUl501ujGf;Uw)MK{&JG$$9wee#1f%?V|EUoK_~TW1w8hF zLOlE1gUN2(`3fH-{)C{VBsr20IeC|GIZSRWv`=hiT__=*x5Y}EtuH(5Hkq}Q>nyo-Heu2b)IR;Y z5e)VIeGqh}ms0;zHbSQhNmYu_Rtc{}c^rQQvc@eeigw{?3`@5WrpHI0kdLVG*x4Y4 zx187KiDZz5ve2KF?#x3p?j>oXc-MLDzVqKg#8wVvNV`y@GPd$~6E@8zp1@nq~Pg@oD63jA9dPiGn{Oq2rnZ#ncIy4_rUniVa;KD z8fyToDsp)K%~#yuBhuIVwBO0b{&(&2zfQ}0PE3KCAb#C)ps?c6U=jMnz76Z4EB-^4 zN72rX;$dZdt<7jKo^I@mJhSGv5c+@>f}`}}k?UQw;Wf|hUf2S4aLo&D5=IntORI0C zzD3n;?_7w#$dMW^!U_I`F;$tqMd5-|C+Jp92UkQoz|{?{ zFv%?<&_bAPST9^QVTyam0!s#_mvvOB*{1mwgh8vS57h~)b3Xv2THQ173+lb9KjvtE zLi{ew!3-hW*VnONq~lB`MTuB8$&LlBrnmrywCb|4-44#S2TJtO@nbjQAsfrkQ0;%u zG`bAmmG;@lewC;mF|mYzYzboiW9Mjf;#>Ni8{di7L*Q|+{dGq+1oD#SRQEeqDJ7NSW8E)9r6Cj`w`J!$HSzBvipw!IMUUYz z(uwgo77C&L#}9+#%8 z6^64v?v>>*nE3&+mXRgz!q(Y@b3WqvYbrmdR!x~F`JVyjIWK(8-1#EHgv^GtSK>=~ zod@h`+X8@UGou+N2JbM{$$G^_flbkcr~$IFOD6GEZB>;p0#8rk;2z7u9|_rK_r2Zm zW=1!)&NmDs=w^##I{#+mfH~(xr2uOxJ8)NqQb0J(Kl@iLBT&(D$?rsXZs!v$k{WuH zeU6?jy09dAEs(>bgR0XpKjod6#dv9J|L7X_b3iRrh5G(k&!k}p5&@U@3l~YbVPn|cw{C7@QihzA!?j*EiGo93X zoNhLVkBs9gCi_dhR>_vDe|869>eZR7@;F4vHV}j}nL53FZz6OW zBmW<4{C)rafazJEiMeCLPQ^P0z#p*$_)gV^e@b4tkz5ltcO$N1`{Yr^KPjBed=moH zKS0_!&|`Q4xA|Qp*#%u51CyLTIA~-tY5CnPj#3)p2#cZ#j+Uv zeZCb4CWN`o^fq>i8EHt1VsD^ZbulM~`i9C-C9}vhonc$^XDY1RSV%c=VqP%Yo|}t0{6Ah77O7*%S-a;^`aK(Gmic>;Y#I!%PTT?QHi)L 
z@ke^v*yNfI^DRTVx8`LO`tpZz>UC^^3#;dSnp+k}dUR`9SVH(t$@EEuku15>H|sC! z;Wm(Ur?1_Z^BM!3zmW|m-GwdG$v*_Gev;Ih$Hb)PU5NI?;v80jQ@5V4ZYPY4ZQ|fl z`{~TdwzQZ)^B?kqC*++k=Z!-D=EeX4o}0xf{`?roAm{qdWRHNM;_G1@pZSuKNmZMN z1KZ-fXivq0lgf1NrQ1OR4>WS-ZDm}qmG=Neb)lSTj()^EHz90RXq*|G>(%xlHN<`v z)1@1eFwd3nE$NP1`X=y7w@L_{>SY7vK>`W}Ip}=Z>g(Kd$mW(5s7F4i`&_m=gJ_{( za*e5@$op4~!^H^O@+CDzUPf2f*XX>>*`4stGxAE7<@u)tVVzyw`0;)#;nSMkruMSw z;%$4<32xY$wVNt9cTDdkU}c}TH;cG0RsKPvNvsb)tsKBxeza8X;WR4m!3o>NS|)#r7R&votPI(oyWDdHeyOFF$k}pg0C@6nfn5;a3Eor;zbK>r`m54%AQ?Kt1@AaBctDW_KY=!Wy^yR;|3} zn?^k7_3rF4_BX9G!=78(@dAq-aNdXu;?H`|?Fr=LzAT!W8zMOiFYmT@k(|eA6bE00 z1AoexUFVJ!SddefbA(3d%~^Pp1~+W-BMyk*-Ch8&LVUzNJa=1#z>m`Wl$1DPA~xg* zwWYMrn=^-8(EiuPGW~!W<7%AqQMj5EcrXI!!SGX^rFXrA1PjVjKeLWIo*QZyzZv32 z?^Yq6ddb|=Qjuj`32Ds7lXYe3+rM6}`_-wT9-c#nhC`w)y%w)Ydwu669ZYW4F zFD-8YqI_(MHkD1?$4->{pL}kZx*+gyYT1GN)>H)nwm)uDGGTs9cuSajB}Fe?x?glW zH*HUPF@2f99E|<6bUrATx-pcjAWu}d7HDbWC^moPaL`J6f@Rx9H;F)D>o({hwpa| z-)P1Asg~P*j5woW$M%GIa?VFe*j8Ti&wIZ=rSDHBLpF_s(UM9mm}kXw)FTdX%3;~U z^|mK&eC4#*X_|SboWb3exgacdvs_MQ)#FA6XY+;E?WfuHK0|tU+SZNWNnDw8`S!l+ z2dzaK0h^jaL#1jzwR(Pv{@1W;v(dfM#l`FQ5-0bccL z)$zn~ij0{!kv0rBgfL8JKVED2T^ji)1_fF(GEGk>v7&_8HoC!^PF^oRseBANq`JBS z7H?P}9o?lNRF27+Q?77m5D?cI%~Q;x6}lS0V8C1vj22qbxtWo%wJ2t) zAk^s0-E}v^N9Bq=&!*>r1VS(*yxb8#-t;~pe2(WdQ7QBFak2tn*GBw2Sdh!=uyg!7 zNp8Fl+3m4hmi#E0rc`8n#3?Q#sfoGlLMk?Q1DY;~CM=7)_>C;*DQx!;HfP_-H4@%s zZa68nZ9b&9JX!t3vH!kiXOjJDuJDI3{7=b92&^n_b8unglQ51a`_qCAzB-PG_jh&N z=I0PrHfu=l?#Pq$pwia!oSahlHuOV2_^t3WMQdSI+j?8!XTX$Ntw2{{arZ9>Xbi)y zPXS724ucXUT{&xOZ!+#_Xh$PB|oAeJV6TKc`0E(2u^fhu|GYsgtTs1i+YuiVYyj~Y+ zh|>msjmXg`Gky60CSQ*22EAv^-tk7RkZ!Qx+oqwc+oDQbum@=is$|s)8)^ zqYC1cc?8`nW8X1cjPF^50_yer14r_?4p(QoqQY`L^9 zqO1>i=^496Oi*gWb@+`-oiaWpR^W>Fzm>t(C)z?LXr3NV%$d^X#x8`?m;4lQgnIXkPAxy@R4{Tf$Nx*ssB7N_66%GKlw9GYSOw0$l#_ zPJRSLs7%18DrhOJgZ`Ha;X1Gbd$>ns-K5JagJkp0NpWVD>EKhRQ*5^DM&&Xxe=VnV z1Bxly%oOWl^w)R08g1ZVO&O_?HrZsc@l`RV>HaH}e2P#eIxECslj7y6hqLEXRr_5O zZyN>~8URbjeQ^%g#&z|+?Ybh9NaEuhhjn38;kL61g) 
z!EsG^e98nM<+ns1x#*7eblM`TSAvFOti5HQ4AMu%{(d<7akyZ6au?I~yBpnbsdd&T(`rD%?z^S+O6`8weA}tcT>1Ck zvYF+F0TZ(;=fqmcm0h=+JqhwJWj?QhUXRBvc!S$UR^&8HkZ0@bZ?l5!m*Y0=$LDGc zg!u$hwrPchuu9I4~#*h>g5b$SW^^(C`0h7;#uH^(P!7y@;2!+qu21Uu43YjKza&(FLpj) z46C?HF5kJhs}QH$e8VxM)!av8&a6i!sxs8qt(|%_D-2NePo>xYy+Unx65I8$0K+Pf zq7#Iue>UpFTPP*{Tsc{|2kT!6ppk4WLnhk}|3#H&i7NPq)?WK&TKW6=Q)T14+ z-ayg+V{efYbvoX6QnYm}N=`~`CTtLegPr@%W;nCSrdKh*9_p@tPuOY{wc*M< z6;m?&cZ7~0LdHM@HOu?7^UepY{@)l05*ZGSbWDjI^7&3Q)v3x85JxP*Sl$LkB>jpi zM9*p4ffcY?iS0Mz;OD9}@B6B^P1*J9yW_#p!m`0ZXCqpezOp&14-oh%J9wLP+CQl} zQ<-G2@;NP?#rh$1(vLrB@(6(zzrgI^^h+9z4OH54(!bMn_m}uEZ3%1N{;9-z2L_(x zd&l`B_w}wD4f)4$&NQwH4(9i>GI87bH8m8IoT%Ogsk>P?4BIH?nwM8QQ=R6}FAtOA4SV-^zne81c?2-2U7!+CI%BRyD;ANh-`8tLfMu8e`y; zjG}UeKq6x8>pG%w%)?HH+RK@2tbK>vOmc^Ix8XC=!V@);&oB8zemq`pm)!A^Vj(do z*$3&cT3CK%g-07*e&}FUHk3+fk4%T6VTm*z$`;+cY7-kdx-%Cc*)SVf((rvsbT4Yu z-M_ZDp**R72mlJ#)2`HP{FEGlQKlM@jyX3__U`Ene)Ev8n@@jcwy2^XQCnnFuaC*^ z`}*2_m!kgCk7pgldzGXZHyNk3!k>6$NEuScTK-b^&ns(aM=FXs!lT$V z9K0!q^z2^@6~8zNyS^c!rb4f}w3)Deh$UX2-on0`}9ZNfG}0}zkFf7}*qz5JXFzJkW(+$6nB zI)C-MZ$0NfO_ChIGF@rdt_0tuwm%gSzHAuEpFOUFPr-nPh0D(kdNsV-IsCTfgRg#qhgW<79pesTb6W0)ziR_%u0^o4mHu|K5R3gL zp?Z=kTc(Cj3oVfUm?O^V9M&dMQh4^VEU#DXoQJVX2I2C#_872OW;)R=!ZlsdMY_7- zO4)i8WsB?&r&>+Qjadc-%B3?A(RV3yJJEaH3ISi6o_Vjjv|bw$J`1WdL@O^<4{BI2 zZu+Rx#kP(}G*iBXWP>~C8$37FTc?J5PUqrNQ&VF+ckb7o+s-Mc@7`d}+{Qh;tIY>` z*@uIGVoT++s=4@ zmFMzk_VYvO%K`YI^WNvKOz(5|Ws42WCYEHEF&en<{AC^d;I*EK?jiUZChxs5FYqSs zbC>LWoa|GqBMZLHeh~M$pJi;_-hVp{0pBvRCUXpfPd#p|*RLxnV@zEdlCyk${CRZ*i{gysDL)`c=e34dV}iMjwjMnQkvM#! 
z_T507sCvtNjz<^IUX71PdS`lAPP;$m-N#$sj83wBLbMPql(@*dE=2ziOm_pDv6sGm zgC^>I>ZO|IE*)I{JR8kKM~kV9!v8SZCHEQ zTz_g^ZnGage>sw`uyua-j3a#AF}-#jMF!7|m!}DE-Jas#p9;A4ux;7c-5kh*cPpzG zU#h&{D3gMF+n?{-FAJ)?%=MgSt6s8w+`(hEtJhKEZMUTu!$V%+En)E0nNEnmyQF4) z_mK!*R&~(XmV@z1Io-QrDoP1&^F`s4bh?E#dQtJnX8dTCcfYKU*D|eIOa=F4kL}wY z_+?)2D&5|O{P5Uz-niMsr^TPBTfJL)3_jXp^xmzqS!#3etT}w^JbxQ|GcCNx zF)P9zL$B`jdql8JF8!zSy@d|yg!|>DY8qisa5B4zuyW1g$fYyH5^3IkcQc}USBa)= z0QY6JE=}6M5_)XqpkO!<8uI+D2sqX9U%@k~Y}FN7nE*o$=RWTdVx8 z=svIRa~ixXmyGrNu>Ib1H_{Plh)v67UrFi~r}uTkI=Ic{XBRKf{+Q5fi}BsFMqxj* z{o#R9eZ<<}(8vQ%h3Bne{i%Y`1MD$Zp>dss{F0FU+L&!=YMA{xqL*Ocd6=kXyYBp* zGy5r>(Q~bMcJ=vfGaaTVhP0+_H)UZ`#g9} z<^JUqEl^Jg&7W7{mSDs2hzz5kv@i${W9FVLW-i3iq6-UMP&AB!{`^iN zBU-4=ohmqaN}{%Gd)BzNfg+8I^6X!~cdNSQ@fb?0pVNME+2y{EZ#@|SQ_(Iz0OLNu z#r~J&*$e9lA=|=3t%hEB0*(3LjjES56BA1mQWcZlV0@AtW`x zVnG{nNt{hPo$s+IlpH^cl3G0XX5zf9)mde^OtUzU4;^RoI?rr~V^pNOTd=P3f}A-u zz@jvL46-G35s?cT=i}CJpuwY@v#+f5PS$b0w5CN`j(uLPUH_&dl^|uYtwguqyv|Ro zCea`DO!*1q^xTcS-pWRg?{uS)J-{1i6H*V@~gt}16=wWFo_0*554?;QD%u| z4_9C2HYaXiTnfyvWHn^m8OHRiLM5L@*~5iL>`rM2cQuDi{sOR0(=v;O{-uy5cY zkyx8>vFGlf0t8@!a0s}x2esa>>D^~MY?FUp^Yn_IRct!Xg~=mw*=dLot_!M4;JV|) zsu6AIgCwi@_0jN-D=`>KLiIoB6CpsXQ`jFonT~y zHk7+L6=u+8!U*E@1$UcPr7BTBK9*g9!XN8>c;1ti&jivX_II`kWDl6IkQ>Q9=q*_b z@PaZCL66_{VLC&924u21eIyh%U@_-9v{0zmKu0F~2KnC>IGduHA#K6)LdsF&`2~IM zhlPVmo4T(9`g54n0@clrB56VOpMPeQbFVnkVX#|5MVZ_&Icw7ExqCrEDM+hM#qh%} z@>uOg+@4i+WZ?MhSa8Vo2Cgfwc(8!54IXkX1flj63Z8{4^}O9 zG+Ku$dDU$|2=?|LT?l9yzZ*RTgbFz3O4#mT5u~l@hXuZtMSdrv;i>04FxUa8XK8iN zDxwBNoVrhaFYHrSRBFvWg3ka@p~(M-001V+m$Pp3oytUf2!M}vwAuYbA33RZ!m$W~y1T`qC!*_} z*0dvOUqAXx!Vp#-P<0zqhLs$v$f2tMbymyOik`2+F~S+dsbJ=|EHLjOmoJ+zrj~8n z;G3U2N&CeD+aH&3cOBS~@;g6%(qz7AB@G*pl0>ih_T3GHraH6`j0`|V=frt@Rf;yZ<0WW1AA7y$HycYo7mF>dAuyfnpB-eBITtjT zmgT@rbD28NwmaedoD^CRHO%=w(SmU#W4SaU1RqpI>x_QexL`a6e+A0}DzAoY#_4M+ zrwx-Lj-rQF(O$8tWfpdy^Lp8APB3j#b9Tq8KDGg1jnUt&xo%f*;wsO4Zgbokd{@YS zQ9CWZVe-#JA=%n2w%sziH#uOMl~_v$k{`=8O?6 
z!1o(zGXivM=5;hw`bIe2MJ8W{{}%2*=r1wk=@Dix5Fp!p=CwhJytb?x?$9g5egc~zl^o0{?#&&E z1fG*rZR?amAuM-_r~38D^t#h9?Qk@)oVflPgpqk0p{LySu%vaS^RVd=Lg1+{tfTX8 z@xC{vv+%?I%FVVf0$KZRzWuCqf3s4`c@eO0i&qC;p3ZtUdows?BXHi~yjw80gksIf znnQB1(NH0hOD8gNOzKNLeNgAy>|6r*f00TYiqso&&yf!`P)-;{&^EXD3hjxL(hA$;Ff7QPD^$| zks99*b11|<{E4RXd2t?Lx;}o%VF* z@&HV?;wa4B_dWCCC_k?sG=0NyM}8eyr%1PL-zI?349ZH%hSYFM;KuX#a2Ue=2e=-qDq}vnsZOH8HeJ zQ)MnOB<@;r#Tdmy0*zQvQ9xx^1?*L&n317j3FZ(OrCbL6g;@na7?j zRvO&l$Hz_VGxOAo-AEU7jOGE<3`GQaiN;Rqx5XsvJqugogZ=Ps=y_pL)Je8+P$vI+ z)KUEyOT#wBqmYws!sG}M)Y8=@>EcltTxEG&qkb`?l2LAltLF?v=zfjGMD>o7QAu{0ibJvZ*+XT(`n5G(qKzz(gN*OeA^A?r_`(dt#F9Wf8 zeX3dm-V%B7$3j?ID*eKOn5HHE5lK}j$?#!_^0eIrIL_LO3(81d6T*q~h@+?d16@qg z!g_iFu%CkqKhR5(QDH>@?9p6^auV?)N@4th=l+~3Qb*VCe>OJEbrkoozoA|rhM^2aMNF22>mJuwo&J_k+wW(h^ zIqEDp_-rIwFG}e>^l#|hzXu6z86n3=_G~hWa0sl2nhGpTZQUtH!T>ApWoE~a6R?M{iL9Z#qB@{IWJYcKy^#Tuip8^& z13TfQhiTb_<5~cn?TX$a)abh_)JJQvUq@Cg45-}CoQebAg5mn7^c#U81XGMgL013r zM)a5=b*dZ0V4@bs7mSQ2huJ88yIA_1e517hMl;LZ3Yvzzz>D5aV>r z+Jj)Y{D3=@`lL>spZM!j$qjoKy8UOW8G!N^6+>jLR`V!$9qLZmjY_W;awQ2XXrzTC zxtnsf9o3vPb&J?jrZBJm3YB>ZSW~-lws3>fKM*p&5&GeTH_#5Imaz`t_knt_P;_-U z%=ManY!I3b=$mMuB6Z zCN8x#+NsQz@s>Z~!wM*{{8|P$#Bu_~5ULsLr$9nSALkBvyM|(Puj?=pPaRQogV2X*$pYw}coX+-01{UJ!A6+uEL0RfzXC|&0mbpmz zpa1q+hENV1*nzM=3`uiI$i3cqb3lu5F6cyc0-MmxpjR1UV#`P5y@FZf)N-BItd z-5t3Ff_S(0Y!MpZOM{WFL3{qLovJVYbwnc4g+6XTjmi`$=Rb$_z66R8@omeMXy5lDP_VuV7NL2!3AQ>y1}!SSel`Y2V*)^m z?^9tnHH*Ko!OO-^WB3e`?am}A!}8%@%%o?Kegn}zgGdZhsg(Yx2QK=K&!7xbVl}0x zwn;H;6ht8F4V=((GxPeAzwe8Z(sfY*+Dbk~{sdzGvVfu!x~BM;DIw z*y}ng_Pa)eDE)T$1N9B)G~Ayf7xz!E*nhoYl3idB6UE!(YZmGDlzmCa6qL07qCq1I ziJ#-l>jF%;TAKCIXaI8w0ZAUeSs1Y%ha<&iq=ucHL{gf3-PX8-j;*<8(k zC!VAYvm$cDL=ZumgcL`+*mjkquB(w4y{*vaYZa`H(TP9*M}pr@HUO6}qDy~4bFmcZ zp1E#vAhUKz?-4no)5(=F7??<%W0D}z0;KD93Z)A*gEi2or?MRhRS_>N$Gk@nMI?_B zSI_x{kpAhEp{!@iPPnuXg_9W2MWq%zz=1*}HuaqMEBCOrA=3A2c63nAg{DPSZeLKC z26vX(A~a)k>5uM_e|;IG6T21$N)cr$WptRqb} z+MY^m$e3O9*BnWpg3$vGrd96`yNDu%9uuS;i2N^{vC_PKWd@aTM3J1vcrb7S)Xu=} 
zB?YHQ58C5^f^bbxK!81mttcpI$V~F99BLS0AGQd)s`)3O;y_C^6{^!ZWL<#+S9ghd^MYP(XTN-DO+iQ)CU47n_g%Pbvr z6F)$IhHhtewo?jC{FZ2Sx0cqn5ErP^^I1C0|7I}U?DFSTS{Nor)7R~Rs~q!Ocl%#| z*3h|oncDM$a&0a}){=dHx^ah-F6bz;Y=0R4%ovRab70|Wpu`q6rHw2s0bPwQsHF(S zOn08IpeDu=uo)_(Lw%B1^+g!*I~`zT;SH&FDExP}<>MbA#A?YBJJQK1hd4mowPrWs zzZ4x1sOs5JbJ@;;MeMsR12;jQqG6Tz4_|r}iegJGeVlY)GEw%A4PZD!e@JLYsV_CNkx}L3(fduU2~ugtE&+b7A_G>u$dljwj=lrMmm$_ z2i^!2s&Zs1e@n!KE|oDoHVTUXPbVC1_U90M$yCM}<}I@7&-m~q_)HybE+*65_FWV6 zSTJ20!HJqC68dC%Ia1t%By@Jd!2#dqq_)$ik1VU&*Z@mqa8kj$?|x}ljDb6wDwQ?; zeDVJU()wGs(U8#UK`*lO8o_K4 z(kG*l0UAAaJz6L>ZD|=Br81{~+3i+n3xk2cEh`NPiSxF-!$p=)QwFrWB0bM-bb3MN zJ5B0@hejEkYFA(jB}ZZhr-(&ede=C-{iN8TVpYI)Op1 zdw&G3x$SmQUpSCmzX9aGTu%+T%5+^P4WN`UWOGm;X zsQ^L%Mn32-*g)}7QEh+m0y65=YtAdEDW2Ip6TZ}fxobO}O$45&9t8#4HIKM%d(a3n z`ASTy_>%nmP$nZ0A4D!G%#Vh|e5mmH@6_+Mb@zi)jH22gX~`NEe*S_&3zD>wwNFt%o%%+}zo zzR%aX_)y2*UN02N9u`Zta{)Z=mvibXTFlJh>RY0Zm!gGYFQio99hb~yjV(^%_6M^Z zL5YlVsXPT0KWe^wjZ;oy)Zwz+gE`2VRv;mlDPhc@m8l35h(?#O!Orb|NSLNV60-|J zPQqS6vik+u6(aZ~GuEN;|__kox&Cl23EC@`e;;bnWZ1@y;j(~+?q zHvL2km{2zuSVNl$2{GBp%vT?BV33LguUW&B_eT>+#uchGOmT^BRIKNdbEh;Qu%729 zSwi|v^hrrMZUDUdzemX`s3z4T%feudH>*YjULK#xuD-L?QbjDNMrF|`8EgltAtr`!#@)m7yOcD=N{}`=(Ovc7M$~u_{`+hH}BW7 zu9M3ueH_O;B;%v8`HwJ7!52yP+wlbHS>9W@x<{foiETmANRN0{!7!Uz2U_jVHp?r5 zhm8ipnJLF)=6cl|;|*)~D?6qzH-~WFTb{i`@V$G8zis3ZXoMt5ZDdjkzK*bi|ELh_ zl9YbqBm}&;gdpM~lti~){GM1OQ#4ZP%hYRxKb;k5?hE$$ZvgUNzi&i*IjEr+qpJuL zQn%Jkk>wh>ySp<`lK%~7n9lj<-MvjGKuPy>SB0TY&e}>n`M`o|umhGi*klxyzD75; zWXoum>TL!@IxGZjU(nLBn~J$cv*-m{H*61n6Sir525N!bw#Ub~dWX2%D&??zR#BUu zFYulDJ(J_2ix-Bbf~5Z+!rm$WZ$%={*$P} z*5=KVIPNTPzWYE(s+-jCLLG6jp*ThiUC}@x_Pi?7Wp*yHb9=S6;g0 zA^J87_eg!Ta!qs5qraDz(9y&Bjw~S910Ao%Y3!j+Wi`B3gIjOY7Xwow&5u@W4$B}% z%b6RHylR~kKjh%R56?GCmWiHg%Pzae;DBbmflSx)dDEeyWkbK0CU2|7p3B{I_k*Mu z)Hh7kWD-lR<>{j4^?o8rmSDcI_(|MIJ~Pj@!yKnmjbH6=S*yoJcjD?prokiMVY4w3 zUaIHdtwBkB&rVK;x5b5JMVt4wicf8JWL}s6@A0ERBAVp&wc6A51a`Nz-&+Ed` zJX71nV>9HtURe&n57eK0mQ81L=xWu(r|Y$$QPOKFq6Fn>RZS 
z%@CWW^f%`Eau5f#cDFluGybmjgJe2vYB4BStt4H>;=Cn82ThdDTOX!~U~TOAb_u8W zu6|1Pq9xNq?u3_h%1i)BXb2dnGO#B*^!hLkreVRs4 zIO$inis^ykefu`9ZM|3D?}6Rv)MM~=6rbm2?T>Dmviuz&7FrDuTHwAvf3$8p9zEZ* zdFjmxdOCVqE&F-kGWVQ$ED`0pbz?^yLyYsdavfzEbai^NeR#vS_B*A#W!OUZY)L7K z5jeflXf`lXCnMT;?)Lh)3zi_!=x>Vff(P=wM54HJ)DJeE1o9|hJ5y&FOKdB8nDoq0 z2h7Qz2D+mR6eye}zkA~)2_~|gAFz~}gvan;^u6I8Sj;_U+$v@9uhDwll{{$Y( zbM`QlWmzlKJdJ8qXrHC%z8>xFj(9(peTVowOqe?yeJcp!NoM zYr1&9Jp7UksNboC+@|HqmFkFTm9Ve_&=tzMOsNG(DW&%BY!Y>yo$fcq1!}u)Vn_RA z?;cXZlEtqX*snOfkGwg6UfahmytZ4dO?=l^MvH1%Hf>g?D^9A@v+Bbh=`GvrhM9Gn za~Eeq3+5IU@+3D24mrifZbGrHn%i|m)tg@m(&kpGYSb#MyEqn`vpk~OL(A%kwupIb z>*nnwI(|DEYgs>hQtQzNqIfYCDC1L_x5==d%r2fFlyR?$wK)FmRVWhaD)VU7sw~~X zetaRTg|%#V3Hc9CXMtIx-@XyivBBsa)33~-Wc*poo~jZ{Esakn?I1d+%-WB< z|2Pl-)cD66xG~_B#EV|_miI3#&=k$4op)!DjMNqX^sj$jk)T^xHBU^t;kz4eR+%9l ziSYH95wK3;uG_p>q&i}K00F>JoL9f?)SlM!Xqkpnp!SP=QI7rxS4wh-UvcA+8*MI8m)37r``id9;RP}`(&h)xi98aD` zNhf=8&bU(m>81aBUsUT?UpX!47%}R`^gjc~?vVMy(Pl%&QUfnTws3jblm)5S5HT8; zmj`x6!J3Ga<)QyXq)ct8Ha)f@9_E z7M0U~CQ^IB&Ll zn%v_-w2DkzF4V5ZKV}B3oEk;D+Dc02EOF^(42M9&ima}{Bz%2GvFOSbyP#wA$HkQ; z-s5e&h$k62zI!7;(aPI2@c21_{TfnsYz#B9?NJHRTdV#Jmz`#>6*s&Zh{|TJ=j$=n z%l(*Lt6Hfj4!qw2!K!);fgaQFtRLl!BD3ud(dCFu9=G>n83$9F1&p&=a_057|T^=bD$kN8zN@I3n1*i!= zV)2=o-5GmgtD~RcCh(ZfQUC&^*h$|UrVZ(67gMVT;JKYTM((BLf${z z7gjkR79oiqhBqaO_bHkvEssR*cl!NZb9Rdpe(=dh2oku~8XfxrZEI*gt?p9{y;plv z95S7kK6WQwE~|N)xylwLQdRz^t~wpnp2TDv`0E3ZX(_;JoqnY@J%i-vbC53fR^VWD4Z1i|IyJOsX6z@fUe+1TW~tlPoji>44~k%XY@mABp!GJ$2HLM{FwB<;!aT7Ib9;XHAiH22T)I z4K9^+NH*3$_&*L6WHm$~m63HfG4>00)CdFud z&~f(>ZQ?`!Q*A0Mb`Fvi$PMASN8HhF-rmoGK=5+IM773}8r+SXgD?+JHUI<>A2?nQ zN;5T{B)6dm?9{2fzJyyNBv)pm4s{7~g{o+T zKz+0_TKL((jdO|J+NbxnPAS$0vq?rsas#VPh{eb+FuMY>YOa#RJlE<%rB59Q$Z_~e z8dpkdjo*pGaM@8LD~}&OsGau`MkuyjCY)c9V9kygYK35op#St2wgj;Qv4hA^iJM7` zY3OsN79Vw}(K#x6SX_L8#e@ebT#vys+^3wV91hc#hdRqweJv**5F2s@q1@qC_YC<5 z;l=SGyMy{4{5o=7@}Z%80>8-n>>6DT_ZJw8lbC5EtB7*kx@az9_(6+KPS+?A%UJ>7 zAM(>&~1UGm+81sk|*i^jcvbm9a5QxGu>zL8Z4vJsi94x 
zDnGjEyGEbwa<%I>h#m2f&?&u2&7}6UNuaIA^Ld(+)EOi@%gQh;IJMa*Fvt zmen@pXkKB-ZG#x#o<~mCM z-a=3gbu6o1G|N~?9Ja@9QU$h8c*OZx#~ls)^OlyuCBsgM9W0uy?p$)RF<=iy#+mYe zv)NyL$0#Z%-Mn@B#=NYRX}hgQoj}Aq3F2SA6WJvP7Dc{ZBJ78ZzPkB0UvGJnZ{X}q zJH?ogq2d%@u@E@d`4x)!Xi-lJ$Y8*)e+!|}YWCgI*Xm-U&dT{(q?9;Em>UuC=;J$T6o8>0v9_G`_q=hW|i_I=o&zmBZ z@^_~%MY$?j0EPh>pznjUEfJQxe#3YlW~P3)6K0>Ca$rfqsWeMbMB_T>xVD1Lse7I) zq2=YJ#iHvH?SJp_cC|D?Xh{h4=Pzjj*iJlo75coYS`&Xd#w>p51wOUp5w~I#vUcjX zkWERi^5=nQauu1be;OibZ?ZT^;<&}%4yAAfwtZC7_~oiA5~@}34&g@nloXx#DP;1l z$Q7lFnwp6uGlP$mo8qDKHFdWyBO8!Xa$PBw-ISkMq z`#hl9PVv7V@xRZWxW3l~Av|gM!{nNH7Fe1}dA-RSWaZGE!YTRrUVo{O$~y%k(SPkh zatSu2MPE>ZSVdWxkr_9T0d^&Vk;PjJDt-~BlWAI?d%2Z4*Aq!Em}g>`_#iTraHU~a#~XDFNm`a8Ry*e z6BSte=+Dwj2)uE{u@o8}f2efV+_{v%XuNu4H+)kmXAzff!-_=c$yu*vGyX8p_EvD()}+H4et zVwjO+NL@a)*I58>GyK``@F~!l3XfiLGG9Wn8B0d9UoPtTBEn?*i)X%&&?bvTg-M<3 zxA#7^x6VOH*I7R^{)WXNvzCKJ0vYAt>ph23-uLGE#U3~ng21fs=zI;Gk3@FZV!fFr zN)`P1pY48|^@!WLekM?H_K&N}vRAl70*8eo1k`_C8uq0BL&~jUTTmB|$K3;NX5d28 z49y+^k{9u)R7})B?7AnVO(dAU(GE4!z`i`)6!GT|Yrw?_ZzannV12;{h>?g}{p8$` z`sRlHe(_53xm0AF0sFAvS3&>mQTgAie5yvZ8tXs2LG<3)H`C4ct7GdxVl)D^u66?( zE-dF%R}xIJ?l}VO$D*-B3m`pBbnfSQwowl1`dv=0p+J&^@DLCh_eUky?_iB(<`ou- zter1D+m!X1Mz}+tg{h3%cv!R%t|LgfL%4gJXe8J_4M|CPSkeDrqy17IG;+o77lB+~ z<&~UzugKfuMc-0giSLW^Z|I}aKMD0ZOA4#`%h~l+%k3pzviDYF?CY6WP0rkmrXRc5 z*9u^J(m{Xd8GX}V7~LFW=tc1H649cNroWYO(^y_V4tsU!4{a;e`?zeLP_MYE&!%Uc zbBNzigp)xailG1Y@W23sUX`IEV6QlTeJud9I4PG1R6%b|{CmP61IQ*XyV%){TM>%X8TBGRPT zcwGX9&Y&FA&}?Erud~bP=)aez>M;gObF1S*^>UlXdj`mYbH?+{=p@C(JPgzoc>j5i zjKa{G#!@H*mSjU`0YjyP2h`#qLFu&-P_*p9wl&@dAt!D`6}`e^ezXMQlKw9Z|J(1L z#-B_59Q^yS+Zx>c)~3NYSsz{PhW?nd=-4~ZEX&w80BNnYq=iz8ws1(}3x1_}?)e5e zFYt(#Yq*~iY69?H9_Z`UnnQ4dD97q->JJ7N zsrV~-_m*9ztm=rGTX+kL3^X8ndINcPx$BGuRC?Ci7_lsCfeOFrwKE;M_NS;`&nJ<1 zCWQ~S<37p(x5J}|PxC+5Sydkg%ZcA#eG%6-#1ctkkX&*E^(ct6f~O~ZF%9Mn&CNHs z7!7E+)mnSA?*6xE`3-Y!6xCznYjn2p-T`pWsu)ue+YM1f@ySE6v6F#MM_&;ydVpE= zH+_L8FPRY!d;-@=vz1@AYa{J9I8~!tlj@$0LVC|OFdv)qVou$q8ty&4JN*|`|86XZ 
z+kkI?#7x>}a};QY$~_Q)=j%F^6UkWDJ#bXUd%-|7zdwi8nnyp0g zR~T(CY<3auu)JD6Cb8mPC@SyKXkWU=-MQ(TpDe7o=BRf&PL|vF{0BYOJ=QCDZlS80 z&E>&hO3sYDKtyNYgo2dI`MY=_OkHp#a<#HXh%CA~$Uf0B=lq@9{lCwp@Gu$so=}6O zncT8F8BvkU>pys6-_Npn^_vF?tmDbpi5Y!*03h?nQZq1Ap=@Kb558MkAvX zNX-UNu^x=&=zcSAI*|or%xQL+CA8UKUWper(oSP{l9JF0JFYK$RN?eTU=KVd@N6^+mdYC} zowrHC;6cj>pLZ)@?;E^^oGoO@gvo_#^*B}fn4Nr|0kls zeou6VHjhdz8C;@hRw5sMs99Xb%A|(iw6V4H@NJS#>;*+W7?SRz#CR>P z_61o-;YbSMAwujiBi1(hpYM+QNZrn6eO^5}E-G`jtQwX{OrI~7(q&XztN~2vKFcat znk5}Ers>O;^56oCUZ{-6dP84&8ici2U{}Ia6@-1igUJxk5-SrU%IEGRV7qH&lm287 z3z(n9q_48lxvWn)@!Tt-^@2w!@1AM7;n@q$K}C#~B_oxub(}{W8WRw>Z#=ZAS=QOM z_+d2H5AI9J+jyf>2Z&jd3ZjrORGsrVnj=?FU=un0vBg( zr>mZVWQC_MYks#(Kvi$Y9ZMH}d%}DM1}*Okmse9a$tb>c559xt)Xj#UfJkA^)ym|L zg}1e3QMmM?MfPm|6|y=>7Z)5+PwBhRF8DEb2VC-i4DX$gk_yt>c32lyeGcxL%8RZk zS&zLWJP;FJfa{{$dH`Nu6zS_@N%^wPsy5PG9a9MUUO4H^I*T|FH;KlV9c_~LcAZ|! zn|Ok)xTtVh**Tjf86Ttl;?O`EgIr&SHM#!sI+^U5UI8&BwXo+>D<0K(-uqC<>#$_| z22bZ7na#q!p&W1I$DrpZ&idWi2i)48>bFn`z|U_aF?D;r0_D-3AoeF! zthI%5eop(Rl{}&5x4dGMA~!o2zi&QDBOo z5XB?JJB^P<^6&Gf5-^7nSf);GW7~*<=#A!=Kg{SHMG}Umco3JvE7H0LVIAyE{ zt7$m@Q{OgQSrUWX*f%V_&CmSja@DQXo2M>Gpl#Q67kh!mZ6t714u7tBg~V(h$Y6Qp zyqIBk!{z01E7D?p${_B&d!>W{e43sbTgTwFU)Ap#eRK5^JZ;(Fyga7+_Gf(piq8vnwOyw<&>V^CB%<o|0!Fnv`q1rm|iIMBUB2fF3+febiG2v?K}>ST;?!*T6QAUGQ3!6 z8QdIRp11%TK(99D91b^+6Jea*^cy}jScA0ns{6~+q`UT*9 zb8yRH$0I|gEf*W5F8mfUEpf6h<-5DewK*%ZBbNg9u~nOepRuf;SCL-SD2oXwpW)C{ z16Hg~6DxN~EdDX)L{fmhxa^C)dbGECh!&|pa~9{oL3*?%?pG614U4Cho6OW13XDvL zvn}42Wf1d1q>DMT(a=k(de?%AQ+3HwZumf`9X)la@^PhdjS|$!9wlP?+Z^bw|G^!P zlnR$u2pw)CxXYq;VQy}#kuJihjUR+$mZE5J^`5ZvtM6+fLMev~a+8H{h<31CoE$w; zLh>9V93{=Sv(OTp%~TEj@E~cB^6fl=@^wxC(u$?|qHDK_5LjX@f0f_<6;RQ+B3xE`raV18$|s|6I{c-NS%sE+RB*Aon>_5i&`ZGgB;atzdwo2ga&?ly1557^jE_2B15WpRa# zvQ%wNT{axy#^SURMYuY@W?(=63ecqBeG!;DStZKsePkZc_w|>8P zN;p1qztnQ0u(qG+{S5Ayq`=)8McGqFm-zD)w5`ylRN}v$<~kj~17fuST?m}>BsHn` zS*rwf;xmcI5pVIHMB7sj(@KU`EKdDBjbx4ef?&*CEbtC=n%qlI;&~JVPdInj~u@s 
z)myhsituWzHSg5RIwbr)y3m4-j?}=P3}OqaS`~r~Dpg-OnQJZZOl!9bJ0a03KC)T4BFtUL;*e|xinV9c8ZWF8`Oc2!GUV_L6DAs2qo8f(U z?V_*AQLE-zvv)N$R(!V%XVtnK2+K~yZ`7V3(zIK)!_wyR5Ok@fI+-=(2O$o}5}drqlF#&~eiuL^63UxbZiRI3HM+Z#dJ?%=#1C$ooo`ly>*= zZt)$d?|%y3YH}~D5^ZtR#q4QCbM4vIr_EVoY3>B36ZOZ{kyYYpE{Ef3rJ{MZyqU?uRRi4vo-R$Q#ucMw|#ecVs#mfdQN@oD`yjq4Y zytZd=0n!Q8T9X$SDK@Qcb_1_nY;`j@2Zx@EwVMM<1;5hIRgY$M&-%`! zQ&}O2gfI7Kcy$T#ys1b^fL&2HUjK$NJs516oMv@O8Wh##v9`0QUC-BCxPps`B^T?a z&6RQ5qpGo&F*$F~Evn^c;+S5^SD8B3t8?nQ70{HhV(V$T&xqG+4h@Zt>phN7^A;2} zxBA#wnIisD9~!ptHiQz4l|C_^5wJd`?ke*hCa+DV=L7M5ej=;7Scy;22(p`T;8SiP z#}(ExM*cngkw)vIgy)N1$<^e!K;EQRhsqnTv95x z5BUb#zfHM&g3QzY64RrFfm1D{A44zfIdpaRW<>%+%Gzr;zFo!@7~l$Y6wI^2)3i`ghz-}? z0wU#g_fyUy7Gv`a6{+;aNP@l)p-kdNLIZ{(id9Hwx_8)Kx&;9|B$n^Rz>{2+b_YMW_@dksjS<};e8#ZIpkRbR z-he)R0Vkf}=>4aC#!#PW@y=4fN~@>){p#PRJQ(FPqd+c+<7ldu95aLpDk~xhtd2j> zgvJQ3j|MX4s~shu9QhJju@DXORbq(8$#Br+3jGMO=GW(oIs^s zX}7w={+D{vIxH?HgTutt?9~(B4}gFK^tVnHg6s zCNv*nYq7!yn$t-t_R(x}mN)9iM|8Gd8f#8}p!fu1twBwLgc^eqll#H(N51l${V2#H z$QFsPdecjxHNmg_^Iy`OdX0{!z_8$|D|oVep{24IDW1{y^&;Q10(_Or{b zFRHPh55A8bzaZdeHKhkm*;Z{BDm>Fs7?@#iIX5_Q8#M2cheD^%ge%MKh_31sijkZS z_1L%YbG}d`H-+14V*%*hx;9+ZxK%~*avC`fQ9{Bp;Xex|fDp`;${{InlQ5Sz1Y<_rma7)vRE9Nt*3qva?Tp2-Ro91toA@L zhQBb6`wWN~DwL6x=KwX#ImiXZQi)^@6z&X%2b#LV+cX7}_xhE#YZl<-Vr!}DJiOm( zC5nD6(brBKi1@61EtV@7ta}}A*G(pjYjg=w@hBbPN(J!J$GU@ma2l{0Nv)a5su6x| zqPWM*-A^8rvx%QeM2)#+v zYJkif=Oc^{M{TsCiVU6OU^D%}&i$wQBcN$~O^$L{D0vof5-N6H5edX-$gmzSn-d)c z6X)`O&Fu7(Q?)Kt$UE$&p@3i!Zg&+O|5HW;wrlZJrD^B8O%UFw+xyz}{f@p>^G;Ui znByo{`XWvHqc}AUn-;5(wm03$Rbz$iQ$K!oVEYrZ*tjBqUgjbAm-q$PYG~HeAguOy z?N~)IsJiw2BK(!X*J-+Yo?KrjLb{AWgBl$4pt&XUZEgW2rBZUK!k^!0rrOd?)R$rD zdt`JE-OoxMV%9S$iSQ0p_oamzlI|)Hlc}?xUlbczPb>kE1u>2C(V%P0L{&)MWl6a@ zJ%pAuVF{65<$|V4fv1aHlxPz*|N91BBKi~pHU!Ak(;wa13)5wsl2-9T7Sqmcf))12Ci7yl!j7~gmJ0(h4-+oRu(DycLoaL*BePOX zXu|F5r8*yoE(oTP#kfDr{661r(!Uaq!qsN>y^Ky)5pQusA7vix7Lz8F*UIF!%kyCJ>kYb#6Pt zKeSgXk%S#M8*n2QZ)b(qq6u&4P1Z3nkBvQ~F0M2bQX`jN7(t&vc&ik=H68aA5x%Iq 
zK4wi+=RG(2|FbMASYbG21M)>(VqFSk-W*jM2hz7oGpz<186+|@iVf=YnlrJ?)f%s3 z0twBX8&QeKOSWhIEw^>4z(2#E<$%uYQM8XeDyD39bl{%vJ*tYdb#9T6AT~+UMdeW##%bB3PA3d!5*-%icB!@T={^q3B zVExZl4q9qQ7m?9jDCpTJT`o<(nwwZHk^W!ZV|mj+q^<0FA$XPhVQ!OsZ)R=XM9so`qOQV`vyp8HQHPAI%hoi05XyZ6h^*t9*vjqpdXtC6{h z)qK#G<#37m03##y%Xk^EKf?IQiMxrkoubUZ@pFUI$!iX{^cNX{ zbsDzBf}k%;iQ1{P@W0*!CrDW5+AuCpEC9qPuSQe{K+~G|WS3j1O{$q7LbfSw3!cnU z)%m*0IQu183p3*J=ES6=UcL~^G~9kq*||E-ZT4JOaL4$4$*k;J*iR`0xwuLB%K~y> znrI1YN4i60N$-p>#3XyhF(FiYZ|c=q8GUGu6Ez)g01ZIco6wIyVG&_+%-bVo{NSLZ zn{lZx@vrIrCM2g1gc)T1-{(}}E2-X(J9vp<5!a5aPLrEH&Fhi9y$?6`hk<@@q#jqa zTA~e3I2})A4g&HH_+t;+a!>xK3g@X}F%Htv0bdQcWy;YeYwN;7|Pm>gZqZ zb`HxMA6J@c(+D}Nm$vY9%WLY=FS}0Ja4s5>nz6b?4T8VG*iVeMueO zm(sSry7?sVa_xjByJ)ftYu&UzMl%s$Fq-gDo3H_5*i(M;6OiXH%K2C%DaUYeuv~vc zd3^s>cUA`Pb-f~pv;mJUU^VG^V?Bs?($``W2u7T@+W%9sAywP5Scyj^z?L}>tG&A+ z=F@OBIb>ZAy&#;dIiiZxz6guB-!-)zoE_c&;V zrVAWowlI{S=QZFS&wEdt+sg@@&xt>$pcjQd8DGlqzQg3coz^0P$Vc347)9cJvnzoq z@s0>-bC}D3ktc=mrlKVf%VYPRyUp7sboZ4GalmL<2h^Tz%TpvO}TTM zTIKikC+L==t?h1mpT+08+Tk!YH|Hpl)W_w|oxXOX>C%W#+l_c%#hW9?kHq6CBvIGx z9J1J_>%4zTXdm#Fj+E~D^x0*}?y;m=H-qi4mh^RR_&eRQ1-JZ(93BM$1?lPI8z5}Hrb-N-(=jNMde@Ar?z0<;hdIn79EPPNm>J03#9V^v8eLyTIF``w zopyDe5_6(BMe~Y_Ufb=ktiSMA2H`;>+b&)4%{4urz|`?r2@ANo7k!6{GlyEM{OdM8_sh{ju*n36kX5(KXVke7Sw6X$wA&OU zachp^=rNqo9xPe@B9~L0Jo}+kE*q+aLjljr3ai=%;x|ftAlMt$J)8jS3olq${W4uw zdZEGlu6XU05av(?tE-LRoVK>B|FISUU|339_Dx>$KNTuV8Wea#pQE}h%R#)m3-?dAa(umNGm>)r zlnR6Tdrjq=c(;6a2+Cs8;enwX3zZdCZp6gI={Tb)xBAipGR<#s&F;y(e|1y$&@*UK zrd33i{yl6}3p`B7)vY=QDde!*SZjo$##fdu@m?gTCY(<&Owy>LwDkN^+Ci1cCbAiw!LTNmM*`(`%iRahs4-6FMIZ69_;6G+=4+QOsO(H1Fj4nkfmFW33QEc$_BiE~0> zE-NnbAyC+#WBDthe$uC41Pddi5~JyU{lWCq+qA+Qid~$P&83~3y8WpF)X)8M7IKH8 z4e7GN#%gg=<`u+f8dddq^Vjy)_0O|8Z@%zI)J@M;1g0ZS5;$Za$1J99%!WX7#<$Qc zEWoRzoACdJeamhs9K~W zY2ELQ++J~jc8dW9O+L_&2g0#yYEl<6M(U?8uPxu=emzmg{!Z{zdN_*%yIKv1k6n_~ zHh#P#R4G;rLk3&+@I!J$gzSmj?tyuj3*O7Ax4U(9(kPXN^Qe%$t_NHB8Qqr)YEqx( z&f5d36|YmmiZ-_Oa-N$x2afGI1?+(zo2+L~77avOux_8GF*+5r5UnfUrM42q_0rz( 
z6*=YQMod2BHj7?c1+o;^7dSsWi#7{9<%4?j8R^BZB2Ba8)KyEygC&c$(eb(Ek@BfT zi?~0CZNp`H!L%&w)t&dnFA`pd-Bgdu!0i#;bf1aIX$y&VKR~Biv2&Jz5>v(&q5Ry^ z`AKS`Zr_i*dHJphl{y&<$YT^yi-xSH&3t4maB}T1HA?^38DT9}sqzS2DY3K{=oOWJ zz;l1Yk?yLmS*qAR6Rn~nvv;zSGI4{Yi+ofovf~< zqaNDzRrhY~+9fL^#twtWx%jU#bM^Z#Lh9{BoYMhDW1Y9*{7YrwB9?vuaHKv|;AuLS zX2u>fJ|BJ6jlAB#0W{h)e@1jxfRuptroU;KsUkKHKKT6SeoXI!LxLnZaH-4OC1 zW!^2j#gGe!PdN5!&UG66i}#2!?;0l))D-!$Qq(v0s2B2=yGJ^8@VG_F}wY2Rz#PDwM#X}5yrt9C+ts&!;yH};qp#fl!T1KKipY$n2#3S_(=ifGEyU!F8vbbf9HxVi>W} zX?(W=u0d}sq~qXl;pD|Z`2I0vNh@<$k9vX6A6dlzkEH$VweogFUsYxI@nzvUmBk@{ z91c#NmacHlL+EH(J)6ckoGQT##GHJ?OoeY4M&Q9z2u{S87VL%zZs8EE%v6TYPpf`B zb%S!7eid)iM`-ADdQMN%A7iwvu#fUtcB?S`|9{f$K0oyWibJmdRn|yjQNK-3o19}` z9LX!XGe9u1>qFQ)9@Wok$9++Q1eet7|K-U^o=X*zqUaXBDTs5_GcU-Xnf{R@^T9=b zZeH~u9GyYmt>>XPHOSd6Sv-AtIgIh7r=<#qRP+ zjm!M&38xHq8hI>Pw0$ZTX~f8xQAi$ya=Vh{SG+E$ybQcd|2K0()CmF^v(cU_F;&tA zOQfo+b31ocGawU{(02xf;W1dFRW;_nxals{6_n&Qi3C}_2SMpkVyWB7gukhdfhc8y z=mB@j^dX>s3cO;epS&wmQkk2FJj6r&QO3fnV+!qi>n3Yuxq7Wsu!g?s7wp~#+n{Tw zW_?tz>u!g{(D?EPshiIZ?!v z_*`Znt5mujUf3U5r9S7i5aXG_*#-Q>e@_36gx{fD8J%@&Y|W8CDjm+*NG|_pu<)DU z*Y}m!H$F3j+08IiY(kU$3X~^|tB6;uPG{9*Cy13YJ6>Ry?HS@;=&U}`4GGpd)<2gv zp!ohRa9|RdaA#d1xjt5%p2<<#JUr}c7`@pBcAWok{AUIeKuL(wyV0*D}|4sE6_G$CKt~65tNiyV=AzgPwvvb1xOpWpGdMw)C1cKXZ zy_PmO+#yrwU;3o(%>q!au6%zZIA%#qoZ}Sou>a6(!vUA_bo6o8!pIr#KM<5e%hFK{ zuKkzS(f<)-mA7pb`>nsWJ%?3>^3gR33HX0l1UlbPXE0iTMSnGqOx{{FW4;d`Qn#l%CFBlF6o3e&32Aw*Q+7#a8NRf^%v zJ|hv;_hOMz32@+?WC2{J>DqAwJA)P3my8{oxPO8((l`I59d41)mlqlsI&FV&uLxcK zxxqj$QK0ivrXtvsrT)z;XQn+GZi7lSgPb(n$RgUz*ZkTei4lyS=6)XALgV zKFB-TU7xoF?z&=%L-+F6|1Ol@0s+NfqyzQ`2|iX~{ZT)vY#m=F&}#JD{iro0gi4r* zxN5OgG>lcHk`WOQ??F_k+gt8fxERnovgW4}YIXRFg%6e~&THrD$2p`C_*``L6;n%V zaZs``R@|ig2d9p-B{60A>O-M31&5*$5U087=Ahs~Yf65$YMA;#@$|^FI0b|IgHFEb ze&xn|6Uo`1cMqDdk1y301}w~*z^1Ky=Je>aozKOgSq51iiKoEjDU?n%RtMvcj+9Uw zD4iVO26wDxBL2^MIou@jDYz~bL=XgHz}wyupD$^^Q&pClRJBh`vECA(Y+$~*?oV=! 
zrNmGF0VAG08tPhB9Wqi^-!mDmG&>5#mU&fr)gJwFJw4NF5OGbXmoWnT?&bEMF|svP zAWR+ad!*}Hn~a{ZnZdBaM}Q;E4=P7h*bA~@EC(I1*iZBCjcokRNqV{z>LZzx|7p#T z(Zif$gp-g1 z8#jdB-48M_fXaZhqO^EPCt9_7AV#OjCWlvqNART72*?E&ow8^%6Xr`YG=?#}vWJPm zw05%0xqu~x`6oe89ElHTG?mL?F`*{0)7~TKbT-bjx8~Vgk7At5acdS-B2gIYBko>U*DT&Xj%NnZf~X8|ut|A@p?{iQD2X*;)rl zLn_zpu=l?nhASP2*-QN>%CU71{}}X5NHr~_x4n1auC+<%eJT#A<(*-fB&q*VCq?hy ziqut8R#w*1!iOxz^SP{ZrnD( z2g5e?3Dr}vw}uh7{na?MZoM~7)Tw&;O=YIRJEc@%?W}eUab# zDV3MZ$&#$z8}GGyg%M@|rQU|)zER(x{~kSz?XkVCs781gvZ>15O~bhvO?+kqbWC8$ z$N$t**~MZqe0Mb^_kTN6lMP@)P5-xe&-azx7iYZt^#|p#7Makxy~5wciE`i&rE`6B06=g#~52P^isB%ijF2khjiD1uK93N^d)asquL{$fb`b(N6BAlmvRRe3SV}VSdZPhPiw|T8 zf{5E2s0v*)O1;9Ff8*wyfTdlXb9OYn_^_s*_!zq#FGdN+NWVYKHSSnZ%QiOc<-A2} zqr3T1Ie(NPIrw3j@a0edQ|_G`5D?KG8X)`A=nubWZ8E^FyC~GaX@F9#slnh>XN2xj zRS>I5_W9}F!KPt5CMRl6*NC2o**jPD%VvL@4XJsm@mvn$v3OmT6yaL06$f{<> zM1br3uZXv+o@!{+nb|n*)-N;Y-mg@whInCI%G-F$1v$dCZc8L`^R|Xl?dG;NKIVKka^0{e{(~ znv7=W`$N6RwFe{MV5H8(q=+Z;^uxmM?=7(>SRp0D?(Dn4a_j%)-*O^0DT6diCeKrQh6O#_%IgY20aw4+spX^tZ0vF1r z^j!OW|NBAq{LLDk`>wX1Q`bLyUQn{g^Y&2J9e7^1a66Fbzu3Y#&eWHg2+1ngeU}H| z#NjxaCXDv-UroyWIAcq~^S^(F0_b{5ax>Vrf7AxKZ+)}FS{Pa8L{b(0%VL$0nn{lP zB?7O9vKA~4wbVJJZXu+O2f7b!K9YK39^YM{$-{Ab-^zB(wacdsk4I0d`_qqQmL%4M z3^kPp(Za@ z)^)d0%VEL$dDMXNe}`7U`tz5NX{V2;VaEcbO{l?M!Y*FdIDY@5Ma0af^`wJt=p?GL zo8LVfzDvEBd=9IQlDZx*%Y+xr@8jvLybV4U)%M#5CbR_aL0#4FbI;J)64kDfqdnh!0>CMr4|*{EE%FeE4@J5y&H!C#|*a=-ZPnOTh14EU%^-v zG>TN&9}-zoHAnb8Dfrj9Jmd9rFsQ>$zn9(qs9p!b8dwIl8~azBOB5O1Ez-`Z&v#F@ z2RTub(7Z8iowi`U@9)Y^H5AOor0Tz%B{DV^HXZ8w-18FpUaj6q>r^hRk3}i)zo*(y zY6oIJ(Nr}hw(5F4&b3Kq-`oN=Juffg1lfF)5^CEYWc@%3JDj0r`)Bd}g>}E- z4NiS`$kj>?(%^e)8wC%~w)y$H@~(&9^)bv;tboVmdGktbli3BoJOA(1m`~1& zT|xqm7BRd-rh)0{45l*($0!f-cLe{@z#B z-TbEsjMsZ}Q^8RO8_XZEBh~Nwgdg=wNDq(ix+b3oYxW&}Rl@vpDr5MMeBsb0^ANAy zNa^_Ly2j(|i17(Vq#2|Q%5=Imb{B$w{?*rKtP+TZqAeO&-!Htd6U$)N;0<%+;{~o& zrhjo;f?%Cm5E@db1%(T5QWJKAXuQqfB@zFDf}oAmu4&=NmNVpyRGTl}3SpYlWw*oo znS^&C9x{S{h4Si~_ezOYZ|1FX47xez?2mpp){zembIjL58~a3f4zasogc9Rb(aa}- 
z2K^ZB%dU^a;HS=7raDD6jxJB+HB+V8Zoz>e&FaiawiC?51R?y_`WcZpdsGdbWL4A{ znTXsz@Z|w|t=f%cHT>7XCd!z6`$Yir$&dOX{&6RMiHm0UJdpv3Ad|Qc;TS*$i>K8O zBS4oAT8SAgj7wbu8lR5BWW7xY$q^a~_Gklr4>B#TbMV1;EdY5=i8IHQG#I=T^s%;Q z@_+t`Um-sIB@DIRSF>|cLJ8Hpgy}tK+Z?Jk|uW7 zAMC_=a7=|vJzK2&YZ0tx=jM52m;yLB3&K}ofNxIbiXJD}!;TU$IFLJ0#)l)<8iXup zyc^WC8q(aUQ|=NE+j*xPob21gLjhfbhmO&(hLs~h1`5hsHir!m8$>)JgdB8sp-z_3 zoc=SM*lvZ+T>%vc$1}285rrm&QYaEcj!N}&g_@Par1N|wj>aqLhbCeCTIf81$ckcR zyk3qPSVmNgk>GTA;CW;`LHH@5N1NbscwQbkUsddeMJSre@||h`nfL0+$awBvl#Xho ze{gkhW^`J}4%r*e31Hh?!8i)6Y8Rsc^T5l6f*;Zey>QkQQRej}IeW4LrN*(2O%6k; zsFVsJ_V{duzntFZ>qTi9C=-5asgd-rmd~s9t}~ijOZuKB6&DH$w6{OEkyS3p3f00{ z(yT`RNeOSBa()PV^=~!ML;&N>0~un_7y-7>mDEM|TXs=dq36|?Eo-v8Atc*1OFEKD zqYn3W(2lMibF-s-_x3WuiKQ6Ej$ zCu{*45k9x<`tfbS=?sBm6FGA?Ph+`du6F+8IC>>bu_?y&G#fZGwcTrdccyGl?x=C% zV={^vzmB|7N)~F}y?-GI-r@F+AW4MN)&{5<5+R1;?VEIi|4o$F@1*Z}if_{sMlAN< z+Q>?{b2sm4BGRbY%mA47r?06}giSRS@KUmu^NZ?{Y}yu#_D=+{ZIKedPYl&Mw`@3s zsH%ps@3y(AiP)Y*wvw#ThW30>@kdy?NRz!LAeYl`+1WXr;m2Z-VV~09m-_k+y&n|h zdxC%S+egSHj)G5J-BJkr?sjb5)~^?kdZOQltZSa5C|Tui^$D~W8K0z-C-ER8(Y_<# zH1E`YbM+f86XbTAw16{82Riu-%eQo1|C2j927lNDKONtl>_p!?dzwd8eD=~W(Ff%# zCNFh9?u?Vkx~-WF`+>hyw>%hf+VKM^C4Jc48D8Axj#8{^GGBTR@k4T632Ko{J@vY$ zD>Ug+F$`QM$kV!k=mc%Ay*d}3(4H7P>fq=jj=i*+@$378xf35hist`BYss#&2nd~o zDwV2fp5EVzebqnG2K=XH>5lYMwpBaJ5zBzK8k@bfy5xKHBA@SS<^=fnW>=S&Zlt+L zSOX9BMxDuqQ(_pB*J9h5W}Ucr#k<4$xwTo76>HCvcDpq3{o2lnMIF%H_;XX<+~>;8 zD!EIS+44ZEzV8c_8DQlRV~8>l z$(N?kXL=XTDU;cco@2Xhr&WT`xI7Pa>CD*u)*dJ5<-JxF)?X#@+vRLUL9aK)`qnK_M$ad5vcB@N)<{wDWtalD`nb?G zj{k0(lJzhtTmR6&WK927xl7(6brav@Vw3e)9a0Lb37`%JJdD=#84gApWKEyPT&m!5XK%-o4!sqVTJ~yYd zeB4xZoR%kX7v(5+!&F{fqkbOs6qm4hL>}N^)L5BnskN050rNw_7&n$ng4LMYs?TMt z6U)xTA#6-YiYHv&ub2eN@;0G?eSO ziW(l|c@^9mZ(Xi_Pw|H)RH!79N225!JinOM3H{IHz{Bn??}KZ)NcyE@iPA6ylQsPS zFo)-qFHgyb^8XAIX5%)U<>3^hbEk0tgQad0Nv32jNpn-?Q^!A%u3^n@d7(bK!@9ig z@wsXR4rSLNjJ=DnNg|0|l+|f9o($NT%U$o$8YLAPmVa1BGvcS|_=G(Vs(^q<`hyzo zv2hC5G08@$+;0-3KTVU(e_kFR%YJJJ<`WVuDC=U<#XE24KjMbdA@tvWEGe5>c(cZ0Ww6(g3oi>pI_4Q7 
z8Gx{HYa16{^79LQQGM}!C?(Ryq#j^`#+n^IcY=!O_J0(_TUMo2-0%TY5XBmIm1?;C z;2oA(gR%QMu~9FElEru>TBcFRi-f!0vUtp+zK&d$bheF}p>?h{jFGCG0Fy&Frcr|S zkU4P_)fXfb^O;cY`~H`+K6S8-=Gc@0G9gbHN5*{0frdT< znPhtfUD`;Gq8!b)vbRu$WQ+|iI?BGK7t*E0JcOINr2A1d`G_iLZSFnkv&g!8H6lwa z1qS8n#1ZFo0vT_5h#w~H)tNi~rDqGXSZ-V)dOtg3{~s1WL6z_XV+OY+Vl0P-8I~^8 zsTAImF8?jppu9>rFm|z(v@xH~@?On8ZFgD&5+>_cvphZXs3!M8fKgF6w)3FQk?~!O zYKo|fSkVX0H@pno$Raa4uiq>>Z~cJg3$hTc6h>JQV)|`uN|Qg7nA6M%D2Js&LwRte z>XkJ!SbES<=n)@ozPjqqB{zV6enguCNT8*kl*zEq8m_O#ajW{znB`Ywqpldg%LEjA ze>JtjkS_8s;?=o{UCmuwA_LnpmWLfc$X?1^bb#2r)uOWa~65Y7i}&zpR4 zn7Fm-f2tNlFe6aE4Vq{%3ZV7dQc#QLQxjxLya1Nw?^Ls{UUJun8oi$ z@~up0F?eg(9gErP$gH-T(o+_Dul8G07rlrz1Jo3Og@@EVP4v~{@SH!1gs18wtN&$M z9gOZx)Q=++S6Y;Id}~u$vwio+)x;A2k=FpRT;y7QSQbE%tmWzOfEDn?yY{zS?9Xck zSCD-r#i3P|=1i|Pz6jASI|T8?K&r-|F=q;0_m8l;h|d$Wrt^7w`K{Jr2&IllBBL{N z%3e(B-+pE(vDy8M)M65M5RtlrP^UNAdWvvbm3-1pfBAq+_52ZD)9!9+GVfWojVvrQ zwf3~CV2G{q;Y2HNAMR|*=aAjs{B=JJGjntdLiizeo=tg|59{>dJTcK9dEpm zelxTLUQ{C?Ns^eM4IEQ9-~5a{D_xmh7E}`yn8@D&@rrv2UXy!tG5Y~{H%br0Nn1Bq z2sDHd%aH8Ln=;PP-qrD#JM-VdGv@cJH_iwrdaT1Qk0HNx?-I~M;T|13m_|qD z8#iK5M_U=aZ95bWEsNN(*twR1tB@$!WokQAP4`eaH@Qej;22T+ZF)H1PSkzBUOYmM z#VW?49V_%{p%JH5O%}c*?HtY<9-w_MupjR~0VHFrnx7)w!5~}*agfAL*z5h&#TNC~ zO}OzQIFLOisA;zo%fJnMQB?9IW<|-3y$`<%@f?a<-hg`omCO$ zR}{vi_@8EBn;%{Zxic6PH1FRi*mJJ$Ec152IO9{c-&TTeUs(BS%aNHxYIz%G7*IEn z4KxHW?o0g#^ghPo9An1(C2CZrX#^!QH7G5-syC0fJje*p&u8Hm20So^!yjCMbk2oR zfIvsbk<@nGrG5-Iui24_lU&?pMx0=7Ky_du+I!(w#%L|UG7^EYeB|<$@snvD!u3J+| zh<$Pr3_o+@Ff0Byy4>A#tpwe4LeJ=g859X7!397&)H^I(ct{W(GW%G)h5r37?-6Uj?~EPCK}+rBREFLI*H+28n*Ea- zV|q<9PIhXxAtbqW>y<-WX@y?uKV%+f;%vE1CCvmQcHdq$D6MT7`Bj{myrq=;X^)mDi|h zoAX$7j@AZz2t^^)lej-&u5HXG?&RB#=KzDKvt0??7$vRNpF8>Pg4U-S?%TEOUU|$i zKFZfoaR;f!R9*x!-8ZZjQFxz;w^~bLnkIOPTSDL^>J5C@uz@o_~62QXPvMJT;%R(#B6~ zD~fKNXzc`Zcu*N^H+5Itg$Fp8eataaqpB2CQ5I(@Y3*3obeXNmBr}=&VCvI&K{;{xFlS!VnzChj-8GZN? 
z?(jEV3}`z?Dc7pS%1<&0Geg$g6Uox9K~_@5B13F<53`qezdL2i==`6yQTuR~MlZvV zZ(KIbVl)5)BN;t=SAG`#Bljmn#zLHSo=CS%54twwygeH!(Jmof8A>lXRj=p{Go*Qg z)1Jz7!t&L+rwj_b3(g2Qk7OB;%_JDN>O zh-nNGl6-gA*80PP(af0Eg8ZSzkuvs&ac-ZFlMNmBm7T`mF-UJ(X&M$O0@2~7Pgi-!my^P~hs~JGGD2of%-=9FjpxN9SwL6}y1gD%Gq2C^>Bc(^CzWkdM-kT1+ zmqJH4g)Zif!(+Ky9!KaVQfa{ny=Irp881CP22wVeCGt-ZDb+f3_wsDDMlqcGnG{>? zl^RMQqnz&d;_&|rM{HDyq6sDfjIKF**&Ga2cvQJ!P|2*f5ppsJJ*!JX1Y<_QlHDfX zZ4QzY=Px>JG>~s1Y~a%jc_P#-3siWd>n;hq#Ye&}rPdp`j399BS1MEmY8<$H6nMdU8y?@FrW1yKJ=LOy!ouhspiRQ-CEbM{|++1NXH`R0f#uB-_gkdcbUAb!JQ$D`dRV=|RR3<)m{QR&B>Q4=aL!qyFAK z!+0}Yz7#Tp#z98rL5^|03CQF}G=B%4+9%XOc$&%k-PFF()YCvn2;vzz~6p%Ip9uv~w%8dWWDeU@lD zUrACbRPWyOm~?SZ?4`oc=#P$cw)R}x=mA3ajV&BlUy~%rXnAHP8-|hYr--Tpqw3Wx z=}s0#$xD~+{|Fb6Qi=*!1HHJD8^iYh-a8YsmG}{#CR1QRw4>d(9%=Sr4^-0N#$vIG zoiRiF#1ikm>~9g-*d_ihdQA^eR)`DqR&I$%^i;;;<>u@|bNTI<(eBxqV!TOKXMLBJ zr)eV?2MV1Gk$#xa_ld>qrLdqW`GhECm)O$GAb>3(?f_%|m-E58?YxFsQQ4odW3IO% zj=0dv^jJKEeLtnrLkFuDI`HV{D~bfYx!L6vl`hdjWZ$1C8Gj60yUy2WA6;$^^x0<& z%zkGv*kcXMg9QIjATF#6!fwZ2EN5q49rHK!noa&RzDwi#|4JkOUxWHe5{UMyh)=Uk zV^wjmzM|EW`$L0st)&71z-b|J$TUb37YSa5Fy=W~qTdrLAltY2sPAdfb-tK7Wk{_$ z4`~9ypW<1_Zav69%GkkS0H8^>vbufg`l+0*hPUp%^lm?{-8#6)d)9HfC?)GKC&c|M zdC~Uyj@=OZa;yk-SKZ6tCXGcTC|pm4H@rZju2!$oqVK&hZ1Bu`-npT`Ya8hSiBjnH zu$pw3lkp^1{b)s*<5He&UV1JLm2>)3dLFYjr?lby*%b%M#aVy8^?IjK(&#|YdRQe%D3W&3!l;;i<)x)kL32ySMy&~uvs zjZqwL|FTQO5rFXg9a0g@EJQr1>uzfZ!t%Gof_z7jg8EN=g@^CP?4&M4Lz){Mg~?mUm%^i-@U8z&>Ncs?hgm+PqplT3RI^eGR*eJPgjY0rd=E5 zXP(nUty9|ufxDj+>^(yq{qEf7dXeFpQUc2^;Q*xaI z-skPEhztvp`Q)+;PaBZ*MMr6>ipd1{l!woj-qFX#Ua>&p&EDkfmEZ20Ugw)7g8ugl zfR?Qf1wvv}>Hk#UQ^!8;M%M`K+LsdYima5Q>5@CVyp-48(l0OSKVk5N@r+2h);?gB zd!~ArY=H7hI0AbynEG5UcfF2z1nKZiZjt=gIq7WAlc1#R{k-7}Yq5g3*B%3f7sH`0 zB*8ReOl~xauqtQ4=ckZ=bLP3PnZQ{5Z1;v0R)N;HKtZ}Iug{$~$8NomK9!jdn?*{( zb+@gpoZOebUp)KdX6Syb58(xrogZ42u#gZ`Q2TSUm&T!(`qAN zGe#APcv53l;G)w&fZcP{meW8FSF}}9Q6bG^H$u?)rc36W9N>F3^E$`!wxb>s_SeMr 
zo;lXk_m}g&{q4CR$>kTqrH>Yf#($>=XkyR8AL?;3ZKZxcLh~*?m(>`c~~1N>M?mcrl0Plt*v49wiWy(w}{!eC_Odo4vD~D^~P0I*DSG z^Y)ref{fCR-`fTz1C8N2M@y9d-DYL@*v1&(iWyPECtZd)v`A4&n2Kc)Rf4ogX{>D5 zb%H#jiaZv+Z<|nrlWL>=Xx-1o58i}+iv%xWgpO?MOxoW<=4C|b8Pomm@}QcVQ4G~%Wm=a#jzf$e zznGv3VJ{ocIW&xi>pC97<#V#G#-pa>#hjqpyY6OXQD(>^*;Hu7dA zxH@J$FjqM49<__V&5qC~;wP6kxmB9`_=0CtuXc#wd4Dm({$cA!j>-L9AFgNCF(1aq zud>MUd|57t<$k#`i4(XP7W}v_W9)h3>GL_Ye&rM}RkgTxK z2XfBbFlnJ)=cGf}^U3JeaO$olNHh2;e|qj7ACA>;y1dBN_^#;d;MLBU<5s06LV5M` zq?_J66~s;Cwv@7N9c~U&UUVFmc6Xh;JY4!88HGJm%MNbY1@F!s!Z%#rNHn9v&2a+ihSJnqbTkbbsSlT+yFHRpetf zbzlt);oLBNryb7LIuu6|xWbZq?RWodYG|ULxc4-jz0-T#{5N*^f`sF8WF&cizP8>1 zsM!g6ebwsNU#RKQ0D6hSfxj+7$ac3K$6hN`@BAyt#qK^hf%e)oY(QY>xse&J>3F?> zQp5OgeTy+#Zzx{=tdk_vwwCG<@dd{DIo(JPNGtKDR#3o~Jvb z39Fi>N&Y(q4{K>nVEoEehyx;7X>uL3nnd37uS09BW-cic2-x6ztp6#{t0&_lB-9~^ zQuVO0IY=qO_#PNO^dEHiU(|IG3&{kV`_T72GKD2)1^$n4=MRr(dCsi|VB#=F)?N4O zg<@8FogjDDuT|rDlmgMS)lj|7z?%VoIR`xN@j6OGoK>o}ku*26$leOo93*=8@txsC z&b>VOvAS~C`G++-=06taVMcE|VT6dA&4jCKZWF==ve*&-F!u3g$b zi1K~=6Cgwp;TJUKvi}A}`TmtCT$39@Bcq9xVd=KBv;YE=`T@lVsdf6s;h7SS8r~9D z5@vEY2Nhb*Ch5CtSzKhaZnI$YFFKQ{=FU3GkNo9QLoH>jvwSQr>M}#S-dkH!3=@GH zlpkUIwDQ(A3r2%Etut!RZl+}{DiOY1$Krm?BaVRs_oFlxl)mm8*6&-otE;nhM@^g- zHUfWhK1Old`nLJqH-sUmB28yZCR2TOvvg8htkrm|!fc7el?7>ao>r*T-H1z0PvEK- zxSdW>90nL7bBz+1VfG>Fz!`0yt4AZVHAE}22bcSh_(>^O=r!9s_9U$3(%yb&CJn4n zm~p~w7uCUK>(r>}s@ZiezUWXc`o_X$HS(vXuq?r;T)tReGDv1a@`?%7X1Yme^fELN z3AS;Bsn!A~`)9NYkNaG`g#xAEpg6Vyi4=d6Y{JqWdChCk5-Lx$==;WGijO3O|6YDL zU8l2P426otwdvWbKxo;d!Kh!}MLHg|G+q$bVxDj+r5MUZSQmFB?b0@S00{8vO zYoO^0U5O`8H;MMCqP7^SB6e!1!v&Rr#XGiAU9cr(R7S1(4mv^y%FNj^Ed1WS192AZ z&yx(-fO3aQT|>+cs>pbuo!xJEb(~MD}1y zeml^0?6m1ubI6f1e*=y!x=d7@x1QP=2tH5lQ4%(A>af;#O|-0c8W^l_BLyztSw_b^ z*Xrf+y&V-`7jW1P@!`p0&X^Oslv_4C?1+pbh{B*qsiW*-h-8sNX#qv08%$P6s`bix z#YyMcfoC3VRLY)dOS+RZKw9*{(c~9B^~A?I*u?EKW8h!5YR1Kl`;mfW;abU<6PwrK zZ5lMJHH3HZC2b9?;ReC%u74Oh`36M4iLXeDwp$zK#%V?n19>F4q<_uS+14oeZ0Gfm zR)EiP3-8tfp=bbN^bG~8GG^$evfWyL<}@EekS;B{3Pt 
zq6TySrSCJa%5EE1QfIf>tHUx{x;Apfmkch+G0cd-7|d?i9Lm&mto?a`#~1cbJPz6- zjS;2m`9C8>u1a`@wMQjJToOX!@OYOq-fEZ5NEc|`jO*A_FdzT|iRc6c5;>=lnNL29wEZ zTNAi$u1KQHo!I`#W@GvHRcnrWYb$MwzF1DLN1q>JnJfM)2m-ip?CPp zJy0J!3byi9_*>qu^iQn4t|ofZ7d4%1^s#|il>6IHbB5eEm3SaWMovAy)gHq0lshRD zR&jFTE@NguQwodjv$)P$ZbhUX!iKdXmtAoq&V(c7So4wk92yElrxM&M*uv1UGu9xK zjSqpahK3&F-i{X?lkt7O);Y{_++gU@^4QwFT-A*YUSqKHW)zQ zmz6nvoK)0|Q7NR9+Y7?5s%x`-RzR|3-F&a)Hx_dC-&4bLoj;m7Yz+UNB4TnF#n>IF z1<NX7_8qF5(|tyRu2k;7p7*mQswRc53+rE1QEM)ghunj`TM1&9bEm-t5s2P zRBNcrLK54a(_hQ3hTcY=%3)De$7J(DOKXwIXkA%bHks#q$ztbKQ1~8y{h*>ZUDh@Z zn&_Hme_u+RlRqgJ1^&HVA9!G8qzuGzoz?rp{+;X2noE@OytRV zdL{4~i>Y2vP0Vnk*LP~`OmG9ICH?LZCD0lF^jz%LiZ|+NmT`a{n z>HF>v>e#ZMWaZ{IZ6NYa*i9Um%cmOLNh+6MyOjiCD5(7~98_92-la!__ohl#*cBpZ zwc~cpr$ahC-63EVgAX~+KWODwSW_`*d$z7_8<*~WKI<>zwPD>IsguNW-c?-0ZG5of zJ-gYFY#_E-B5b=2ki@fulr0u~|EqzE$bT@xjBD3CsKI-|aa{yZX6CEi`tz;pRcEEM z>k*6FW<#E``zoUzj&su|5#NX3elbbUZS$#?G?wdP8Fdk-{kjw%$>(;1d$fAZd zxKDqZ&J=3P0frZl912{U@QWjI9}~I6(yKID+OHNIl}*>(WAc#rJp@U-@MXK+4>BW0 z^}PG(UoUz9Xzp%}j1u9sH&!Cp)OThu%N_{(1Ik+jP1^s-iZC~xd*RwFqXdrxaqZQL z#s>#D7(!?o!nf+Fp~-~ZlLkGxW9nYr zHYg0|cy2Y)c^u`1ZI`%j+HO(TbT&o!6?In`(<2cglL{(XfJnYQf#$&jl09&WP(FloAadi)$u z{nikipC5fu3|ya`d=z++bH(zOmX;u&P0xY3VQmxw@WaD$lG<~*mod^}JS_wrfuMO( zFvriS0OfQ*`Eb4rG%;GgRZe4H*f~_f|drosdOA2+jF6n zc70s*0AjDuRH1O=$$%Shv9`|kGQzBpJyU}Pj)=OJYl%c}r$M+tqmQLiraqy>T)CSi zHzFH<`kzkanhvv1MKZAoNzSJ{T&3MqjxQe)?n1=Ip!?C7w8@xAa4p`l2BJkU0%;T1 z<#37!29p#1EN~c)W&hqi^s@Eyp7X7ZfQKUrO=7@_97AggewTwbf=+{7Xmxc}`vTtT z<{6ZI2y-MJ4rw0o1aIaqa7er53irI)9?CNh39f`ADUifiY}uwsGf(3+LyQ|vERj}+ zn?H{Ya@dBGDXa(jgq)d}GV3snJ{{)UXkQ{wu6+=jfnD-|I7c$J(7RNNcelO};-ya0 zTGJxhMSo3O+>&I3S01xphP=+|4~_Ym#Tr!^eCHNnN_3V=0NMD17!;u=4of;JG)>K^ zzO-9Ps+pot+xy1%c~L!bfiX|{kXuBv%jat?HDOOMKEXW!RNkuCF;FT}(M88Sk?UXn zKjQzVq>LW?`(OTmhTpMnz;o_+RDa>RR$#&)hGTljQN%va(j)!DPrLd1Htp2De37-t z0F38wC1~z3Ly|bh5q>xAAgYN%bL=4;ltGT0KydVr$3!|G+6OWzgMB3p_BrVa&Oa62(p&#i#Qj zt83Y_o%kRlsta<>m|1YHR=+Jm8O4CCH@PEjV=zsV9RA>1q0@c5Ng#3#G-FJb@jIo{ 
zpZ@)6D}u4{?J`H}Nso{nGKQe9?=g^$WI0@j{&lk0Ir%kJuWAL}mIdFrjr z(Vwm~**U24po(F4E8`8{CUIBY3Khm^d;IyW-FW@RiD1%2eiJKW;R!x*$^LPf7zg;K z=yKDAk4;=v1%ch9Hd&t3y{N-1_YcKbEClYDf$vA=a170-I#ph-=Qc&0N7}w==}}6H zNlcv1U1!mG3)i(Toh;a`w$Bo|OrBju*=i5F@ejp_I9q=qaI1?CM**`D&29My9X!3F zT|Re2bHB`oF1&79X7S1ksOZD*U4*{po^D+J2jOq5$4zH4D#!BELD#Y|KccQKN53!~ z>4nXo?~Q{-mx3I%x>{}P<_k40Xo+h$ZsJ~SWJB0pnx=G1Z<_a0QaRWI5jC84Zg#O| zKw|gUDQjlHMc;A2iE?A~(w|4p{Q$*>e2*SBMf?|otyO6Mu!r4Cl|b2@DM3a{DVIJ^ z;T=jCT}m4kO}1v}#r_*|7Dzy={ct<7VQ_4egRV~2 zU613g&Y{9i;pgI+7JT5(rt|@m;RrL4*{Z%Of(QY75 z_o6g5Qm(2ep<&fhy44Sfllqga%&%Fu#LBkD2dQD%)g>lBH{6QNUVf0)DS})Vtf>YY z47u<-#yLthd_OH~nTu&L!xn}1%&tgzgi)F@Vb=H9J&>WBUIlxPcA+}(!Ta*#wKUuP zlHGyGiAG5%T6{=z%Tr;yybc{KIh3PH*5x+?`vAnD=rZ|$3G_KXovF=dj zK7x9SC{Qg2Wskz~t&J|qC4PerNv`EZRGK6O(FGSlx=*C=KP^TX2{CXNqaOuk7+)?> zQT`7sph5-}#E*25Nhn`gnmkgY=kCy{)m6?t!ak@w)pSFvvZA&BR7Vxf>Qj9NAUtpY$LylKMEQ1a`A6##w7(&$z3NDVY2Z`D~ zYN0)bPDg3(-pY5-iGl_8g#5x7L>E11GnOlSig2&KcwuTIk&mkT(O<*C?UeZP9kV2U z?hZkVy!#x($alE@@o=>il*r4toM^(<$Z=`v3+Wz`*0m@KIg(Xth(t`GR(mXCrI`0n z@feS69dL8Dgk7O|?l2D8kuWhcDdAb-+GQHUTXP~-x;OVYQiP%#VE{#u*0sbpO!qUN zl?q%9^y<5tlif;Dnv9T#|I0EhmZ}w!bv`2b*${ZrcVFzLC%ZtuPkU~6FUeBNR^9o- zh^{3K88*NSO*NCwMEyl9r0GIfV3_n-iG=N!-sdn;=4u3f3V#c9!NP5mH(Y%jv3tp%p&p) zc0ftWd96Aa`a6=8FtINd9-GpC&tWr4W1Fk<{B z(X7%#O+RU8GUh>>-E8*YN$fq8`add8kH~($nN+`SzcI}mGhWW7& z*+m82;=N_)42J<0S?iTT#2~}}sC(E^`|b6t;Y=zTyv#ll{PuwXVo0*8cq}w8A&?SO z!DTwIrD)%PQ=wzLlUcPN=JfS$!#wluoTal&XT1ou>fsCu#Vb+`ZT{ zQN%Z+Dzc^Y_a`0tXuFQBeZ`_iOP|hMdUN+pskXDtZ;HD}GT0Tqr8bNO>3^TtL|dnb z%&Z$B>DKDPfRdt;yw_O~oGW$zg}nzPs(` z#T!}dEmPH)qh>zA{!!aYhAVEglCPK0dPau9{4Ne7b*wsBv12?1WS?7n5)qR2q5sGA zQy4+c%Z-kf+FHTZ^nLTPSzMPp&N@_m+ng}B&50&7PON%bR35vTMKhT^{*eY9N!{TBy`{G?gX`Kb6y!QE+v}UTB#ImjjaD5Mj1b*Y_JT7}OO}63vi+W_r;5;n4`6ngGWKiu zeY6U{MGUJ+_0_bSC?Hwfkek*m6&n$8#!#KyH}_N4nBOJu89NX9M$-}pDuDW z;B8HGQh7esl~A;_ns|I zDDcYJlm7V+Q(hhcf6M2-*SqFX-~P~#AC5rRHt-M3lVRPgQg-sNd})$Xc3vgT{u z*1x!8I2E(F3_SHH4s`bGY%#jS!ckN)bhMPJzemmGU;z%jzvqyL6AoW~GHh_sNV5!o 
z!vZL3=KTK{?PUL~EezyU2PY4#MUJuEN~@Pvg!X7%jfqM;Mgw`~*ZLeqs@ƵI{M z80X*KC!cO})%AjI;C3|~!)4ed*^T?w&78Fl8CHJo5eREE+`I8+J1=SlWu9(%A7)q7 zbXcs107DUvLm~~w$JKL^W%M1uv**y`n(k#yG26Ow z=XvR{%lFw^)T#E@m9Ca8iV`(_r_ef@J91j_BG$ScQyD$0Oiw;v|ISu}8TI7k>9 zLxtXLzjqJz`syi}afCbADj}F^X zPLorp%pj^5i_fWN*Jz*h5Gkc(iXy|0y$ZZMrVEqqXDxiiSXd0D-t|X=_DZa(!IJyp#M84vy*<9 zfX9amkJ*?WmOPdFRwSfG3|X>$ehE;DG?cJ{2pcK>s;!*Q z=@qm)NX9zwYEIJ!@GLc?Oq4M?i#g20z;;gj&LPLs-CvX)+A3@g`4WVRs2(VP{wL>~ z^iQM5E;eU!ysy+Cpj1jWu#Lrm_$1j=ufaYI#Ds2TF6$vlEzQ^+Q$1QLsP0@$BzXMD zmMxxB72T&Pd;Ui?Z3cHQ{Ex7juJ!|cZ=s`AL@Q6)wLeBjMJe;q*_!yWJhS7YOYw%YS{9a4S&q52^yw?|TsHD_p2Wlv3$Y+p&p+!F3wsMG_Jz&)?2-o_xMZ zwsf05S%bc~pGEU-AkOKVti0RGQ7(((=#Gh7Z(i1Reoc7Qb?6R;)!|NB&PY&drS(MQ za^M(c%X!?fw!dV4X&tEHKW}VM5O~(qX>Jo`f{YALmYY^ceBdt0_8dY*EiTazh)kJB zJ4xV2F~0}_d_4cS8RlnEuW_f8MbWd)F1EukG!$Q*vt`2^=n?8gz<#dt@ z@%Q{~Z4aIZhVj^a{KBuA31Su;;O=m>z6XDpef@pPDS}ppGT7f)^zNwP*RY&U9?~tT<@PTmdIx?IlcFW6rLB(O+Fu0r085?3tGkLjp^&v z2=^0uoYhh0`1q7$yZIg*TYSH#da;F@`q;Rnl=Hi2E6F)CsKj4yu1ER|{55coJx&&_@~b=&`=f@k0x3*?d>3?6iG&*+F@YrV%S@nB?kUR{~*w?`|q;>>1hKjzcqF z9xu(zpjDbt;{)&|@|go!+CLvpHVsm%z7i09N`my7nzMv*{55}P0bIo2uHAWVM zo)|$W=9nA$_na?Ygow@E^BXK{M_W-9fK(ud#TrjoYW5~J zs_OT#`Fcs^HjgOV!~2MtY2SH#{>T?Bk%H>V019ZGa9{NU)=c$m{|R~=Q1iOC z?|zOw1=!oU*gU4JeQGbQ`yXz&#Z~Fb>s>a5BH`B`IB*)^F}WOJ;8xBZiB%xrbC|{m z1TFro*Xw9*URJW9nn#-wc;p0y0Q#C-ZUS`Ks)@s7EUdSJkg&b`u3cmJ5d}TZI5O?S z^9;Q1UF8H6&SP3u4~7NbwM11J_sJ^$ulBAps0nNfvnXOg5JW&hniP>hM34?yfyfF{ z1?gQ49TFhYQHr3n?7Dyg2?C4KivdC)0Ux0Vq(})ZG>HL3Is%eNcfy}liH;7vvO<+ z(9zrlrV2@*znxjywN{iXvbf9Q1#pPAlz9?r*q>jO-9mv_9}0$ymK&^s*;@3#rfu<( z-hw#2kidm3zg+lK0N=-fu=j4Z{rc=Lgh`p0utn2RrRrkA!WGN}KUbZJ|D=@lm4P?5 z4)*aIN#xG5#FM)(+UdmU`8S8S#2V2RW#?5qjTX!Ce51m3Z(l!0BK>t>6>7YT$0Mxp z&mwY_*^%TgqSadA`w~3j&Ey(ooEVd~sf*r1me;hg=kgZglLiaIKX=;c`-;_)liK!` z;3UF9$tIbY@=iWl#nL4H(Zt_e{eC&f#fiAta0N1sW9H^ucLR%=p!=FBR7U1#0e&DbeY%QN9Or0fM$##`5RrPlpt(> zq!wc;f;H(Q=7^H8k1crUx4mGS+*LaOGbxrlx+Xi2oR$1LPuXaL%Ysw5JH!1|)8!6! 
zl*bEQjZD5f(Z)rp2>AOCzS_Od@5s{2sv@b>YXQcH_ukSb4qHpq1xAPnXW-ewUcQN_ z>Fi=SbD1kHl7-Ja!ebw>7dB!{ye`;Tn5a^G+OI=SbQ#-c4u3K<+nyO0dEGD|9o`pS zJk!0!`9As3myqXwQ^Z|%_>ur@mt|B}b(pPu0q|Z8{Hru@)bOASaL2X6BY7=Jb!W2C z=3JO#5zQ0xe>k_JLOcW1!^~($CUZJMpdp~yKdO#I={E= zJj96sI9?9&FfM{MGKj22#mC0ks~(W|VUtYp7}IR(Ib#unCy8|z<&PPb&1AW3OgM{Z zWv{k_i!X|}l@k9EjlhV>`^?!(A*7BJIm2h)W+^z9?q@aK>rimQ!Z`RQeD0B56XFl(x(cD2%08D$%xwe!ZCf|+ zg3POg`|r9Q=TWjwFFmwdL&HW}*QhzuLlx;^$6zbl>pS9?qA$X1Qr_Qx~ie?UQ&y0Kl6klHxkB2h^|2i>9#|WU3j=CX;SG3*Je16Pe%B9)F5iVuK`2! z0k28Au4TM6P0EE&A(=^<0)Di@8Us?y; z6vTF}VZTU+De3TN8!5L$EY;?K8CbXIfvNGw+&%HzXfpF^c~+HL^+T4Ksog%K++x15 z)SykV2G8y_r6}Cnl)$Y&8hII=*k#$l>#Tb3u_DdPasa^S{XS%O|Ag$i_o3NS%`#Os zr}MsYi}s%&A4rj(z3iFrTMm$wBv4sF&+>0ME!-EQ92M*6R$9V_Lbqx>|lj*+t3XY zQy~%!vV)9+&pEU#zh*V{Rg)*i4S6RZG?IqV9?%y-FZHi@>#ryuHjs^9=A~Wmp!e+8 z;G-h4ntA9P>^xTfkd?(V;hQC9U?byS(15Fg0`bHok$NAi`X$hU=h;w|CAs=jj<+Rc zz=$fRrc$tIg5*(yX31oK@?B*}tX`W}KRM@6VDv7}?6Q{XW|Tfpwj3-YCz1oZ#_5l7~K(Tvz$zul1U5+%{%a@hWG7iS7?T<|9 zwV30WbB9=bF4*Q%kMMB3x)?inoi+#oogI30po=x>TB8{pJUykSiY)0rLD6IFP@%Xu z!Jne?#{#d`#5gsGj-_(^#CiDWDaoH7{B@#X2s$A%5a~)o5i#9_TR^;~cc5ACSXD27^XwCd*z~tElq**MWu#g!zg44{9j=H8{UVamndBdCN<%b)^_> zT_-%6S(Vm+ugeYJm4JSnm^J8M6-J{wgKYv*?fd-*U7n=!BHELaop&)Dx)lWeho%1l_6WW>2IzD{iFh&+N7TUrr94`{s8vpAM}y@;UtIu de?q~-I1a#yi+I$fO$5imxB)gdsy1+s|0i#$43Yo< literal 0 HcmV?d00001 diff --git a/inception/image_processing.py b/inception/image_processing.py new file mode 100644 index 000000000..df9f5c23e --- /dev/null +++ b/inception/image_processing.py @@ -0,0 +1,479 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Read and preprocess image data.
+
+ Image processing occurs on a single image at a time. Images are read and
+ preprocessed in parallel across multiple threads. The resulting images
+ are concatenated together to form a single batch for training or evaluation.
+
+ -- Provide processed image data for a network:
+ inputs: Construct batches of evaluation examples of images.
+ distorted_inputs: Construct batches of training examples of images.
+ batch_inputs: Construct batches of training or evaluation examples of images.
+
+ -- Data processing:
+ parse_example_proto: Parses an Example proto containing a training example
+   of an image.
+
+ -- Image decoding:
+ decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor.
+
+ -- Image preprocessing:
+ image_preprocessing: Decode and preprocess one image for evaluation or training
+ distort_image: Distort one image for training a network.
+ eval_image: Prepare one image for evaluation.
+ distort_color: Distort the color in one image for training.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_integer('batch_size', 32,
+                            """Number of images to process in a batch.""")
+tf.app.flags.DEFINE_integer('image_size', 299,
+                            """Provide square images of this size.""")
+tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
+                            """Number of preprocessing threads per tower. 
"""
+                            """Please make this a multiple of 4.""")
+
+# Images are preprocessed asynchronously using multiple threads specified by
+# --num_preprocess_threads and the resulting processed images are stored in a
+# random shuffling queue. The shuffling queue dequeues --batch_size images
+# for processing on a given Inception tower. A larger shuffling queue guarantees
+# better mixing across examples within a batch and results in slightly higher
+# predictive performance in a trained model. Empirically,
+# --input_queue_memory_factor=16 works well. A value of 16 implies a queue size
+# of 1024*16 images. Assuming RGB 299x299 images, this implies a queue size of
+# 16GB. If the machine is memory limited, then decrease this factor to
+# decrease the CPU memory footprint, accordingly.
+tf.app.flags.DEFINE_integer('input_queue_memory_factor', 16,
+                            """Size of the queue of preprocessed images. """
+                            """Default is ideal but try smaller values, e.g. """
+                            """4, 2 or 1, if host memory is constrained. See """
+                            """comments in code for more details.""")
+
+
+def inputs(dataset, batch_size=None, num_preprocess_threads=None):
+  """Generate batches of ImageNet images for evaluation.
+
+  Use this function as the inputs for evaluating a network.
+
+  Note that some (minimal) image preprocessing occurs during evaluation
+  including central cropping and resizing of the image to fit the network.
+
+  Args:
+    dataset: instance of Dataset class specifying the dataset.
+    batch_size: integer, number of examples in batch
+    num_preprocess_threads: integer, total number of preprocessing threads but
+      None defaults to FLAGS.num_preprocess_threads.
+
+  Returns:
+    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
+                                       image_size, 3].
+    labels: 1-D integer Tensor of [FLAGS.batch_size].
+  """
+  if not batch_size:
+    batch_size = FLAGS.batch_size
+
+  # Force all input processing onto CPU in order to reserve the GPU for
+  # the forward inference and back-propagation.
+ with tf.device('/cpu:0'): + images, labels = batch_inputs( + dataset, batch_size, train=False, + num_preprocess_threads=num_preprocess_threads) + + return images, labels + + +def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None): + """Generate batches of distorted versions of ImageNet images. + + Use this function as the inputs for training a network. + + Distorting images provides a useful technique for augmenting the data + set during training in order to make the network invariant to aspects + of the image that do not effect the label. + + Args: + dataset: instance of Dataset class specifying the dataset. + batch_size: integer, number of examples in batch + num_preprocess_threads: integer, total number of preprocessing threads but + None defaults to FLAGS.num_preprocess_threads. + + Returns: + images: Images. 4D tensor of size [batch_size, FLAGS.image_size, + FLAGS.image_size, 3]. + labels: 1-D integer Tensor of [batch_size]. + """ + if not batch_size: + batch_size = FLAGS.batch_size + + # Force all input processing onto CPU in order to reserve the GPU for + # the forward inference and back-propagation. + with tf.device('/cpu:0'): + images, labels = batch_inputs( + dataset, batch_size, train=True, + num_preprocess_threads=num_preprocess_threads) + return images, labels + + +def decode_jpeg(image_buffer, scope=None): + """Decode a JPEG string into one 3-D float image Tensor. + + Args: + image_buffer: scalar string Tensor. + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor with values ranging from [0, 1). + """ + with tf.op_scope([image_buffer], scope, 'decode_jpeg'): + # Decode the string as an RGB JPEG. + # Note that the resulting image contains an unknown height and width + # that is set dynamically by decode_jpeg. In other words, the height + # and width of image is unknown at compile-time. 
+ image = tf.image.decode_jpeg(image_buffer, channels=3) + + # After this point, all image pixels reside in [0,1) + # until the very end, when they're rescaled to (-1, 1). The various + # adjust_* ops all require this range for dtype float. + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + return image + + +def distort_color(image, thread_id=0, scope=None): + """Distort the color of the image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. + Rather then adding that level of complication, we select a distinct ordering + of color ops for each preprocessing thread. + + Args: + image: Tensor containing single image. + thread_id: preprocessing thread ID. + scope: Optional scope for op_scope. + Returns: + color-distorted image + """ + with tf.op_scope([image], scope, 'distort_color'): + color_ordering = thread_id % 2 + + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + elif color_ordering == 1: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + + # The random_* ops do not necessarily clamp. + image = tf.clip_by_value(image, 0.0, 1.0) + return image + + +def distort_image(image, height, width, bbox, thread_id=0, scope=None): + """Distort one image for training a network. + + Distorting images provides a useful technique for augmenting the data + set during training in order to make the network invariant to aspects + of the image that do not effect the label. 
+ + Args: + image: 3-D float Tensor of image + height: integer + width: integer + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. + thread_id: integer indicating the preprocessing thread. + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor of distorted image used for training. + """ + with tf.op_scope([image, height, width, bbox], scope, 'distort_image'): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # Display the bounding box in the first thread only. + if not thread_id: + image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + bbox) + tf.image_summary('image_with_bounding_boxes', image_with_box) + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an allowed + # range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=0.1, + aspect_ratio_range=[0.75, 1.33], + area_range=[0.05, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + if not thread_id: + image_with_distorted_box = tf.image.draw_bounding_boxes( + tf.expand_dims(image, 0), distort_bbox) + tf.image_summary('images_with_distorted_bounding_box', + image_with_distorted_box) + + # Crop the image to the specified bounding box. 
+ distorted_image = tf.slice(image, bbox_begin, bbox_size) + + # This resizing operation may distort the images because the aspect + # ratio is not respected. We select a resize method in a round robin + # fashion based on the thread number. + # Note that ResizeMethod contains 4 enumerated resizing methods. + resize_method = thread_id % 4 + distorted_image = tf.image.resize_images(distorted_image, height, width, + resize_method) + # Restore the shape since the dynamic slice based upon the bbox_size loses + # the third dimension. + distorted_image.set_shape([height, width, 3]) + if not thread_id: + tf.image_summary('cropped_resized_image', + tf.expand_dims(distorted_image, 0)) + + # Randomly flip the image horizontally. + distorted_image = tf.image.random_flip_left_right(distorted_image) + + # Randomly distort the colors. + distorted_image = distort_color(distorted_image, thread_id) + + if not thread_id: + tf.image_summary('final_distorted_image', + tf.expand_dims(distorted_image, 0)) + return distorted_image + + +def eval_image(image, height, width, scope=None): + """Prepare one image for evaluation. + + Args: + image: 3-D float Tensor + height: integer + width: integer + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor of prepared image. + """ + with tf.op_scope([image, height, width], scope, 'eval_image'): + # Crop the central region of the image with an area containing 87.5% of + # the original image. + image = tf.image.central_crop(image, central_fraction=0.875) + + # Resize the image to the original height and width. + image = tf.expand_dims(image, 0) + image = tf.image.resize_bilinear(image, [height, width], + align_corners=False) + image = tf.squeeze(image, [0]) + return image + + +def image_preprocessing(image_buffer, bbox, train, thread_id=0): + """Decode and preprocess one image for evaluation or training. 
+ + Args: + image_buffer: JPEG encoded string Tensor + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + train: boolean + thread_id: integer indicating preprocessing thread + + Returns: + 3-D float Tensor containing an appropriately scaled image + + Raises: + ValueError: if user does not provide bounding box + """ + if bbox is None: + raise ValueError('Please supply a bounding box.') + + image = decode_jpeg(image_buffer) + height = FLAGS.image_size + width = FLAGS.image_size + + if train: + image = distort_image(image, height, width, bbox, thread_id) + else: + image = eval_image(image, height, width) + + # Finally, rescale to [-1,1] instead of [0, 1) + image = tf.sub(image, 0.5) + image = tf.mul(image, 2.0) + return image + + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + + The output of the build_image_data.py image preprocessing script is a dataset + containing serialized Example protocol buffers. Each Example proto contains + the following fields: + + image/height: 462 + image/width: 581 + image/colorspace: 'RGB' + image/channels: 3 + image/class/label: 615 + image/class/synset: 'n03623198' + image/class/text: 'knee pad' + image/object/bbox/xmin: 0.1 + image/object/bbox/xmax: 0.9 + image/object/bbox/ymin: 0.2 + image/object/bbox/ymax: 0.6 + image/object/bbox/label: 615 + image/format: 'JPEG' + image/filename: 'ILSVRC2012_val_00041207.JPEG' + image/encoded: + + Args: + example_serialized: scalar Tensor tf.string containing a serialized + Example protocol buffer. + + Returns: + image_buffer: Tensor tf.string containing the contents of a JPEG file. + label: Tensor tf.int32 containing the label. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. 
+    text: Tensor tf.string containing the human-readable label.
+  """
+  # Dense features in Example proto.
+  feature_map = {
+      'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
+                                          default_value=''),
+      'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
+                                              default_value=-1),
+      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
+                                             default_value=''),
+  }
+  sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
+  # Sparse features in Example proto.
+  feature_map.update(
+      {k: sparse_float32 for k in ['image/object/bbox/xmin',
+                                   'image/object/bbox/ymin',
+                                   'image/object/bbox/xmax',
+                                   'image/object/bbox/ymax']})
+
+  features = tf.parse_single_example(example_serialized, feature_map)
+  label = tf.cast(features['image/class/label'], dtype=tf.int32)
+
+  xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
+  ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
+  xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
+  ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
+
+  # Note that we impose an ordering of (y, x) just to make life difficult.
+  bbox = tf.concat(0, [ymin, xmin, ymax, xmax])
+
+  # Force the variable number of bounding boxes into the shape
+  # [1, num_boxes, coords].
+  bbox = tf.expand_dims(bbox, 0)
+  bbox = tf.transpose(bbox, [0, 2, 1])
+
+  return features['image/encoded'], label, bbox, features['image/class/text']
+
+
+def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None):
+  """Construct batches of training or evaluation examples from the image dataset.
+
+  Args:
+    dataset: instance of Dataset class specifying the dataset.
+      See dataset.py for details.
+    batch_size: integer
+    train: boolean
+    num_preprocess_threads: integer, total number of preprocessing threads
+
+  Returns:
+    images: 4-D float Tensor of a batch of images
+    labels: 1-D integer Tensor of [batch_size].
+ + Raises: + ValueError: if data is not found + """ + with tf.name_scope('batch_processing'): + data_files = dataset.data_files() + if data_files is None: + raise ValueError('No data files found for this dataset') + filename_queue = tf.train.string_input_producer(data_files, capacity=16) + + if num_preprocess_threads is None: + num_preprocess_threads = FLAGS.num_preprocess_threads + + if num_preprocess_threads % 4: + raise ValueError('Please make num_preprocess_threads a multiple ' + 'of 4 (%d % 4 != 0).', num_preprocess_threads) + # Create a subgraph with its own reader (but sharing the + # filename_queue) for each preprocessing thread. + images_and_labels = [] + for thread_id in range(num_preprocess_threads): + reader = dataset.reader() + _, example_serialized = reader.read(filename_queue) + + # Parse a serialized Example proto to extract the image and metadata. + image_buffer, label_index, bbox, _ = parse_example_proto( + example_serialized) + image = image_preprocessing(image_buffer, bbox, train, thread_id) + images_and_labels.append([image, label_index]) + + # Approximate number of examples per shard. + examples_per_shard = 1024 + # Size the random shuffle queue to balance between good global + # mixing (more examples) and memory use (fewer examples). + # 1 image uses 299*299*3*4 bytes = 1MB + # The default input_queue_memory_factor is 16 implying a shuffling queue + # size: examples_per_shard * 16 * 1MB = 17.6GB + min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor + + # Create a queue that produces the examples in batches after shuffling. 
+ if train: + images, label_index_batch = tf.train.shuffle_batch_join( + images_and_labels, + batch_size=batch_size, + capacity=min_queue_examples + 3 * batch_size, + min_after_dequeue=min_queue_examples) + else: + images, label_index_batch = tf.train.batch_join( + images_and_labels, + batch_size=batch_size, + capacity=min_queue_examples + 3 * batch_size) + + # Reshape images into these desired dimensions. + height = FLAGS.image_size + width = FLAGS.image_size + depth = 3 + + images = tf.cast(images, tf.float32) + images = tf.reshape(images, shape=[batch_size, height, width, depth]) + + # Display the training images in the visualizer. + tf.image_summary('images', images) + + return images, tf.reshape(label_index_batch, [batch_size]) diff --git a/inception/imagenet_data.py b/inception/imagenet_data.py new file mode 100644 index 000000000..0a6d22e12 --- /dev/null +++ b/inception/imagenet_data.py @@ -0,0 +1,59 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Small library that points to the ImageNet data set. 
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+
+from inception.dataset import Dataset
+
+
+class ImagenetData(Dataset):
+  """ImageNet data set."""
+
+  def __init__(self, subset):
+    super(ImagenetData, self).__init__('ImageNet', subset)
+
+  def num_classes(self):
+    """Returns the number of classes in the data set."""
+    return 1000
+
+  def num_examples_per_epoch(self):
+    """Returns the number of examples in the data set."""
+    # Bounding box data consists of 615299 bounding boxes for 544546 images.
+    if self.subset == 'train':
+      return 1281167
+    if self.subset == 'validation':
+      return 50000
+
+  def download_message(self):
+    """Instruction to download and extract the tarball from the ImageNet website."""
+
+    print('Failed to find any ImageNet %s files'% self.subset)
+    print('')
+    print('If you have already downloaded and processed the data, then make '
+          'sure to set --data_dir to point to the directory containing the '
+          'location of the sharded TFRecords.\n')
+    print('If you have not downloaded and prepared the ImageNet data in the '
+          'TFRecord format, you will need to do this at least once. This '
+          'process could take several hours depending on the speed of your '
+          'computer and network connection\n')
+    print('Please see README.md for instructions on how to build '
+          'the ImageNet dataset using download_and_preprocess_imagenet.\n')
+    print('Note that the raw data size is 300 GB and the processed data size '
+          'is 150 GB. Please ensure you have at least 500GB disk space.')
diff --git a/inception/imagenet_eval.py b/inception/imagenet_eval.py
new file mode 100644
index 000000000..5444f1927
--- /dev/null
+++ b/inception/imagenet_eval.py
@@ -0,0 +1,46 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A binary to evaluate Inception on the ImageNet data set.
+
+Note that using the supplied pre-trained inception checkpoint, the eval should
+achieve:
+  precision @ 1 = 0.7874 recall @ 5 = 0.9436 [50000 examples]
+
+See the README.md for more details.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+from inception import inception_eval
+from inception.imagenet_data import ImagenetData
+
+FLAGS = tf.app.flags.FLAGS
+
+
+def main(unused_argv=None):
+  dataset = ImagenetData(subset=FLAGS.subset)
+  assert dataset.data_files()
+  if tf.gfile.Exists(FLAGS.eval_dir):
+    tf.gfile.DeleteRecursively(FLAGS.eval_dir)
+  tf.gfile.MakeDirs(FLAGS.eval_dir)
+  inception_eval.evaluate(dataset)
+
+
+if __name__ == '__main__':
+  tf.app.run()
diff --git a/inception/imagenet_train.py b/inception/imagenet_train.py
new file mode 100644
index 000000000..3ffb55ee9
--- /dev/null
+++ b/inception/imagenet_train.py
@@ -0,0 +1,41 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A binary to train Inception on the ImageNet data set. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +import tensorflow as tf + +from inception import inception_train +from inception.imagenet_data import ImagenetData + +FLAGS = tf.app.flags.FLAGS + + +def main(_): + dataset = ImagenetData(subset=FLAGS.subset) + assert dataset.data_files() + if tf.gfile.Exists(FLAGS.train_dir): + tf.gfile.DeleteRecursively(FLAGS.train_dir) + tf.gfile.MakeDirs(FLAGS.train_dir) + inception_train.train(dataset) + + +if __name__ == '__main__': + tf.app.run() diff --git a/inception/inception_eval.py b/inception/inception_eval.py new file mode 100644 index 000000000..b91b2f9f0 --- /dev/null +++ b/inception/inception_eval.py @@ -0,0 +1,171 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""A library to evaluate Inception on a single GPU. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from datetime import datetime +import math +import os.path +import time + + +import numpy as np +import tensorflow as tf + +from inception import image_processing +from inception import inception_model as inception + + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string('eval_dir', '/tmp/imagenet_eval', + """Directory where to write event logs.""") +tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/imagenet_train', + """Directory where to read model checkpoints.""") + +# Flags governing the frequency of the eval. +tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5, + """How often to run the eval.""") +tf.app.flags.DEFINE_boolean('run_once', False, + """Whether to run eval only once.""") + +# Flags governing the data used for the eval. +tf.app.flags.DEFINE_integer('num_examples', 50000, + """Number of examples to run. Note that the eval """ + """ImageNet dataset contains 50000 examples.""") +tf.app.flags.DEFINE_string('subset', 'validation', + """Either 'validation' or 'train'.""") + + +def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op): + """Runs Eval once. + + Args: + saver: Saver. + summary_writer: Summary writer. + top_1_op: Top 1 op. + top_5_op: Top 5 op. + summary_op: Summary op. + """ + with tf.Session() as sess: + ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir) + if ckpt and ckpt.model_checkpoint_path: + if os.path.isabs(ckpt.model_checkpoint_path): + # Restores from checkpoint with absolute path. + saver.restore(sess, ckpt.model_checkpoint_path) + else: + # Restores from checkpoint with relative path. 
+ saver.restore(sess, os.path.join(FLAGS.checkpoint_dir, + ckpt.model_checkpoint_path)) + + # Assuming model_checkpoint_path looks something like: + # /my-favorite-path/imagenet_train/model.ckpt-0, + # extract global_step from it. + global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1] + print('Succesfully loaded model from %s at step=%s.' % + (ckpt.model_checkpoint_path, global_step)) + else: + print('No checkpoint file found') + return + + # Start the queue runners. + coord = tf.train.Coordinator() + try: + threads = [] + for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS): + threads.extend(qr.create_threads(sess, coord=coord, daemon=True, + start=True)) + + num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size)) + # Counts the number of correct predictions. + count_top_1 = 0.0 + count_top_5 = 0.0 + total_sample_count = num_iter * FLAGS.batch_size + step = 0 + + print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset)) + start_time = time.time() + while step < num_iter and not coord.should_stop(): + top_1, top_5 = sess.run([top_1_op, top_5_op]) + count_top_1 += np.sum(top_1) + count_top_5 += np.sum(top_5) + step += 1 + if step % 20 == 0: + duration = time.time() - start_time + sec_per_batch = duration / 20.0 + examples_per_sec = FLAGS.batch_size / sec_per_batch + print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f' + 'sec/batch)' % (datetime.now(), step, num_iter, + examples_per_sec, sec_per_batch)) + start_time = time.time() + + # Compute precision @ 1. 
+ precision_at_1 = count_top_1 / total_sample_count + recall_at_5 = count_top_5 / total_sample_count + print('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' % + (datetime.now(), precision_at_1, recall_at_5, total_sample_count)) + + summary = tf.Summary() + summary.ParseFromString(sess.run(summary_op)) + summary.value.add(tag='Precision @ 1', simple_value=precision_at_1) + summary.value.add(tag='Recall @ 5', simple_value=recall_at_5) + summary_writer.add_summary(summary, global_step) + + except Exception as e: # pylint: disable=broad-except + coord.request_stop(e) + + coord.request_stop() + coord.join(threads, stop_grace_period_secs=10) + + +def evaluate(dataset): + """Evaluate model on Dataset for a number of steps.""" + with tf.Graph().as_default(): + # Get images and labels from the dataset. + images, labels = image_processing.inputs(dataset) + + # Number of classes in the Dataset label set plus 1. + # Label 0 is reserved for an (unused) background class. + num_classes = dataset.num_classes() + 1 + + # Build a Graph that computes the logits predictions from the + # inference model. + logits, _ = inception.inference(images, num_classes) + + # Calculate predictions. + top_1_op = tf.nn.in_top_k(logits, labels, 1) + top_5_op = tf.nn.in_top_k(logits, labels, 5) + + # Restore the moving average version of the learned variables for eval. + variable_averages = tf.train.ExponentialMovingAverage( + inception.MOVING_AVERAGE_DECAY) + variables_to_restore = variable_averages.variables_to_restore() + saver = tf.train.Saver(variables_to_restore) + + # Build the summary operation based on the TF collection of Summaries. 
+ summary_op = tf.merge_all_summaries() + + graph_def = tf.get_default_graph().as_graph_def() + summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, + graph_def=graph_def) + + while True: + _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op) + if FLAGS.run_once: + break + time.sleep(FLAGS.eval_interval_secs) diff --git a/inception/inception_model.py b/inception/inception_model.py new file mode 100644 index 000000000..defad2dad --- /dev/null +++ b/inception/inception_model.py @@ -0,0 +1,160 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Build the Inception v3 network on ImageNet data set. + +The Inception v3 architecture is described in http://arxiv.org/abs/1512.00567 + +Summary of available functions: + inference: Compute inference on the model inputs to make a prediction + loss: Compute the loss of the prediction with respect to the labels +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import re + + +import tensorflow as tf + +from inception.slim import slim + +FLAGS = tf.app.flags.FLAGS + +# If a model is trained using multiple GPUs, prefix all Op names with tower_name +# to differentiate the operations. Note that this prefix is removed from the +# names of the summaries when visualizing a model. 
+TOWER_NAME = 'tower' + +# Batch normalization. Constant governing the exponential moving average of +# the 'global' mean and variance for all activations. +BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997 + +# The decay to use for the moving average. +MOVING_AVERAGE_DECAY = 0.9999 + + +def inference(images, num_classes, for_training=False, restore_logits=True, + scope=None): + """Build Inception v3 model architecture. + + See here for reference: http://arxiv.org/abs/1512.00567 + + Args: + images: Images returned from inputs() or distorted_inputs(). + num_classes: number of classes + for_training: If set to `True`, build the inference model for training. + Kernels that operate differently for inference during training + e.g. dropout, are appropriately configured. + restore_logits: whether or not the logits layers should be restored. + Useful for fine-tuning a model with different num_classes. + scope: optional prefix string identifying the ImageNet tower. + + Returns: + Logits. 2-D float Tensor. + Auxiliary Logits. 2-D float Tensor of side-head. Used for training only. + """ + # Parameters for BatchNorm. + batch_norm_params = { + # Decay for the moving averages. + 'decay': BATCHNORM_MOVING_AVERAGE_DECAY, + # epsilon to prevent 0s in variance. + 'epsilon': 0.001, + } + # Set weight_decay for weights in Conv and FC layers. + with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004): + with slim.arg_scope([slim.ops.conv2d], + stddev=0.1, + activation=tf.nn.relu, + batch_norm_params=batch_norm_params): + # Force all Variables to reside on the CPU. + with slim.arg_scope([slim.variables.variable], device='/cpu:0'): + logits, endpoints = slim.inception.inception_v3( + images, + dropout_keep_prob=0.8, + num_classes=num_classes, + is_training=for_training, + restore_logits=restore_logits, + scope=scope) + + # Add summaries for viewing model statistics on TensorBoard. + _activation_summaries(endpoints) + + # Grab the logits associated with the side head. 
Employed during training. + auxiliary_logits = endpoints['aux_logits'] + + return logits, auxiliary_logits + + +def loss(logits, labels, batch_size=None): + """Adds all losses for the model. + + Note the final loss is not returned. Instead, the list of losses are collected + by slim.losses. The losses are accumulated in tower_loss() and summed to + calculate the total loss. + + Args: + logits: List of logits from inference(). Each entry is a 2-D float Tensor. + labels: Labels from distorted_inputs or inputs(). 1-D tensor + of shape [batch_size] + batch_size: integer + """ + if not batch_size: + batch_size = FLAGS.batch_size + + # Reshape the labels into a dense Tensor of + # shape [FLAGS.batch_size, num_classes]. + sparse_labels = tf.reshape(labels, [batch_size, 1]) + indices = tf.reshape(tf.range(batch_size), [batch_size, 1]) + concated = tf.concat(1, [indices, sparse_labels]) + num_classes = logits[0].get_shape()[-1].value + dense_labels = tf.sparse_to_dense(concated, + [batch_size, num_classes], + 1.0, 0.0) + + # Cross entropy loss for the main softmax prediction. + slim.losses.cross_entropy_loss(logits[0], + dense_labels, + label_smoothing=0.1, + weight=1.0) + + # Cross entropy loss for the auxiliary softmax head. + slim.losses.cross_entropy_loss(logits[1], + dense_labels, + label_smoothing=0.1, + weight=0.4, + scope='aux_loss') + + +def _activation_summary(x): + """Helper to create summaries for activations. + + Creates a summary that provides a histogram of activations. + Creates a summary that measure the sparsity of activations. + + Args: + x: Tensor + """ + # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training + # session. This helps the clarity of presentation on tensorboard. 
+ tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name) + tf.histogram_summary(tensor_name + '/activations', x) + tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) + + +def _activation_summaries(endpoints): + with tf.name_scope('summaries'): + for act in endpoints.values(): + _activation_summary(act) diff --git a/inception/inception_train.py b/inception/inception_train.py new file mode 100644 index 000000000..eef06b324 --- /dev/null +++ b/inception/inception_train.py @@ -0,0 +1,351 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A library to train Inception using multiple GPU's with synchronous updates. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +from datetime import datetime +import os.path +import re +import time + + + +import numpy as np +import tensorflow as tf + +from inception import image_processing +from inception import inception_model as inception +from inception.slim import slim + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train', + """Directory where to write event logs """ + """and checkpoint.""") +tf.app.flags.DEFINE_integer('max_steps', 10000000, + """Number of batches to run.""") +tf.app.flags.DEFINE_string('subset', 'train', + """Either 'train' or 'validation'.""") + +# Flags governing the hardware employed for running TensorFlow. +tf.app.flags.DEFINE_integer('num_gpus', 1, + """How many GPUs to use.""") +tf.app.flags.DEFINE_boolean('log_device_placement', False, + """Whether to log device placement.""") + +# Flags governing the type of training. +tf.app.flags.DEFINE_boolean('fine_tune', False, + """If set, randomly initialize the final layer """ + """of weights in order to train the network on a """ + """new task.""") +tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '', + """If specified, restore this pretrained model """ + """before beginning any training.""") + +# **IMPORTANT** +# Please note that this learning rate schedule is heavily dependent on the +# hardware architecture, batch size and any changes to the model architecture +# specification. Selecting a finely tuned learning rate schedule is an +# empirical process that requires some experimentation. Please see README.md +# more guidance and discussion. +# +# With 8 Tesla K40's and a batch size = 256, the following setup achieves +# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs). +# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997. 
+tf.app.flags.DEFINE_float('initial_learning_rate', 0.1, + """Initial learning rate.""") +tf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0, + """Epochs after which learning rate decays.""") +tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16, + """Learning rate decay factor.""") + +# Constants dictating the learning rate schedule. +RMSPROP_DECAY = 0.9 # Decay term for RMSProp. +RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp. +RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp. + + +def _tower_loss(images, labels, num_classes, scope): + """Calculate the total loss on a single tower running the ImageNet model. + + We perform 'batch splitting'. This means that we cut up a batch across + multiple GPU's. For instance, if the batch size = 32 and num_gpus = 2, + then each tower will operate on an batch of 16 images. + + Args: + images: Images. 4D tensor of size [batch_size, FLAGS.image_size, + FLAGS.image_size, 3]. + labels: 1-D integer Tensor of [batch_size]. + num_classes: number of classes + scope: unique prefix string identifying the ImageNet tower, e.g. + 'tower_0'. + + Returns: + Tensor of shape [] containing the total loss for a batch of data + """ + # When fine-tuning a model, we do not restore the logits but instead we + # randomly initialize the logits. The number of classes in the output of the + # logit is the number of classes in specified Dataset. + restore_logits = not FLAGS.fine_tune + + # Build inference Graph. + logits = inception.inference(images, num_classes, for_training=True, + restore_logits=restore_logits, + scope=scope) + + # Build the portion of the Graph calculating the losses. Note that we will + # assemble the total_loss using a custom function below. + split_batch_size = images.get_shape().as_list()[0] + inception.loss(logits, labels, batch_size=split_batch_size) + + # Assemble all of the losses for the current tower only. 
+ losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope) + + # Calculate the total loss for the current tower. + regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + total_loss = tf.add_n(losses + regularization_losses, name='total_loss') + + # Compute the moving average of all individual losses and the total loss. + loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') + loss_averages_op = loss_averages.apply(losses + [total_loss]) + + # Attach a scalar summmary to all individual losses and the total loss; do the + # same for the averaged version of the losses. + for l in losses + [total_loss]: + # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training + # session. This helps the clarity of presentation on TensorBoard. + loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name) + # Name each loss as '(raw)' and name the moving average version of the loss + # as the original loss name. + tf.scalar_summary(loss_name +' (raw)', l) + tf.scalar_summary(loss_name, loss_averages.average(l)) + + with tf.control_dependencies([loss_averages_op]): + total_loss = tf.identity(total_loss) + return total_loss + + +def _average_gradients(tower_grads): + """Calculate the average gradient for each shared variable across all towers. + + Note that this function provides a synchronization point across all towers. + + Args: + tower_grads: List of lists of (gradient, variable) tuples. The outer list + is over individual gradients. The inner list is over the gradient + calculation for each tower. + Returns: + List of pairs of (gradient, variable) where the gradient has been averaged + across all towers. + """ + average_grads = [] + for grad_and_vars in zip(*tower_grads): + # Note that each grad_and_vars looks like the following: + # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) + grads = [] + for g, _ in grad_and_vars: + # Add 0 dimension to the gradients to represent the tower. 
+ expanded_g = tf.expand_dims(g, 0) + + # Append on a 'tower' dimension which we will average over below. + grads.append(expanded_g) + + # Average over the 'tower' dimension. + grad = tf.concat(0, grads) + grad = tf.reduce_mean(grad, 0) + + # Keep in mind that the Variables are redundant because they are shared + # across towers. So .. we will just return the first tower's pointer to + # the Variable. + v = grad_and_vars[0][1] + grad_and_var = (grad, v) + average_grads.append(grad_and_var) + return average_grads + + +def train(dataset): + """Train on dataset for a number of steps.""" + with tf.Graph().as_default(), tf.device('/cpu:0'): + # Create a variable to count the number of train() calls. This equals the + # number of batches processed * FLAGS.num_gpus. + global_step = tf.get_variable( + 'global_step', [], + initializer=tf.constant_initializer(0), trainable=False) + + # Calculate the learning rate schedule. + num_batches_per_epoch = (dataset.num_examples_per_epoch() / + FLAGS.batch_size) + decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay) + + # Decay the learning rate exponentially based on the number of steps. + lr = tf.train.exponential_decay(FLAGS.initial_learning_rate, + global_step, + decay_steps, + FLAGS.learning_rate_decay_factor, + staircase=True) + + # Create an optimizer that performs gradient descent. + opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY, + momentum=RMSPROP_MOMENTUM, + epsilon=RMSPROP_EPSILON) + + # Get images and labels for ImageNet and split the batch across GPUs. + assert FLAGS.batch_size % FLAGS.num_gpus == 0, ( + 'Batch size must be divisible by number of GPUs') + split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus) + + # Override the number of preprocessing threads to account for the increased + # number of GPU towers. 
+ num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus + images, labels = image_processing.distorted_inputs( + dataset, + batch_size=split_batch_size, + num_preprocess_threads=num_preprocess_threads) + + input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES)) + + # Number of classes in the Dataset label set plus 1. + # Label 0 is reserved for an (unused) background class. + num_classes = dataset.num_classes() + 1 + + # Calculate the gradients for each model tower. + tower_grads = [] + for i in xrange(FLAGS.num_gpus): + with tf.device('/gpu:%d' % i): + with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope: + # Calculate the loss for one tower of the ImageNet model. This + # function constructs the entire ImageNet model but shares the + # variables across all towers. + loss = _tower_loss(images, labels, num_classes, scope) + + # Reuse variables for the next tower. + tf.get_variable_scope().reuse_variables() + + # Retain the summaries from the final tower. + summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope) + + # Retain the Batch Normalization updates operations only from the + # final tower. Ideally, we should grab the updates from all towers + # but these stats accumulate extremely fast so we can ignore the + # other stats from the other towers without significant detriment. + batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION, + scope) + + # Calculate the gradients for the batch of data on this ImageNet + # tower. + grads = opt.compute_gradients(loss) + + # Keep track of the gradients across all towers. + tower_grads.append(grads) + + # We must calculate the mean of each gradient. Note that this is the + # synchronization point across all towers. + grads = _average_gradients(tower_grads) + + # Add a summaries for the input processing and global_step. + summaries.extend(input_summaries) + + # Add a summary to track the learning rate. 
+ summaries.append(tf.scalar_summary('learning_rate', lr)) + + # Add histograms for gradients. + for grad, var in grads: + if grad: + summaries.append( + tf.histogram_summary(var.op.name + '/gradients', grad)) + + # Apply the gradients to adjust the shared variables. + apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) + + # Add histograms for trainable variables. + for var in tf.trainable_variables(): + summaries.append(tf.histogram_summary(var.op.name, var)) + + # Track the moving averages of all trainable variables. + # Note that we maintain a "double-average" of the BatchNormalization + # global statistics. This is more complicated then need be but we employ + # this for backward-compatibility with our previous models. + variable_averages = tf.train.ExponentialMovingAverage( + inception.MOVING_AVERAGE_DECAY, global_step) + + # Another possiblility is to use tf.slim.get_variables(). + variables_to_average = (tf.trainable_variables() + + tf.moving_average_variables()) + variables_averages_op = variable_averages.apply(variables_to_average) + + # Group all updates to into a single train op. + batchnorm_updates_op = tf.group(*batchnorm_updates) + train_op = tf.group(apply_gradient_op, variables_averages_op, + batchnorm_updates_op) + + # Create a saver. + saver = tf.train.Saver(tf.all_variables()) + + # Build the summary operation from the last tower summaries. + summary_op = tf.merge_summary(summaries) + + # Build an initialization operation to run below. + init = tf.initialize_all_variables() + + # Start running operations on the Graph. allow_soft_placement must be set to + # True to build towers on GPU, as some of the ops do not have GPU + # implementations. 
+ sess = tf.Session(config=tf.ConfigProto( + allow_soft_placement=True, + log_device_placement=FLAGS.log_device_placement)) + sess.run(init) + + if FLAGS.pretrained_model_checkpoint_path: + assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path) + variables_to_restore = tf.get_collection( + slim.variables.VARIABLES_TO_RESTORE) + restorer = tf.train.Saver(variables_to_restore) + restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path) + print('%s: Pre-trained model restored from %s' % + (datetime.now(), FLAGS.pretrained_model_checkpoint_path)) + + # Start the queue runners. + tf.train.start_queue_runners(sess=sess) + + summary_writer = tf.train.SummaryWriter( + FLAGS.train_dir, + graph_def=sess.graph.as_graph_def(add_shapes=True)) + + for step in xrange(FLAGS.max_steps): + start_time = time.time() + _, loss_value = sess.run([train_op, loss]) + duration = time.time() - start_time + + assert not np.isnan(loss_value), 'Model diverged with loss = NaN' + + if step % 10 == 0: + examples_per_sec = FLAGS.batch_size / float(duration) + format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f ' + 'sec/batch)') + print(format_str % (datetime.now(), step, loss_value, + examples_per_sec, duration)) + + if step % 100 == 0: + summary_str = sess.run(summary_op) + summary_writer.add_summary(summary_str, step) + + # Save the model checkpoint periodically. + if step % 5000 == 0 or (step + 1) == FLAGS.max_steps: + checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt') + saver.save(sess, checkpoint_path, global_step=step) diff --git a/inception/slim/BUILD b/inception/slim/BUILD new file mode 100644 index 000000000..d28d4acdb --- /dev/null +++ b/inception/slim/BUILD @@ -0,0 +1,112 @@ +# Description: +# Contains the operations and nets for building TensorFlow-Slim models. 
+ +package(default_visibility = ["//inception:internal"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +py_library( + name = "scopes", + srcs = ["scopes.py"], + deps = [ + "@tf//tensorflow:tensorflow_py", + ], +) + +py_test( + name = "scopes_test", + size = "small", + srcs = ["scopes_test.py"], + deps = [ + ":scopes", + ], +) + +py_library( + name = "variables", + srcs = ["variables.py"], + deps = [ + "@tf//tensorflow:tensorflow_py", + ":scopes", + ], +) + +py_test( + name = "variables_test", + size = "small", + srcs = ["variables_test.py"], + deps = [ + ":variables", + ], +) + +py_library( + name = "losses", + srcs = ["losses.py"], + deps = [ + "@tf//tensorflow:tensorflow_py", + ], +) + +py_test( + name = "losses_test", + size = "small", + srcs = ["losses_test.py"], + deps = [ + ":losses", + ], +) + +py_library( + name = "ops", + srcs = ["ops.py"], + deps = [ + "@tf//tensorflow:tensorflow_py", + ":losses", + ":scopes", + ":variables", + ], +) + +py_test( + name = "ops_test", + size = "small", + srcs = ["ops_test.py"], + deps = [ + ":ops", + ":variables", + ], +) + +py_library( + name = "inception", + srcs = ["inception_model.py"], + deps = [ + "@tf//tensorflow:tensorflow_py", + ":ops", + ":scopes", + ], +) + +py_test( + name = "inception_test", + size = "medium", + srcs = ["inception_test.py"], + deps = [ + ":inception", + ], +) + +py_library( + name = "slim", + srcs = ["slim.py"], + deps = [ + ":inception", + ":losses", + ":ops", + ":scopes", + ":variables", + ], +) diff --git a/inception/slim/README.md b/inception/slim/README.md new file mode 100644 index 000000000..3813b00cf --- /dev/null +++ b/inception/slim/README.md @@ -0,0 +1,650 @@ +# TensorFlow-Slim + +TF-Slim is a lightweight library for defining, training and evaluating models +in TensorFlow. It enables defining complex networks quickly and concisely while +keeping a model's architecture transparent and its hyperparameters explicit. 
+ + +[TOC] + +## Teaser + +As a demonstration of the simplicity of using TF-Slim, compare the simplicity +of the code necessary for defining the entire +[VGG](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) network using TF-Slim +to the lengthy and verbose nature of defining just the first three layers (out +of 16) using native tensorflow: + +```python{.good} +# VGG16 in TF-Slim. +def vgg16(inputs): + with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005): + net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1') + net = slim.ops.max_pool(net, [2, 2], scope='pool1') + net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2') + net = slim.ops.max_pool(net, [2, 2], scope='pool2') + net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3') + net = slim.ops.max_pool(net, [2, 2], scope='pool3') + net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4') + net = slim.ops.max_pool(net, [2, 2], scope='pool4') + net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5') + net = slim.ops.max_pool(net, [2, 2], scope='pool5') + net = slim.ops.flatten(net, scope='flatten5') + net = slim.ops.fc(net, 4096, scope='fc6') + net = slim.ops.dropout(net, 0.5, scope='dropout6') + net = slim.ops.fc(net, 4096, scope='fc7') + net = slim.ops.dropout(net, 0.5, scope='dropout7') + net = slim.ops.fc(net, 1000, activation=None, scope='fc8') + return net +``` + +```python{.bad} +# Layers 1-3 (out of 16) of VGG16 in native tensorflow. 
+def vgg16(inputs): + with tf.name_scope('conv1_1') as scope: + kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights') + conv = tf.nn.conv2d(inputs, kernel, [1, 1, 1, 1], padding='SAME') + biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases') + bias = tf.nn.bias_add(conv, biases) + conv1 = tf.nn.relu(bias, name=scope) + with tf.name_scope('conv1_2') as scope: + kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32, stddev=1e-1), name='weights') + conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') + biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases') + bias = tf.nn.bias_add(conv, biases) + conv1 = tf.nn.relu(bias, name=scope) + with tf.name_scope('pool1') + pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1') +``` + +## Why TF-Slim? + +TF-Slim offers several advantages over just the built-in tensorflow libraries: + +* Allows one to define models much more compactly by eliminating +boilerplate code. This is accomplished through the use of +[argument scoping](scopes.py) +and numerous high level +[operations](ops.py). +These tools increase readability and maintainability, reduce the likelihood +of an error from copy-and-pasting hyperparameter values and simplifies +hyperparameter tuning. +* Makes developing models simple by providing commonly used +[loss functions](losses.py) +* Provides a concise +[definition](inception.py) +of [Inception v3](http://arxiv.org/abs/1512.00567) network architecture +ready to be used out-of-the-box or subsumed into new models. + +Additionally TF-Slim was designed with several principles in mind: + +* The various modules of TF-Slim (scopes, variables, ops, losses) are +independent. This flexibility allows users to pick and choose +components of TF-Slim completely à la carte. 
+* TF-Slim is written using a Functional Programming style. That means it's +super-lightweight and can be used right alongside any of TensorFlow's native +operations. +* Makes re-using network architectures easy. This allows users to build new +networks on top of existing ones as well as fine-tuning pre-trained models on +new tasks. + +## What are the various components of TF-Slim? + +TF-Slim is composed of several parts which were design to exist independently. +These include: + +* [scopes.py](./scopes.py): +provides a new scope named `arg_scope` that allows a user to define default +arguments for specific operations within that scope. +* [variables.py](./variables.py): +provides convenience wrappers for variable creation and manipulation. +* [ops.py](./ops.py): +provides high level operations for building models using tensorflow. +* [losses.py](./losses.py): +contains commonly used loss functions. + +## Defining Models + +Models can be succinctly defined using TF-Slim by combining its variables, +operations and scopes. Each of these elements are defined below. + +### Variables + +Creating +[`Variables`](https://www.tensorflow.org/how_tos/variables/index.html) +in native tensorflow requires either a predefined value or an initialization +mechanism +(random, normally distributed). Furthermore, if a variable needs to be created +on a specific device, such as a GPU, the specification must be +[made explicit](https://www.tensorflow.org/how_tos/using_gpu/index.html). +To alleviate the code required for variable creation, TF-Slim provides a set +of thin wrapper functions in [variables.py](./variables.py) +which allow callers to easily define variables. 
+ +For example, to create a `weight` variable, initialize it using a truncated +normal distribution, regularize it with an `l2_loss` and place it on the `CPU`, +one need only declare the following: + +```python +weights = variables.variable('weights', + shape=[10, 10, 3 , 3], + initializer=tf.truncated_normal_initializer(stddev=0.1), + regularizer=lambda t: losses.l2_loss(t, weight=0.05), + device='/cpu:0') +``` + +In addition to the functionality provided by `tf.Variable`, `slim.variables` +keeps track of the variables created by `slim.ops` to define a model, which +allows one to distinguish variables that belong to the model versus other +variables. + +```python +# Get all the variables defined by the model. +model_variables = slim.variables.get_variables() + +# Get all the variables with the same given name, i.e. 'weights', 'biases'. +weights = slim.variables.get_variables_by_name('weights') +biases = slim.variables.get_variables_by_name('biases') + +# Get all the variables in VARIABLES_TO_RESTORE collection. +variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE) + + +weights = variables.variable('weights', + shape=[10, 10, 3 , 3], + initializer=tf.truncated_normal_initializer(stddev=0.1), + regularizer=lambda t: losses.l2_loss(t, weight=0.05), + device='/cpu:0') +``` + +### Operations (Layers) + +While the set of TensorFlow operations is quite extensive, builders of +neural networks typically think of models in terms of "layers". A layer, +such as a Convolutional Layer, a Fully Connected Layer or a BatchNorm Layer +are more abstract than a single TensorFlow operation and typically involve +many such operations. For example, a Convolutional Layer in a neural network +is built using several steps: + +1. Creating the weight variables +2. Creating the bias variables +3. Convolving the weights with the input from the previous layer +4. Adding the biases to the result of the convolution. 
+ +In python code this can be rather laborious: + + +```python +input = ... +with tf.name_scope('conv1_1') as scope: + kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32, + stddev=1e-1), name='weights') + conv = tf.nn.conv2d(input, kernel, [1, 1, 1, 1], padding='SAME') + biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32), + trainable=True, name='biases') + bias = tf.nn.bias_add(conv, biases) + conv1 = tf.nn.relu(bias, name=scope) +``` + +To alleviate the need to duplicate this code repeatedly, TF-Slim provides a +number of convenient operations defined at the (more abstract) level of +neural network layers. For example, compare the code above to an invocation +of the TF-Slim code: + +```python +input = ... +net = slim.ops.conv2d(input, [3, 3], 128, scope='conv1_1') +``` + +TF-Slim provides numerous operations used in building neural networks which +roughly correspond to such layers. These include: + +Layer | TF-Slim Op +------- | -------- +Convolutional Layer | [ops.conv2d](ops.py) +Fully Connected Layer | [ops.fc](ops.py) +BatchNorm layer | [ops.batch_norm](ops.py) +Max Pooling Layer | [ops.max_pool](ops.py) +Avg Pooling Layer | [ops.avg_pool](ops.py) +Dropout Layer | [ops.dropout](ops.py) + +[ops.py](./ops.py) +also includes operations that are not really "layers" per se, but are +often used to manipulate hidden unit representations during inference: + +Operation | TF-Slim Op +------- | -------- +Flatten | [ops.flatten](ops.py) + +TF-Slim also provides a meta-operation called `repeat_op` that allows one to +repeatedly perform the same operation. Consider the following snippet from the +[VGG](https://www.robots.ox.ac.uk/~vgg/research/very_deep/) network whose layers +perform several convolutions in a row between pooling layers: + +```python +net = ... 
+net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_1') +net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_2') +net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_3') +net = slim.ops.max_pool(net, [2, 2], scope='pool3') +``` + +This clear duplication of code can be removed via a standard loop: + +```python +net = ... +for i in range(3): + net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_' % (i+1)) +net = slim.ops.max_pool(net, [2, 2], scope='pool3') +``` + +While this does reduce the amount of duplication, it can be made even cleaner +by using the `RepeatOp`: + +```python +net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3') +net = slim.ops.max_pool(net, [2, 2], scope='pool2') +``` + +Notice that the RepeatOp not only applies the same argument in-line, it also +is smart enough to unroll the scopes such that the scopes assigned to each +subsequent call of `ops.conv2d` is appended with an underscore and iteration +number. More concretely, the scopes in the example above would be 'conv3_1', +'conv3_2' and 'conv3_3'. + + +### Scopes + +In addition to the types of scope mechanisms in TensorFlow +([name_scope](https://www.tensorflow.org/api_docs/python/framework.html#name_scope), +[op_scope](https://www.tensorflow.org/api_docs/python/framework.html#op_scope), +[variable_scope](https://www.tensorflow.org/api_docs/python/state_ops.html#variable_scope), +[variable_op_scope](https://www.tensorflow.org/api_docs/python/state_ops.html#variable_op_scope)), +TF-Slim adds a new scoping mechanism called "argument scope" or +[arg_scope](scopes.py). +This new scope allows a user to specify one or more operations and a set of +arguments which will be passed to each of the operations defined in the +`arg_scope`. This functionality is best illustrated by example. 
Consider the +following code snippet: + + +```python +net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding='SAME', stddev=0.01, weight_decay=0.0005, scope='conv1') +net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', stddev=0.01, weight_decay=0.0005, scope='conv2') +net = slim.ops.conv2d(net, 256, [11, 11], padding='SAME', stddev=0.01, weight_decay=0.0005, scope='conv3') +``` + +It should be clear that these three Convolution layers share many of the same +hyperparameters. Two have the same padding, all three have the same weight_decay +and standard deviation of its weights. Not only do the duplicated values make +the code more difficult to read, it also adds the addition burder to the writer +of needing to doublecheck that all of the values are identical in each step. +One solution would be to specify default values using variables: + +```python +padding='SAME' +stddev=0.01 +weight_decay=0.0005 +net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding=padding, stddev=stddev, weight_decay=weight_decay, scope='conv1') +net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', stddev=stddev, weight_decay=weight_decay, scope='conv2') +net = slim.ops.conv2d(net, 256, [11, 11], padding=padding, stddev=stddev, weight_decay=weight_decay, scope='conv3') + +``` + +This solution ensures that all three convolutions share the exact same variable +values but doesn't reduce the code clutter. By using an `arg_scope`, we can both +ensure that each layer uses the same values and simplify the code: + +```python + with slim.arg_scope([slim.ops.conv2d], padding='SAME', stddev=0.01, weight_decay=0.0005): + net = slim.ops.conv2d(inputs, 64, [11, 11], scope='conv1') + net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', scope='conv2') + net = slim.ops.conv2d(net, 256, [11, 11], scope='conv3') +``` + +As the example illustrates, the use of arg_scope makes the code cleaner, +simpler and easier to maintain. 
Notice that while argument values are specified
in the arg_scope, they can be overwritten locally.
+outputs = MyNewOp('layer1') +``` + +The `@AddVariableScope` decorater simply applies the `tf.variable_scope` scoping +to the called function taking "layer1" as its argument. This allows the code +to be written more concisely. + +### Losses + +The loss function defines a quantity that we want to minimize. For +classification problems, this is typically the cross entropy between the true +(one-hot) distribution and the predicted probability distribution across +classes. For regression problems, this is often the sum-of-squares differences +between the predicted and true values. + +Certain models, such as multi-task +learning models, require the use of multiple loss functions simultaneously. In +other words, the loss function ultimatey being minimized is the sum of various +other loss functions. For example, consider a model that predicts both +the type of scene in an image as well as the depth from the +camera of each pixel. This model's loss function would be the sum of the +classification loss and depth prediction loss. + +TF-Slim provides an easy-to-use mechanism for defining and keeping track of +loss functions via the +[losses.py](./losses.py) +module. Consider the simple case where we want to train the VGG network: + + +```python +# Load the images and labels. +images, labels = ... + +# Create the model. +predictions = ... + +# Define the loss functions and get the total loss. +loss = losses.ClassificationLoss(predictions, labels) +``` + +In this example, we start by creating the model (using TF-Slim's VGG +implementation), and add the standard classification loss. Now, lets turn +to the case where we have a multi-task model that produces multiple outputs: + + +```python +# Load the images and labels. +images, scene_labels, depth_labels = ... + +# Create the model. +scene_predictions, depth_predictions = CreateMultiTaskModel(images) + +# Define the loss functions and get the total loss. 
+classification_loss = slim.losses.ClassificationLoss(scene_predictions, scene_labels) +sum_of_squares_loss = slim.losses.SumOfSquaresLoss(depth_predictions, depth_labels) + +# The following two lines have the same effect: +total_loss1 = classification_loss + sum_of_squares_loss +total_loss2 = tf.get_collection(slim.losses.LOSSES_COLLECTION) +``` + +In this example, we have two losses which we add by calling +`losses.ClassificationLoss` and `losses.SumOfSquaresLoss`. We can obtain the +total loss by adding them together (`total_loss1`) or by calling +`losses.GetTotalLoss()`. How did this work? +When you create a loss function via TF-Slim, TF-Slim adds the loss to a +special TensorFlow collection of loss functions. This enables you to either +manage the total loss manually, or allow TF-Slim to manage them for you. + +What if you want to let TF-Slim manage the losses for you but have a custom loss +function? +[losses.py](./losses.py) +also has a function that adds this loss to TF-Slims collection. For example: + + +```python +# Load the images and labels. +images, scene_labels, depth_labels, pose_labels = ... + +# Create the model. +scene_predictions, depth_predictions, pose_predictions = CreateMultiTaskModel(images) + +# Define the loss functions and get the total loss. +classification_loss = slim.losses.ClassificationLoss(scene_predictions, scene_labels) +sum_of_squares_loss = slim.losses.SumOfSquaresLoss(depth_predictions, depth_labels) +pose_loss = MyCustomLossFunction(pose_predictions, pose_labels) +tf.add_to_collection(slim.losses.LOSSES_COLLECTION, pose_loss) # Letting TF-Slim know about the additional loss. + +# The following two lines have the same effect: +total_loss1 = classification_loss + sum_of_squares_loss + pose_loss +total_loss2 = losses.GetTotalLoss() +``` +In this example, we can again either produce the total loss function manually +or let TF-Slim know about the additional loss and let TF-Slim handle the losses. 
+ + +## Putting the Pieces Together + +By combining TF-Slim Variables, Operations and scopes, we can write a normally +very complex network with very few lines of code. For example, the entire +[VGG](https://www.robots.ox.ac.uk/~vgg/research/very_deep/) architecture can be +defined with just the following snippet: + +```python +with arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005): + net = slim.ops.repeat_op(1, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1') + net = slim.ops.max_pool(net, [2, 2], scope='pool1') + net = slim.ops.repeat_op(1, net, slim.ops.conv2d, 128, [3, 3], scope='conv2') + net = slim.ops.max_pool(net, [2, 2], scope='pool2') + net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 256, [3, 3], scope='conv3') + net = slim.ops.max_pool(net, [2, 2], scope='pool3') + net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 512, [3, 3], scope='conv4') + net = slim.ops.max_pool(net, [2, 2], scope='pool4') + net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 512, [3, 3], scope='conv5') + net = slim.ops.max_pool(net, [2, 2], scope='pool5') + net = slim.ops.flatten(net, scope='flatten5') + net = slim.ops.fc(net, 4096, scope='fc6') + net = slim.ops.dropout(net, 0.5, scope='dropout6') + net = slim.ops.fc(net, 4096, scope='fc7') + net = slim.ops.dropout(net, 0.5, scope='dropout7') + net = slim.ops.fc(net, 1000, activation=None, scope='fc8') +return net +``` + +## Re-using previously defined network architectures and pre-trained models. + +### Brief Recap on Restoring Variables from a Checkpoint + +After a model has been trained, it can be restored using `tf.train.Saver()` +which restores `Variables` from a given checkpoint. For many cases, +`tf.train.Saver()` provides a simple mechanism to restore all or just a +few variables. + +```python +# Create some variables. +v1 = tf.Variable(..., name="v1") +v2 = tf.Variable(..., name="v2") +... +# Add ops to restore all the variables. 
+restorer = tf.train.Saver() + +# Add ops to restore some variables. +restorer = tf.train.Saver([v1, v2]) + +# Later, launch the model, use the saver to restore variables from disk, and +# do some work with the model. +with tf.Session() as sess: + # Restore variables from disk. + restorer.restore(sess, "/tmp/model.ckpt") + print("Model restored.") + # Do some work with the model + ... +``` + +See [Restoring Variables](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html#restoring-variables) +and +[Choosing which Variables to Save and Restore](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html#choosing-which-variables-to-save-and-restore) +sections of the [Variables](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html) +page for more details. + +### Using slim.variables to Track which Variables need to be Restored + +It is often desirable to fine-tune a pre-trained model on an entirely new +dataset or even a new task. In these situations, one must specify which layers +of the model should be reused (and consequently loaded from a checkpoint) +and which layers are new. Indicating which variables or layers should be +restored is a process that quickly becomes cumbersome when done manually. + +To help keep track of which variables to restore, `slim.variables` provides a +`restore` argument when creating each Variable. By default, all variables are +marked as `restore=True`, which results in all variables defined by the model +being restored. + +```python +# Create some variables. +v1 = slim.variables.variable(name="v1", ..., restore=False) +v2 = slim.variables.variable(name="v2", ...) # By default restore=True +... +# Get list of variables to restore (which contains only 'v2') +variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE) +restorer = tf.train.Saver(variables_to_restore) +with tf.Session() as sess: + # Restore variables from disk. 
+ restorer.restore(sess, "/tmp/model.ckpt") + print("Model restored.") + # Do some work with the model + ... +``` + +Additionally, every layer in `slim.ops` that creates slim.variables (such as +`slim.ops.conv2d`, `slim.ops.fc`, `slim.ops.batch_norm`) also has a `restore` +argument which controls whether the variables created by that layer should be +restored or not. + + +```python +# Create a small network. +net = slim.ops.conv2d(images, 32, [7, 7], stride=2, scope='conv1') +net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2') +net = slim.ops.conv2d(net, 128, [3, 3], scope='conv3') +net = slim.ops.max_pool(net, [3, 3], stride=2, scope='pool3') +net = slim.ops.flatten(net) +net = slim.ops.fc(net, 10, scope='logits', restore=False) +... + +# VARIABLES_TO_RESTORE would contain the 'weights' and 'bias' defined by 'conv1' +# 'conv2' and 'conv3' but not the ones defined by 'logits' +variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE) + +# Create a restorer that would restore only the needed variables. +restorer = tf.train.Saver(variables_to_restore) + +# Create a saver that would save all the variables (including 'logits'). +saver = tf.train.Saver() +with tf.Session() as sess: + # Restore variables from disk. + restorer.restore(sess, "/tmp/model.ckpt") + print("Model restored.") + + # Do some work with the model + ... + saver.save(sess, "/tmp/new_model.ckpt") +``` + +Note: When restoring variables from a checkpoint, the `Saver` locates the +variable names in a checkpoint file and maps them to variables in the current +graph. Above, we created a saver by passing to it a list of variables. In this +case, the names of the variables to locate in the checkpoint file were +implicitly obtained from each provided variable's `var.op.name`. + +This works well when the variable names in the checkpoint file match those in +the graph. 
However, sometimes, we want to restore a model from a checkpoint +whose variables have different names those in the current graph. In this case, +we must provide the `Saver` a dictionary that maps from each checkpoint variable +name to each graph variable. Consider the following example where the checkpoint +variables names are obtained via a simple function: + +```python +# Assuming than 'conv1/weights' should be restored from 'vgg16/conv1/weights' +def name_in_checkpoint(var): + return 'vgg16/' + var.op.name + +# Assuming than 'conv1/weights' and 'conv1/bias' should be restored from 'conv1/params1' and 'conv1/params2' +def name_in_checkpoint(var): + if "weights" in var.op.name: + return var.op.name.replace("weights", "params1") + if "bias" in var.op.name: + return var.op.name.replace("bias", "params2") + +variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE) +variables_to_restore = {name_in_checkpoint(var):var for var in variables_to_restore} +restorer = tf.train.Saver(variables_to_restore) +with tf.Session() as sess: + # Restore variables from disk. + restorer.restore(sess, "/tmp/model.ckpt") +``` + +### Reusing the VGG16 network defined in TF-Slim on a different task, i.e. PASCAL-VOC. + +Assuming one have already a pre-trained VGG16 model, one just need to replace +the last layer `fc8` with a new layer `fc8_pascal` and use `restore=False`. 
+ +```python +def vgg16_pascal(inputs): + with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005): + net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1') + net = slim.ops.max_pool(net, [2, 2], scope='pool1') + net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2') + net = slim.ops.max_pool(net, [2, 2], scope='pool2') + net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3') + net = slim.ops.max_pool(net, [2, 2], scope='pool3') + net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4') + net = slim.ops.max_pool(net, [2, 2], scope='pool4') + net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5') + net = slim.ops.max_pool(net, [2, 2], scope='pool5') + net = slim.ops.flatten(net, scope='flatten5') + net = slim.ops.fc(net, 4096, scope='fc6') + net = slim.ops.dropout(net, 0.5, scope='dropout6') + net = slim.ops.fc(net, 4096, scope='fc7') + net = slim.ops.dropout(net, 0.5, scope='dropout7') + # To reuse vgg16 on PASCAL-VOC, just change the last layer. + net = slim.ops.fc(net, 21, activation=None, scope='fc8_pascal', restore=False) + return net +``` + +## Authors + +Sergio Guadarrama and Nathan Silberman diff --git a/inception/slim/inception_model.py b/inception/slim/inception_model.py new file mode 100644 index 000000000..509666c91 --- /dev/null +++ b/inception/slim/inception_model.py @@ -0,0 +1,329 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
      NOTE(review): the shape comments below assume 299 x 299 x 3 inputs;
      other sizes build a graph with different spatial dimensions.
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether the graph is built for training; forwarded via
      arg_scope to the conv2d/fc/batch_norm/dropout ops.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for op_scope.

  Returns:
    logits: the pre-softmax output of the final fully connected layer,
      a tensor of size [batch_size, num_classes].
    end_points: a dict mapping layer names (e.g. 'conv0',
      'mixed_17x17x768e', 'aux_logits', 'logits', 'predictions') to the
      corresponding activation Tensors, for use in summaries or losses.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  # NOTE: tf.op_scope and axis-first tf.concat(3, ...) are legacy TF 0.x APIs.
  with tf.op_scope([inputs], scope, 'inception_v3'):
    # is_training toggles batch-norm/dropout behavior for all slim ops below.
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      # Stem: default to stride-1 VALID convolutions/pools unless overridden.
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='VALID'):
        # 299 x 299 x 3
        end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
                                         scope='conv0')
        # 149 x 149 x 32
        end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
                                         scope='conv1')
        # 147 x 147 x 32
        end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
                                         padding='SAME', scope='conv2')
        # 147 x 147 x 64
        end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                           stride=2, scope='pool1')
        # 73 x 73 x 64
        end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                         scope='conv3')
        # 73 x 73 x 80 (1x1 conv preserves spatial dimensions).
        end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                         scope='conv4')
        # 71 x 71 x 192.
        end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                           stride=2, scope='pool2')
        # 35 x 35 x 192.
        net = end_points['pool2']
        # Inception blocks: inside them, convs/pools default to stride-1 SAME,
        # so only the grid-reduction blocks change spatial size.
        with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                              stride=1, padding='SAME'):
          # mixed: 35 x 35 x 256.
          with tf.variable_scope('mixed_35x35x256a'):
            with tf.variable_scope('branch1x1'):
              branch1x1 = ops.conv2d(net, 64, [1, 1])
            with tf.variable_scope('branch5x5'):
              branch5x5 = ops.conv2d(net, 48, [1, 1])
              branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
            with tf.variable_scope('branch3x3dbl'):
              branch3x3dbl = ops.conv2d(net, 64, [1, 1])
              branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
              branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.avg_pool(net, [3, 3])
              branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
            # Concatenate branches along dim 3 (channels, NHWC): 64+64+96+32.
            net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
            end_points['mixed_35x35x256a'] = net
          # mixed_1: 35 x 35 x 288.
          with tf.variable_scope('mixed_35x35x288a'):
            with tf.variable_scope('branch1x1'):
              branch1x1 = ops.conv2d(net, 64, [1, 1])
            with tf.variable_scope('branch5x5'):
              branch5x5 = ops.conv2d(net, 48, [1, 1])
              branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
            with tf.variable_scope('branch3x3dbl'):
              branch3x3dbl = ops.conv2d(net, 64, [1, 1])
              branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
              branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.avg_pool(net, [3, 3])
              branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
            net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
            end_points['mixed_35x35x288a'] = net
          # mixed_2: 35 x 35 x 288.
          with tf.variable_scope('mixed_35x35x288b'):
            with tf.variable_scope('branch1x1'):
              branch1x1 = ops.conv2d(net, 64, [1, 1])
            with tf.variable_scope('branch5x5'):
              branch5x5 = ops.conv2d(net, 48, [1, 1])
              branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
            with tf.variable_scope('branch3x3dbl'):
              branch3x3dbl = ops.conv2d(net, 64, [1, 1])
              branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
              branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.avg_pool(net, [3, 3])
              branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
            net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
            end_points['mixed_35x35x288b'] = net
          # mixed_3: 17 x 17 x 768 (grid reduction: stride-2 VALID branches).
          with tf.variable_scope('mixed_17x17x768a'):
            with tf.variable_scope('branch3x3'):
              branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
            with tf.variable_scope('branch3x3dbl'):
              branch3x3dbl = ops.conv2d(net, 64, [1, 1])
              branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
              branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
                                        stride=2, padding='VALID')
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
            net = tf.concat(3, [branch3x3, branch3x3dbl, branch_pool])
            end_points['mixed_17x17x768a'] = net
          # mixed4: 17 x 17 x 768 (factorized 7x7 convolutions: 1x7 then 7x1).
          with tf.variable_scope('mixed_17x17x768b'):
            with tf.variable_scope('branch1x1'):
              branch1x1 = ops.conv2d(net, 192, [1, 1])
            with tf.variable_scope('branch7x7'):
              branch7x7 = ops.conv2d(net, 128, [1, 1])
              branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
              branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
            with tf.variable_scope('branch7x7dbl'):
              branch7x7dbl = ops.conv2d(net, 128, [1, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.avg_pool(net, [3, 3])
              branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
            net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
            end_points['mixed_17x17x768b'] = net
          # mixed_5: 17 x 17 x 768.
          with tf.variable_scope('mixed_17x17x768c'):
            with tf.variable_scope('branch1x1'):
              branch1x1 = ops.conv2d(net, 192, [1, 1])
            with tf.variable_scope('branch7x7'):
              branch7x7 = ops.conv2d(net, 160, [1, 1])
              branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
              branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
            with tf.variable_scope('branch7x7dbl'):
              branch7x7dbl = ops.conv2d(net, 160, [1, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.avg_pool(net, [3, 3])
              branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
            net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
            end_points['mixed_17x17x768c'] = net
          # mixed_6: 17 x 17 x 768 (same structure as mixed_5).
          with tf.variable_scope('mixed_17x17x768d'):
            with tf.variable_scope('branch1x1'):
              branch1x1 = ops.conv2d(net, 192, [1, 1])
            with tf.variable_scope('branch7x7'):
              branch7x7 = ops.conv2d(net, 160, [1, 1])
              branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
              branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
            with tf.variable_scope('branch7x7dbl'):
              branch7x7dbl = ops.conv2d(net, 160, [1, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.avg_pool(net, [3, 3])
              branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
            net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
            end_points['mixed_17x17x768d'] = net
          # mixed_7: 17 x 17 x 768.
          with tf.variable_scope('mixed_17x17x768e'):
            with tf.variable_scope('branch1x1'):
              branch1x1 = ops.conv2d(net, 192, [1, 1])
            with tf.variable_scope('branch7x7'):
              branch7x7 = ops.conv2d(net, 192, [1, 1])
              branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
              branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
            with tf.variable_scope('branch7x7dbl'):
              branch7x7dbl = ops.conv2d(net, 192, [1, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
              branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.avg_pool(net, [3, 3])
              branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
            net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
            end_points['mixed_17x17x768e'] = net
          # Auxiliary Head logits: a small side classifier attached to
          # mixed_17x17x768e, used as an extra training loss.
          aux_logits = tf.identity(end_points['mixed_17x17x768e'])
          with tf.variable_scope('aux_logits'):
            aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
                                      padding='VALID')
            aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
            # Shape of feature map before the final layer.
            shape = aux_logits.get_shape()
            # Convolve over the whole remaining spatial extent (shape[1:3]),
            # collapsing it to 1x1 before the fully connected layer.
            aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
                                    padding='VALID')
            aux_logits = ops.flatten(aux_logits)
            # restore_logits controls checkpoint restoration for this layer
            # (its size depends on num_classes).
            aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                                stddev=0.001, restore=restore_logits)
            end_points['aux_logits'] = aux_logits
          # mixed_8: 17 x 17 x 1280 (grid reduction to 8 x 8).
          with tf.variable_scope('mixed_17x17x1280a'):
            with tf.variable_scope('branch3x3'):
              branch3x3 = ops.conv2d(net, 192, [1, 1])
              branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
                                     padding='VALID')
            with tf.variable_scope('branch7x7x3'):
              branch7x7x3 = ops.conv2d(net, 192, [1, 1])
              branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
              branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
              branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
                                       stride=2, padding='VALID')
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
            net = tf.concat(3, [branch3x3, branch7x7x3, branch_pool])
            end_points['mixed_17x17x1280a'] = net
          # mixed_9: 8 x 8 x 2048 (branches with expanded 1x3/3x1 outputs).
          with tf.variable_scope('mixed_8x8x2048a'):
            with tf.variable_scope('branch1x1'):
              branch1x1 = ops.conv2d(net, 320, [1, 1])
            with tf.variable_scope('branch3x3'):
              branch3x3 = ops.conv2d(net, 384, [1, 1])
              branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
                                        ops.conv2d(branch3x3, 384, [3, 1])])
            with tf.variable_scope('branch3x3dbl'):
              branch3x3dbl = ops.conv2d(net, 448, [1, 1])
              branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
              branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                           ops.conv2d(branch3x3dbl, 384, [3, 1])])
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.avg_pool(net, [3, 3])
              branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
            net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
            end_points['mixed_8x8x2048a'] = net
          # mixed_10: 8 x 8 x 2048.
          with tf.variable_scope('mixed_8x8x2048b'):
            with tf.variable_scope('branch1x1'):
              branch1x1 = ops.conv2d(net, 320, [1, 1])
            with tf.variable_scope('branch3x3'):
              branch3x3 = ops.conv2d(net, 384, [1, 1])
              branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
                                        ops.conv2d(branch3x3, 384, [3, 1])])
            with tf.variable_scope('branch3x3dbl'):
              branch3x3dbl = ops.conv2d(net, 448, [1, 1])
              branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
              branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                           ops.conv2d(branch3x3dbl, 384, [3, 1])])
            with tf.variable_scope('branch_pool'):
              branch_pool = ops.avg_pool(net, [3, 3])
              branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
            net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
            end_points['mixed_8x8x2048b'] = net
          # Final pooling and prediction
          with tf.variable_scope('logits'):
            shape = net.get_shape()
            # Global average pool over the remaining spatial extent.
            net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
            # 1 x 1 x 2048
            net = ops.dropout(net, dropout_keep_prob, scope='dropout')
            net = ops.flatten(net, scope='flatten')
            # 2048
            logits = ops.fc(net, num_classes, activation=None, scope='logits',
                            restore=restore_logits)
            # 1000
            end_points['logits'] = logits
            end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
      return logits, end_points
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for slim.inception.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from inception.slim import inception_model as inception + + +class InceptionTest(tf.test.TestCase): + + def testBuildLogits(self): + batch_size = 5 + height, width = 299, 299 + num_classes = 1000 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + logits, _ = inception.inception_v3(inputs, num_classes) + self.assertTrue(logits.op.name.startswith('logits')) + self.assertListEqual(logits.get_shape().as_list(), + [batch_size, num_classes]) + + def testBuildEndPoints(self): + batch_size = 5 + height, width = 299, 299 + num_classes = 1000 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + _, end_points = inception.inception_v3(inputs, num_classes) + self.assertTrue('logits' in end_points) + logits = end_points['logits'] + self.assertListEqual(logits.get_shape().as_list(), + [batch_size, num_classes]) + self.assertTrue('aux_logits' in end_points) + aux_logits = end_points['aux_logits'] + self.assertListEqual(aux_logits.get_shape().as_list(), + [batch_size, num_classes]) + pre_pool = end_points['mixed_8x8x2048b'] + self.assertListEqual(pre_pool.get_shape().as_list(), + [batch_size, 8, 8, 2048]) + + def testHalfSizeImages(self): + batch_size = 5 + height, width = 150, 150 + num_classes = 1000 + with self.test_session(): + inputs = 
tf.random_uniform((batch_size, height, width, 3)) + logits, end_points = inception.inception_v3(inputs, num_classes) + self.assertTrue(logits.op.name.startswith('logits')) + self.assertListEqual(logits.get_shape().as_list(), + [batch_size, num_classes]) + pre_pool = end_points['mixed_8x8x2048b'] + self.assertListEqual(pre_pool.get_shape().as_list(), + [batch_size, 3, 3, 2048]) + + def testUnknowBatchSize(self): + batch_size = 1 + height, width = 299, 299 + num_classes = 1000 + with self.test_session() as sess: + inputs = tf.placeholder(tf.float32, (None, height, width, 3)) + logits, _ = inception.inception_v3(inputs, num_classes) + self.assertTrue(logits.op.name.startswith('logits')) + self.assertListEqual(logits.get_shape().as_list(), + [None, num_classes]) + images = tf.random_uniform((batch_size, height, width, 3)) + sess.run(tf.initialize_all_variables()) + output = sess.run(logits, {inputs: images.eval()}) + self.assertEquals(output.shape, (batch_size, num_classes)) + + def testEvaluation(self): + batch_size = 2 + height, width = 299, 299 + num_classes = 1000 + with self.test_session() as sess: + eval_inputs = tf.random_uniform((batch_size, height, width, 3)) + logits, _ = inception.inception_v3(eval_inputs, num_classes, + is_training=False) + predictions = tf.argmax(logits, 1) + sess.run(tf.initialize_all_variables()) + output = sess.run(predictions) + self.assertEquals(output.shape, (batch_size,)) + + def testTrainEvalWithReuse(self): + train_batch_size = 5 + eval_batch_size = 2 + height, width = 150, 150 + num_classes = 1000 + with self.test_session() as sess: + train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) + inception.inception_v3(train_inputs, num_classes) + tf.get_variable_scope().reuse_variables() + eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) + logits, _ = inception.inception_v3(eval_inputs, num_classes, + is_training=False) + predictions = tf.argmax(logits, 1) + sess.run(tf.initialize_all_variables()) + 
output = sess.run(predictions) + self.assertEquals(output.shape, (eval_batch_size,)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/inception/slim/losses.py b/inception/slim/losses.py new file mode 100644 index 000000000..7027c0498 --- /dev/null +++ b/inception/slim/losses.py @@ -0,0 +1,110 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains convenience wrappers for various Neural Network TensorFlow losses. + + All the losses defined here add themselves to the LOSSES_COLLECTION + collection. + + l1_loss: Define a L1 Loss, useful for regularization, i.e. lasso. + l2_loss: Define a L2 Loss, useful for regularization, i.e. weight decay. + cross_entropy_loss: Define a cross entropy loss using + softmax_cross_entropy_with_logits. Useful for classification. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +# In order to gather all losses in a network, the user should use this +# key for get_collection, i.e: +# losses = tf.get_collection(slim.losses.LOSSES_COLLECTION) +LOSSES_COLLECTION = '_losses' + + +def l1_loss(tensor, weight=1.0, scope=None): + """Define a L1Loss, useful for regularize, i.e. lasso. + + Args: + tensor: tensor to regularize. + weight: scale the loss by this factor. + scope: Optional scope for op_scope. 
+ + Returns: + the L1 loss op. + """ + with tf.op_scope([tensor], scope, 'L1Loss'): + weight = tf.convert_to_tensor(weight, + dtype=tensor.dtype.base_dtype, + name='loss_weight') + loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value') + tf.add_to_collection(LOSSES_COLLECTION, loss) + return loss + + +def l2_loss(tensor, weight=1.0, scope=None): + """Define a L2Loss, useful for regularize, i.e. weight decay. + + Args: + tensor: tensor to regularize. + weight: an optional weight to modulate the loss. + scope: Optional scope for op_scope. + + Returns: + the L2 loss op. + """ + with tf.op_scope([tensor], scope, 'L2Loss'): + weight = tf.convert_to_tensor(weight, + dtype=tensor.dtype.base_dtype, + name='loss_weight') + loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value') + tf.add_to_collection(LOSSES_COLLECTION, loss) + return loss + + +def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0, + weight=1.0, scope=None): + """Define a Cross Entropy loss using softmax_cross_entropy_with_logits. + + It can scale the loss by weight factor, and smooth the labels. + + Args: + logits: [batch_size, num_classes] logits outputs of the network . + one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels. + label_smoothing: if greater than 0 then smooth the labels. + weight: scale the loss by this factor. + scope: Optional scope for op_scope. + + Returns: + A tensor with the softmax_cross_entropy loss. 
+ """ + logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape()) + with tf.op_scope([logits, one_hot_labels], scope, 'CrossEntropyLoss'): + num_classes = one_hot_labels.get_shape()[-1].value + one_hot_labels = tf.cast(one_hot_labels, logits.dtype) + if label_smoothing > 0: + smooth_positives = 1.0 - label_smoothing + smooth_negatives = label_smoothing / num_classes + one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives + cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, + one_hot_labels, + name='xentropy') + weight = tf.convert_to_tensor(weight, + dtype=logits.dtype.base_dtype, + name='loss_weight') + loss = tf.mul(weight, tf.reduce_mean(cross_entropy), name='value') + tf.add_to_collection(LOSSES_COLLECTION, loss) + return loss diff --git a/inception/slim/losses_test.py b/inception/slim/losses_test.py new file mode 100644 index 000000000..4468bb936 --- /dev/null +++ b/inception/slim/losses_test.py @@ -0,0 +1,89 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for slim.losses.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +import tensorflow as tf + +from inception.slim import losses + + +class LossesTest(tf.test.TestCase): + + def testL1Loss(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + weights = tf.constant(1.0, shape=shape) + wd = 0.01 + loss = losses.l1_loss(weights, wd) + self.assertEquals(loss.op.name, 'L1Loss/value') + self.assertAlmostEqual(loss.eval(), num_elem * wd, 5) + + def testL2Loss(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + weights = tf.constant(1.0, shape=shape) + wd = 0.01 + loss = losses.l2_loss(weights, wd) + self.assertEquals(loss.op.name, 'L2Loss/value') + self.assertAlmostEqual(loss.eval(), num_elem * wd / 2, 5) + + +class CrossEntropyLossTest(tf.test.TestCase): + + def testCrossEntropyLossAllCorrect(self): + with self.test_session(): + logits = tf.constant([[10.0, 0.0, 0.0], + [0.0, 10.0, 0.0], + [0.0, 0.0, 10.0]]) + labels = tf.constant([[1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + loss = losses.cross_entropy_loss(logits, labels) + self.assertEquals(loss.op.name, 'CrossEntropyLoss/value') + self.assertAlmostEqual(loss.eval(), 0.0, 3) + + def testCrossEntropyLossAllWrong(self): + with self.test_session(): + logits = tf.constant([[10.0, 0.0, 0.0], + [0.0, 10.0, 0.0], + [0.0, 0.0, 10.0]]) + labels = tf.constant([[0, 0, 1], + [1, 0, 0], + [0, 1, 0]]) + loss = losses.cross_entropy_loss(logits, labels) + self.assertEquals(loss.op.name, 'CrossEntropyLoss/value') + self.assertAlmostEqual(loss.eval(), 10.0, 3) + + def testCrossEntropyLossAllWrongWithWeight(self): + with self.test_session(): + logits = tf.constant([[10.0, 0.0, 0.0], + [0.0, 10.0, 0.0], + [0.0, 0.0, 10.0]]) + labels = tf.constant([[0, 0, 1], + [1, 0, 0], + [0, 1, 0]]) + loss = losses.cross_entropy_loss(logits, 
labels, weight=0.5) + self.assertEquals(loss.op.name, 'CrossEntropyLoss/value') + self.assertAlmostEqual(loss.eval(), 5.0, 3) + +if __name__ == '__main__': + tf.test.main() diff --git a/inception/slim/ops.py b/inception/slim/ops.py new file mode 100644 index 000000000..6ed6d79a4 --- /dev/null +++ b/inception/slim/ops.py @@ -0,0 +1,418 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains convenience wrappers for typical Neural Network TensorFlow layers. + + Additionally it maintains a collection with update_ops that need to be + updated after the ops have been computed, for exmaple to update moving means + and moving variances of batch_norm. + + Ops that have different behavior during training or eval have an is_training + parameter. Additionally Ops that contain variables.variable have a trainable + parameter, which control if the ops variables are trainable or not. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +import tensorflow as tf + +from tensorflow.python.training import moving_averages + +from inception.slim import losses +from inception.slim import scopes +from inception.slim import variables + +# Used to keep the update ops done by batch_norm. 
+UPDATE_OPS_COLLECTION = '_update_ops_' + + +@scopes.add_arg_scope +def batch_norm(inputs, + decay=0.999, + scale=False, + epsilon=0.001, + moving_vars='moving_vars', + activation=None, + is_training=True, + trainable=True, + restore=True, + scope=None): + """Adds a Batch Normalization layer. + + Args: + inputs: a tensor of size [batch_size, height, width, channels] + or [batch_size, channels]. + decay: decay for the moving average. + scale: If True, multiply by gamma. If False, gamma is + not used. When the next layer is linear (also e.g. ReLU), this can be + disabled since the scaling can be done by the next layer. + epsilon: small float added to variance to avoid dividing by zero. + moving_vars: collection to store the moving_mean and moving_variance. + activation: activation function. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. + restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_op_scope. + + Returns: + a tensor representing the output of the operation. + + """ + inputs_shape = inputs.get_shape() + with tf.variable_op_scope([inputs], scope, 'BatchNorm'): + axis = range(len(inputs_shape) - 1) + params_shape = inputs_shape[-1:] + with scopes.arg_scope([variables.variable], restore=restore): + # Allocate parameters for the beta and gamma of the normalization. + beta = variables.variable('beta', + params_shape, + initializer=tf.zeros_initializer, + trainable=trainable) + if scale: + gamma = variables.variable('gamma', + params_shape, + initializer=tf.ones, + trainable=trainable) + else: + gamma = None + # Create moving_mean and moving_variance add them to moving_vars and + # GraphKeys.MOVING_AVERAGE_VARIABLES collections. 
+ with scopes.arg_scope([variables.variable], trainable=False, + collections=[ + moving_vars, + tf.GraphKeys.MOVING_AVERAGE_VARIABLES]): + moving_mean = variables.variable('moving_mean', + params_shape, + initializer=tf.zeros_initializer) + moving_variance = variables.variable('moving_variance', + params_shape, + initializer=tf.ones) + if is_training: + # Calculate the moments based on the individual batch. + mean, variance = tf.nn.moments(inputs, axis) + + update_moving_mean = moving_averages.assign_moving_average( + moving_mean, mean, decay) + tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean) + update_moving_variance = moving_averages.assign_moving_average( + moving_variance, variance, decay) + tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance) + else: + # Just use the moving_mean and moving_variance. + mean = moving_mean + variance = moving_variance + # Normalize the activations. + outputs = tf.nn.batch_normalization( + inputs, mean, variance, beta, gamma, epsilon) + outputs.set_shape(inputs.get_shape()) + if activation: + outputs = activation(outputs) + return outputs + + +@scopes.add_arg_scope +def conv2d(inputs, + num_filters_out, + kernel_size, + stride=1, + padding='SAME', + activation=tf.nn.relu, + stddev=0.01, + bias=0.0, + weight_decay=0, + batch_norm_params=None, + is_training=True, + trainable=True, + restore=True, + scope=None): + """Adds a 2D convolution followed by an optional batch_norm layer. + + conv2d creates a variable called 'weights', representing the convolutional + kernel, that is convolved with the input. If `batch_norm_params` is None, a + second variable called 'biases' is added to the result of the convolution + operation. + + Args: + inputs: a tensor of size [batch_size, height, width, channels]. + num_filters_out: the number of output filters. + kernel_size: a 2-D list comprising of the height and width of the filters. + stride: the stride in height and width of the convolution. 
+ padding: one of 'VALID' or 'SAME'. + activation: activation function. + stddev: standard deviation of the truncated guassian weight distribution. + bias: the initial value of the biases. + weight_decay: the weight decay. + batch_norm_params: parameters for the batch_norm. If is None don't use it. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. + restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_op_scope. + + Returns: + a tensor representing the output of the operation. + + Raises: + ValueError: if 'kernel_size' is not a 2-D list. + """ + if len(kernel_size) != 2: + raise ValueError('kernel_size must be a 2-D list.') + with tf.variable_op_scope([inputs], scope, 'Conv'): + num_filters_in = inputs.get_shape()[-1] + weights_shape = [kernel_size[0], kernel_size[1], + num_filters_in, num_filters_out] + weights_initializer = tf.truncated_normal_initializer(stddev=stddev) + l2_regularizer = lambda t: losses.l2_loss(t, weight_decay) + weights = variables.variable('weights', + shape=weights_shape, + initializer=weights_initializer, + regularizer=l2_regularizer, + trainable=trainable, + restore=restore) + conv = tf.nn.conv2d(inputs, weights, [1, stride, stride, 1], + padding=padding) + if batch_norm_params is not None: + with scopes.arg_scope([batch_norm], is_training=is_training, + trainable=trainable, restore=restore): + outputs = batch_norm(conv, **batch_norm_params) + else: + bias_shape = [num_filters_out,] + bias_initializer = tf.constant_initializer(bias) + biases = variables.variable('biases', + shape=bias_shape, + initializer=bias_initializer, + trainable=trainable, + restore=restore) + outputs = tf.nn.bias_add(conv, biases) + if activation: + outputs = activation(outputs) + return outputs + + +@scopes.add_arg_scope +def fc(inputs, + num_units_out, + activation=tf.nn.relu, + stddev=0.01, + bias=0.0, + weight_decay=0, + 
batch_norm_params=None, + is_training=True, + trainable=True, + restore=True, + scope=None): + """Adds a fully connected layer followed by an optional batch_norm layer. + + FC creates a variable called 'weights', representing the fully connected + weight matrix, that is multiplied by the input. If `batch_norm` is None, a + second variable called 'biases' is added to the result of the initial + vector-matrix multiplication. + + Args: + inputs: a [B x N] tensor where B is the batch size and N is the number of + input units in the layer. + num_units_out: the number of output units in the layer. + activation: activation function. + stddev: the standard deviation for the weights. + bias: the initial value of the biases. + weight_decay: the weight decay. + batch_norm_params: parameters for the batch_norm. If is None don't use it. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. + restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_op_scope. + + Returns: + the tensor variable representing the result of the series of operations. 
+ """ + with tf.variable_op_scope([inputs], scope, 'FC'): + num_units_in = inputs.get_shape()[1] + weights_shape = [num_units_in, num_units_out] + weights_initializer = tf.truncated_normal_initializer(stddev=stddev) + l2_regularizer = lambda t: losses.l2_loss(t, weight_decay) + weights = variables.variable('weights', + shape=weights_shape, + initializer=weights_initializer, + regularizer=l2_regularizer, + trainable=trainable, + restore=restore) + if batch_norm_params is not None: + outputs = tf.matmul(inputs, weights) + with scopes.arg_scope([batch_norm], is_training=is_training, + trainable=trainable, restore=restore): + outputs = batch_norm(outputs, **batch_norm_params) + else: + bias_shape = [num_units_out,] + bias_initializer = tf.constant_initializer(bias) + biases = variables.variable('biases', + shape=bias_shape, + initializer=bias_initializer, + trainable=trainable, + restore=restore) + outputs = tf.nn.xw_plus_b(inputs, weights, biases) + if activation: + outputs = activation(outputs) + return outputs + + +def one_hot_encoding(labels, num_classes, scope=None): + """Transform numeric labels into onehot_labels. + + Args: + labels: [batch_size] target labels. + num_classes: total number of classes. + scope: Optional scope for op_scope. + Returns: + one hot encoding of the labels. + """ + with tf.op_scope([labels], scope, 'OneHotEncoding'): + batch_size = labels.get_shape()[0] + indices = tf.expand_dims(tf.range(0, batch_size), 1) + labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype) + concated = tf.concat(1, [indices, labels]) + onehot_labels = tf.sparse_to_dense( + concated, tf.pack([batch_size, num_classes]), 1.0, 0.0) + onehot_labels.set_shape([batch_size, num_classes]) + return onehot_labels + + +@scopes.add_arg_scope +def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None): + """Adds a Max Pooling layer. + + It is assumed by the wrapper that the pooling is only done per image and not + in depth or batch. 
+ + Args: + inputs: a tensor of size [batch_size, height, width, depth]. + kernel_size: the size of the pooling kernel over which the op is computed. + stride: the stride in height and width of the convolution. + padding: the padding method, either 'VALID' or 'SAME'. + scope: Optional scope for op_scope. + + Returns: + a tensor representing the results of the pooling operation. + Raises: + ValueError: if 'kernel_size' is not a 2-D list + """ + if len(kernel_size) != 2: + raise ValueError('kernel_size must be a 2-D list.') + with tf.op_scope([inputs], scope, 'MaxPool'): + return tf.nn.max_pool(inputs, + ksize=[1, kernel_size[0], kernel_size[1], 1], + strides=[1, stride, stride, 1], + padding=padding) + + +@scopes.add_arg_scope +def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None): + """Adds a Avg Pooling layer. + + It is assumed by the wrapper that the pooling is only done per image and not + in depth or batch. + + Args: + inputs: a tensor of size [batch_size, height, width, depth]. + kernel_size: the size of the pooling kernel over which the op is computed. + stride: the stride in height and width of the convolution. + padding: the padding method, either 'VALID' or 'SAME'. + scope: Optional scope for op_scope. + + Returns: + a tensor representing the results of the pooling operation. + Raises: + ValueError: if 'kernel_size' is not a 2-D list + """ + if len(kernel_size) != 2: + raise ValueError('kernel_size must be a 2-D list.') + with tf.op_scope([inputs], scope, 'AvgPool'): + return tf.nn.avg_pool(inputs, + ksize=[1, kernel_size[0], kernel_size[1], 1], + strides=[1, stride, stride, 1], + padding=padding) + + +@scopes.add_arg_scope +def dropout(inputs, keep_prob=0.5, is_training=True, scope=None): + """Returns a dropout layer applied to the input. + + Args: + inputs: the tensor to pass to the Dropout layer. + keep_prob: the probability of dropping each input unit. + is_training: whether or not the model is in training mode. 
If so, dropout is + applied and values scaled. Otherwise, inputs is returned. + scope: Optional scope for op_scope. + + Returns: + a tensor representing the output of the operation. + """ + if is_training and keep_prob > 0: + with tf.op_scope([inputs], scope, 'Dropout'): + return tf.nn.dropout(inputs, keep_prob) + else: + return inputs + + +def flatten(inputs, scope=None): + """Flattens the input while maintaining the batch_size. + + Assumes that the first dimension represents the batch. + + Args: + inputs: a tensor of size [batch_size, ...]. + scope: Optional scope for op_scope. + + Returns: + a flattened tensor with shape [batch_size, k]. + Raises: + ValueError: if inputs.shape is wrong. + """ + if len(inputs.get_shape()) < 2: + raise ValueError('Inputs must be have a least 2 dimensions') + dims = inputs.get_shape()[1:] + k = dims.num_elements() + with tf.op_scope([inputs], scope, 'Flatten'): + return tf.reshape(inputs, [-1, k]) + + +def repeat_op(repetitions, inputs, op, *args, **kwargs): + """Build a sequential Tower starting from inputs by using an op repeatedly. + + It creates new scopes for each operation by increasing the counter. + Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1') + it will repeat the given op under the following variable_scopes: + conv1/Conv + conv1/Conv_1 + conv1/Conv_2 + + Args: + repetitions: number or repetitions. + inputs: a tensor of size [batch_size, height, width, channels]. + op: an operation. + *args: args for the op. + **kwargs: kwargs for the op. + + Returns: + a tensor result of applying the operation op, num times. + Raises: + ValueError: if the op is unknown or wrong. 
+ """ + scope = kwargs.pop('scope', None) + with tf.variable_op_scope([inputs], scope, 'RepeatOp'): + tower = inputs + for _ in range(repetitions): + tower = op(tower, *args, **kwargs) + return tower diff --git a/inception/slim/ops_test.py b/inception/slim/ops_test.py new file mode 100644 index 000000000..363f5c292 --- /dev/null +++ b/inception/slim/ops_test.py @@ -0,0 +1,510 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for slim.ops.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +import numpy as np +import tensorflow as tf + +from tensorflow.python.ops import control_flow_ops + +from inception.slim import losses +from inception.slim import ops +from inception.slim import scopes +from inception.slim import variables + + +class ConvTest(tf.test.TestCase): + + def testCreateConv(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [3, 3]) + self.assertEquals(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + + def testCreateConvCreatesWeightsAndBiasesVars(self): + height, width = 3, 3 + images = tf.random_uniform((5, height, width, 3), seed=1) + with self.test_session(): + 
self.assertFalse(variables.get_variables('conv1/weights')) + self.assertFalse(variables.get_variables('conv1/biases')) + ops.conv2d(images, 32, [3, 3], scope='conv1') + self.assertTrue(variables.get_variables('conv1/weights')) + self.assertTrue(variables.get_variables('conv1/biases')) + + def testCreateConvWithScope(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [3, 3], scope='conv1') + self.assertEquals(output.op.name, 'conv1/Relu') + + def testCreateConvWithoutActivation(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [3, 3], activation=None) + self.assertEquals(output.op.name, 'Conv/BiasAdd') + + def testCreateConvValid(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [3, 3], padding='VALID') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32]) + + def testCreateConvWithWD(self): + height, width = 3, 3 + with self.test_session() as sess: + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.conv2d(images, 32, [3, 3], weight_decay=0.01) + wd = tf.get_collection(losses.LOSSES_COLLECTION)[0] + self.assertEquals(wd.op.name, 'Conv/weights/Regularizer/L2Loss/value') + sess.run(tf.initialize_all_variables()) + self.assertTrue(sess.run(wd) <= 0.01) + + def testReuseConvWithWD(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1') + self.assertEquals(len(tf.get_collection(losses.LOSSES_COLLECTION)), 1) + tf.get_variable_scope().reuse_variables() + ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1') + self.assertEquals(len(tf.get_collection(losses.LOSSES_COLLECTION)), 1) + + def 
testConvWithBatchNorm(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + with scopes.arg_scope([ops.conv2d], batch_norm_params={}): + net = ops.conv2d(images, 32, [3, 3], scope='conv1') + net = ops.conv2d(net, 32, [3, 3], scope='conv2') + self.assertEquals(len(tf.get_collection('moving_vars')), 4) + self.assertEquals(len(variables.get_variables('conv1/BatchNorm')), 3) + self.assertEquals(len(variables.get_variables('conv2/BatchNorm')), 3) + + +class FCTest(tf.test.TestCase): + + def testCreateFC(self): + height, width = 3, 3 + with self.test_session(): + inputs = tf.random_uniform((5, height * width * 3), seed=1) + output = ops.fc(inputs, 32) + self.assertEquals(output.op.name, 'FC/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, 32]) + + def testCreateFCWithScope(self): + height, width = 3, 3 + with self.test_session(): + inputs = tf.random_uniform((5, height * width * 3), seed=1) + output = ops.fc(inputs, 32, scope='fc1') + self.assertEquals(output.op.name, 'fc1/Relu') + + def testCreateFcCreatesWeightsAndBiasesVars(self): + height, width = 3, 3 + inputs = tf.random_uniform((5, height * width * 3), seed=1) + with self.test_session(): + self.assertFalse(variables.get_variables('fc1/weights')) + self.assertFalse(variables.get_variables('fc1/biases')) + ops.fc(inputs, 32, scope='fc1') + self.assertTrue(variables.get_variables('fc1/weights')) + self.assertTrue(variables.get_variables('fc1/biases')) + + def testReuseVars(self): + height, width = 3, 3 + inputs = tf.random_uniform((5, height * width * 3), seed=1) + with self.test_session(): + ops.fc(inputs, 32, scope='fc1') + self.assertEquals(len(variables.get_variables('fc1')), 2) + tf.get_variable_scope().reuse_variables() + ops.fc(inputs, 32, scope='fc1') + self.assertEquals(len(variables.get_variables('fc1')), 2) + + def testNonReuseVars(self): + height, width = 3, 3 + inputs = tf.random_uniform((5, height * width * 3), seed=1) + 
with self.test_session(): + ops.fc(inputs, 32) + self.assertEquals(len(variables.get_variables('FC')), 2) + ops.fc(inputs, 32) + self.assertEquals(len(variables.get_variables('FC')), 4) + + def testCreateFCWithoutActivation(self): + height, width = 3, 3 + with self.test_session(): + inputs = tf.random_uniform((5, height * width * 3), seed=1) + output = ops.fc(inputs, 32, activation=None) + self.assertEquals(output.op.name, 'FC/xw_plus_b') + + def testCreateFCWithWD(self): + height, width = 3, 3 + with self.test_session() as sess: + inputs = tf.random_uniform((5, height * width * 3), seed=1) + ops.fc(inputs, 32, weight_decay=0.01) + wd = tf.get_collection(losses.LOSSES_COLLECTION)[0] + self.assertEquals(wd.op.name, 'FC/weights/Regularizer/L2Loss/value') + sess.run(tf.initialize_all_variables()) + self.assertTrue(sess.run(wd) <= 0.01) + + def testReuseFCWithWD(self): + height, width = 3, 3 + with self.test_session(): + inputs = tf.random_uniform((5, height * width * 3), seed=1) + ops.fc(inputs, 32, weight_decay=0.01, scope='fc') + self.assertEquals(len(tf.get_collection(losses.LOSSES_COLLECTION)), 1) + tf.get_variable_scope().reuse_variables() + ops.fc(inputs, 32, weight_decay=0.01, scope='fc') + self.assertEquals(len(tf.get_collection(losses.LOSSES_COLLECTION)), 1) + + def testFCWithBatchNorm(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height * width * 3), seed=1) + with scopes.arg_scope([ops.fc], batch_norm_params={}): + net = ops.fc(images, 32, scope='fc1') + net = ops.fc(net, 32, scope='fc2') + self.assertEquals(len(tf.get_collection('moving_vars')), 4) + self.assertEquals(len(variables.get_variables('fc1/BatchNorm')), 3) + self.assertEquals(len(variables.get_variables('fc2/BatchNorm')), 3) + + +class MaxPoolTest(tf.test.TestCase): + + def testCreateMaxPool(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.max_pool(images, [3, 3]) + 
self.assertEquals(output.op.name, 'MaxPool/MaxPool') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) + + def testCreateMaxPoolWithScope(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.max_pool(images, [3, 3], scope='pool1') + self.assertEquals(output.op.name, 'pool1/MaxPool') + + def testCreateMaxPoolSAME(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.max_pool(images, [3, 3], padding='SAME') + self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3]) + + def testCreateMaxPoolStrideSAME(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.max_pool(images, [3, 3], stride=1, padding='SAME') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3]) + + +class AvgPoolTest(tf.test.TestCase): + + def testCreateAvgPool(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.avg_pool(images, [3, 3]) + self.assertEquals(output.op.name, 'AvgPool/AvgPool') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) + + def testCreateAvgPoolWithScope(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.avg_pool(images, [3, 3], scope='pool1') + self.assertEquals(output.op.name, 'pool1/AvgPool') + + def testCreateAvgPoolSAME(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.avg_pool(images, [3, 3], padding='SAME') + self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3]) + + def testCreateAvgPoolStrideSAME(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = 
ops.avg_pool(images, [3, 3], stride=1, padding='SAME') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3]) + + +class OneHotEncodingTest(tf.test.TestCase): + + def testOneHotEncodingCreate(self): + with self.test_session(): + labels = tf.constant([0, 1, 2]) + output = ops.one_hot_encoding(labels, num_classes=3) + self.assertEquals(output.op.name, 'OneHotEncoding/SparseToDense') + self.assertListEqual(output.get_shape().as_list(), [3, 3]) + + def testOneHotEncoding(self): + with self.test_session(): + labels = tf.constant([0, 1, 2]) + one_hot_labels = tf.constant([[1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + output = ops.one_hot_encoding(labels, num_classes=3) + self.assertAllClose(output.eval(), one_hot_labels.eval()) + + +class DropoutTest(tf.test.TestCase): + + def testCreateDropout(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.dropout(images) + self.assertEquals(output.op.name, 'Dropout/dropout/mul_1') + output.get_shape().assert_is_compatible_with(images.get_shape()) + + def testCreateDropoutNoTraining(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1, name='images') + output = ops.dropout(images, is_training=False) + self.assertEquals(output, images) + + +class FlattenTest(tf.test.TestCase): + + def testFlatten4D(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1, name='images') + output = ops.flatten(images) + self.assertEquals(output.get_shape().num_elements(), + images.get_shape().num_elements()) + self.assertEqual(output.get_shape()[0], images.get_shape()[0]) + + def testFlatten3D(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width), seed=1, name='images') + output = ops.flatten(images) + self.assertEquals(output.get_shape().num_elements(), + 
images.get_shape().num_elements()) + self.assertEqual(output.get_shape()[0], images.get_shape()[0]) + + def testFlattenBatchSize(self): + height, width = 3, 3 + with self.test_session() as sess: + images = tf.random_uniform((5, height, width, 3), seed=1, name='images') + inputs = tf.placeholder(tf.int32, (None, height, width, 3)) + output = ops.flatten(inputs) + self.assertEquals(output.get_shape().as_list(), + [None, height * width * 3]) + output = sess.run(output, {inputs: images.eval()}) + self.assertEquals(output.size, + images.get_shape().num_elements()) + self.assertEqual(output.shape[0], images.get_shape()[0]) + + +class BatchNormTest(tf.test.TestCase): + + def testCreateOp(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.batch_norm(images) + self.assertTrue(output.op.name.startswith('BatchNorm/batchnorm')) + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3]) + + def testCreateVariables(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images, scale=True) + beta = variables.get_variables_by_name('beta')[0] + gamma = variables.get_variables_by_name('gamma')[0] + self.assertEquals(beta.op.name, 'BatchNorm/beta') + self.assertEquals(gamma.op.name, 'BatchNorm/gamma') + moving_mean = tf.get_collection('moving_vars')[0] + moving_variance = tf.get_collection('moving_vars')[1] + self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') + self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') + + def testMovingAverageVariables(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images, scale=True) + moving_mean = tf.moving_average_variables()[0] + moving_variance = tf.moving_average_variables()[1] + self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') + 
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') + + def testUpdateOps(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images) + update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) + update_moving_mean = update_ops[0] + update_moving_variance = update_ops[1] + self.assertEquals(update_moving_mean.op.name, + 'BatchNorm/AssignMovingAvg') + self.assertEquals(update_moving_variance.op.name, + 'BatchNorm/AssignMovingAvg_1') + + def testReuseVariables(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images, scale=True, scope='bn') + tf.get_variable_scope().reuse_variables() + ops.batch_norm(images, scale=True, scope='bn') + beta = variables.get_variables_by_name('beta') + gamma = variables.get_variables_by_name('gamma') + self.assertEquals(len(beta), 1) + self.assertEquals(len(gamma), 1) + moving_vars = tf.get_collection('moving_vars') + self.assertEquals(len(moving_vars), 2) + + def testReuseUpdateOps(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images, scope='bn') + self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 2) + tf.get_variable_scope().reuse_variables() + ops.batch_norm(images, scope='bn') + self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 4) + + def testCreateMovingVars(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + _ = ops.batch_norm(images, moving_vars='moving_vars') + moving_mean = tf.get_collection('moving_vars', + 'BatchNorm/moving_mean') + self.assertEquals(len(moving_mean), 1) + self.assertEquals(moving_mean[0].op.name, 'BatchNorm/moving_mean') + moving_variance = tf.get_collection('moving_vars', + 'BatchNorm/moving_variance') + 
self.assertEquals(len(moving_variance), 1) + self.assertEquals(moving_variance[0].op.name, 'BatchNorm/moving_variance') + + def testComputeMovingVars(self): + height, width = 3, 3 + with self.test_session() as sess: + image_shape = (10, height, width, 3) + image_values = np.random.rand(*image_shape) + expected_mean = np.mean(image_values, axis=(0, 1, 2)) + expected_var = np.var(image_values, axis=(0, 1, 2)) + images = tf.constant(image_values, shape=image_shape, dtype=tf.float32) + output = ops.batch_norm(images, decay=0.1) + update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) + with tf.control_dependencies(update_ops): + barrier = tf.no_op(name='gradient_barrier') + output = control_flow_ops.with_dependencies([barrier], output) + # Initialize all variables + sess.run(tf.initialize_all_variables()) + moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] + moving_variance = variables.get_variables('BatchNorm/moving_variance')[0] + mean, variance = sess.run([moving_mean, moving_variance]) + # After initialization moving_mean == 0 and moving_variance == 1. + self.assertAllClose(mean, [0] * 3) + self.assertAllClose(variance, [1] * 3) + for _ in range(10): + sess.run([output]) + mean = moving_mean.eval() + variance = moving_variance.eval() + # After 10 updates with decay 0.1 moving_mean == expected_mean and + # moving_variance == expected_var. 
+ self.assertAllClose(mean, expected_mean) + self.assertAllClose(variance, expected_var) + + def testEvalMovingVars(self): + height, width = 3, 3 + with self.test_session() as sess: + image_shape = (10, height, width, 3) + image_values = np.random.rand(*image_shape) + expected_mean = np.mean(image_values, axis=(0, 1, 2)) + expected_var = np.var(image_values, axis=(0, 1, 2)) + images = tf.constant(image_values, shape=image_shape, dtype=tf.float32) + output = ops.batch_norm(images, decay=0.1, is_training=False) + update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) + with tf.control_dependencies(update_ops): + barrier = tf.no_op(name='gradient_barrier') + output = control_flow_ops.with_dependencies([barrier], output) + # Initialize all variables + sess.run(tf.initialize_all_variables()) + moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] + moving_variance = variables.get_variables('BatchNorm/moving_variance')[0] + mean, variance = sess.run([moving_mean, moving_variance]) + # After initialization moving_mean == 0 and moving_variance == 1. + self.assertAllClose(mean, [0] * 3) + self.assertAllClose(variance, [1] * 3) + # Simulate assigment from saver restore. + init_assigns = [tf.assign(moving_mean, expected_mean), + tf.assign(moving_variance, expected_var)] + sess.run(init_assigns) + for _ in range(10): + sess.run([output], {images: np.random.rand(*image_shape)}) + mean = moving_mean.eval() + variance = moving_variance.eval() + # Although we feed different images, the moving_mean and moving_variance + # shouldn't change. 
+ self.assertAllClose(mean, expected_mean) + self.assertAllClose(variance, expected_var) + + def testReuseVars(self): + height, width = 3, 3 + with self.test_session() as sess: + image_shape = (10, height, width, 3) + image_values = np.random.rand(*image_shape) + expected_mean = np.mean(image_values, axis=(0, 1, 2)) + expected_var = np.var(image_values, axis=(0, 1, 2)) + images = tf.constant(image_values, shape=image_shape, dtype=tf.float32) + output = ops.batch_norm(images, decay=0.1, is_training=False) + update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) + with tf.control_dependencies(update_ops): + barrier = tf.no_op(name='gradient_barrier') + output = control_flow_ops.with_dependencies([barrier], output) + # Initialize all variables + sess.run(tf.initialize_all_variables()) + moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] + moving_variance = variables.get_variables('BatchNorm/moving_variance')[0] + mean, variance = sess.run([moving_mean, moving_variance]) + # After initialization moving_mean == 0 and moving_variance == 1. + self.assertAllClose(mean, [0] * 3) + self.assertAllClose(variance, [1] * 3) + # Simulate assigment from saver restore. + init_assigns = [tf.assign(moving_mean, expected_mean), + tf.assign(moving_variance, expected_var)] + sess.run(init_assigns) + for _ in range(10): + sess.run([output], {images: np.random.rand(*image_shape)}) + mean = moving_mean.eval() + variance = moving_variance.eval() + # Although we feed different images, the moving_mean and moving_variance + # shouldn't change. + self.assertAllClose(mean, expected_mean) + self.assertAllClose(variance, expected_var) + +if __name__ == '__main__': + tf.test.main() diff --git a/inception/slim/scopes.py b/inception/slim/scopes.py new file mode 100644 index 000000000..86c278b38 --- /dev/null +++ b/inception/slim/scopes.py @@ -0,0 +1,144 @@ +# Copyright 2016 Google Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains the new arg_scope used for TF-Slim ops. + + Allows one to define models much more compactly by eliminating boilerplate + code. This is accomplished through the use of argument scoping (arg_scope). + + Example of how to use scopes.arg_scope: + + with slim.arg_scope(ops.conv2d, padding='SAME', + stddev=0.01, weight_decay=0.0005): + net = ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1') + net = ops.conv2d(net, 256, [5, 5], scope='conv2') + + The first call to conv2d will use predefined args: + ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', + stddev=0.01, weight_decay=0.0005, scope='conv1') + + The second call to Conv will overwrite padding: + ops.conv2d(inputs, 256, [5, 5], padding='SAME', + stddev=0.01, weight_decay=0.0005, scope='conv2') + + Example of how to use scopes.add_arg_scope: + + @scopes.add_arg_scope + def conv2d(*args, **kwargs) +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import functools + + +from tensorflow.python.framework import ops + +_ARGSTACK_KEY = ("__arg_stack",) + +_DECORATED_OPS = set() + + +def _get_arg_stack(): + stack = ops.get_collection(_ARGSTACK_KEY) + if stack: + return stack[0] + else: + stack = [{}] + ops.add_to_collection(_ARGSTACK_KEY, stack) + return stack + + +def 
_current_arg_scope(): + stack = _get_arg_stack() + return stack[-1] + + +def _add_op(op): + key_op = (op.__module__, op.__name__) + if key_op not in _DECORATED_OPS: + _DECORATED_OPS.add(key_op) + + +@contextlib.contextmanager +def arg_scope(list_ops, **kwargs): + """Stores the default arguments for the given set of list_ops. + + Args: + list_ops: List or tuple of operations to set argument scope for. Every op in + list_ops need to be decorated with @add_arg_scope to work. + **kwargs: keyword=value that will define the defaults for each op in + list_ops. All the ops need to accept the given set of arguments. + + Yields: + the current_scope, which is a dictionary of {op: {arg: value}} + Raises: + TypeError: if list_ops is not a list or a tuple. + ValueError: if any op in list_ops has not be decorated with @add_arg_scope. + """ + if not isinstance(list_ops, (list, tuple)): + raise TypeError("list_ops is not a list or a tuple") + try: + current_scope = _current_arg_scope().copy() + for op in list_ops: + key_op = (op.__module__, op.__name__) + if not has_arg_scope(op): + raise ValueError("%s is not decorated with @add_arg_scope", key_op) + if key_op in current_scope: + current_kwargs = current_scope[key_op].copy() + current_kwargs.update(kwargs) + current_scope[key_op] = current_kwargs + else: + current_scope[key_op] = kwargs.copy() + _get_arg_stack().append(current_scope) + yield current_scope + finally: + _get_arg_stack().pop() + + +def add_arg_scope(func): + """Decorates a function with args so it can be used within an arg_scope. + + Args: + func: function to decorate. + + Returns: + A tuple with the decorated function func_with_args(). 
+ """ + @functools.wraps(func) + def func_with_args(*args, **kwargs): + current_scope = _current_arg_scope() + current_args = kwargs + key_func = (func.__module__, func.__name__) + if key_func in current_scope: + current_args = current_scope[key_func].copy() + current_args.update(kwargs) + return func(*args, **current_args) + _add_op(func) + return func_with_args + + +def has_arg_scope(func): + """Checks whether a func has been decorated with @add_arg_scope or not. + + Args: + func: function to check. + + Returns: + a boolean. + """ + key_op = (func.__module__, func.__name__) + return key_op in _DECORATED_OPS diff --git a/inception/slim/scopes_test.py b/inception/slim/scopes_test.py new file mode 100644 index 000000000..7bc27ad71 --- /dev/null +++ b/inception/slim/scopes_test.py @@ -0,0 +1,118 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests slim.scopes.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +import tensorflow as tf +from inception.slim import scopes + + +@scopes.add_arg_scope +def func1(*args, **kwargs): + return (args, kwargs) + + +@scopes.add_arg_scope +def func2(*args, **kwargs): + return (args, kwargs) + + +class ArgScopeTest(tf.test.TestCase): + + def testEmptyArgScope(self): + with self.test_session(): + self.assertEqual(scopes._current_arg_scope(), {}) + + def testSimpleArgScope(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + with self.test_session(): + with scopes.arg_scope([func1], a=1, b=None, c=[1]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testSimpleArgScopeWithTuple(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + with self.test_session(): + with scopes.arg_scope((func1,), a=1, b=None, c=[1]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testOverwriteArgScope(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': 2, 'c': [1]} + with scopes.arg_scope([func1], a=1, b=None, c=[1]): + args, kwargs = func1(0, b=2) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testNestedArgScope(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + with scopes.arg_scope([func1], a=1, b=None, c=[1]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + func1_kwargs['b'] = 2 + with scopes.arg_scope([func1], b=2): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testSharedArgScope(self): + func1_args = (0,) + func1_kwargs = {'a': 
1, 'b': None, 'c': [1]} + with scopes.arg_scope([func1, func2], a=1, b=None, c=[1]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + args, kwargs = func2(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testSharedArgScopeTuple(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + with scopes.arg_scope((func1, func2), a=1, b=None, c=[1]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + args, kwargs = func2(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testPartiallySharedArgScope(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + func2_args = (1,) + func2_kwargs = {'a': 1, 'b': None, 'd': [2]} + with scopes.arg_scope([func1, func2], a=1, b=None): + with scopes.arg_scope([func1], c=[1]), scopes.arg_scope([func2], d=[2]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + args, kwargs = func2(1) + self.assertTupleEqual(args, func2_args) + self.assertDictEqual(kwargs, func2_kwargs) + +if __name__ == '__main__': + tf.test.main() diff --git a/inception/slim/slim.py b/inception/slim/slim.py new file mode 100644 index 000000000..b7a5c0f8c --- /dev/null +++ b/inception/slim/slim.py @@ -0,0 +1,24 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TF-Slim grouped API. Please see README.md for details and usage.""" +# pylint: disable=unused-import + +# Collapse tf-slim into a single namespace. +from inception.slim import inception_model as inception +from inception.slim import losses +from inception.slim import ops +from inception.slim import scopes +from inception.slim import variables +from inception.slim.scopes import arg_scope diff --git a/inception/slim/variables.py b/inception/slim/variables.py new file mode 100644 index 000000000..07d142cd1 --- /dev/null +++ b/inception/slim/variables.py @@ -0,0 +1,224 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains convenience wrappers for creating Variables in TensorFlow. + +Usage: + weights_initializer = tf.truncated_normal_initializer(stddev=0.01) + l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005) + weights = variables.variable('weights', + shape=[100, 100], + initializer=weights_initializer, + regularizer=l2_regularizer, + device='/cpu:0') + + biases = variables.variable('biases', + shape=[100], + initializer=tf.zeros_initializer, + device='/cpu:0') + + # More complex example. 
+ + net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1') + net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2') + with slim.arg_scope(variables.Variables, restore=False): + net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3') + + # Get all model variables from all the layers. + model_variables = slim.variables.get_variables() + + # Get all model variables from a specific the layer, i.e 'conv1'. + conv1_variables = slim.variables.get_variables('conv1') + + # Get all weights from all the layers. + weights = slim.variables.get_variables_by_name('weights') + + # Get all bias from all the layers. + biases = slim.variables.get_variables_by_name('biases') + + # Get all variables in the VARIABLES_TO_RESTORE collection + # (i.e. only those created by 'conv1' and 'conv2') + variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE) + +************************************************ +* Initializing model variables from a checkpoint +************************************************ + +# Create some variables. +v1 = slim.variables.variable(name="v1", ..., restore=False) +v2 = slim.variables.variable(name="v2", ...) # By default restore=True +... +# The list of variables to restore should only contain 'v2'. +variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE) +restorer = tf.train.Saver(variables_to_restore) +with tf.Session() as sess: + # Restore variables from disk. + restorer.restore(sess, "/tmp/model.ckpt") + print("Model restored.") + # Do some work with the model + ... 
+ +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from inception.slim import scopes + +# Collection containing all the variables created using slim.variables +VARIABLES_COLLECTION = '_variables_' + +# Collection containing all the slim.variables that are marked to_restore +VARIABLES_TO_RESTORE = '_variables_to_restore_' + + +def get_variable_given_name(var): + """Gets the variable given name without the scope. + + Args: + var: a variable. + + Returns: + the given name of the variable without the scope. + """ + name = var.op.name + if '/' in name: + name = name.split('/')[-1] + return name + + +def default_collections(given_name, restore): + """Define the set of default collections that variables should be added. + + Args: + given_name: the given name of the variable. + restore: whether the variable should be added to the VARIABLES_TO_RESTORE + collection. + + Returns: + a list of default collections. + """ + defaults = [tf.GraphKeys.VARIABLES, VARIABLES_COLLECTION] + defaults += [VARIABLES_COLLECTION + given_name] + if restore: + defaults += [VARIABLES_TO_RESTORE] + return defaults + + +def add_variable(var, restore=True): + """Adds a variable to the default set of collections. + + Args: + var: a variable. + restore: whether the variable should be added to the + VARIABLES_TO_RESTORE collection. + """ + given_name = get_variable_given_name(var) + for collection in default_collections(given_name, restore): + if var not in tf.get_collection(collection): + tf.add_to_collection(collection, var) + + +def get_variables(prefix=None, suffix=None): + """Gets the list of variables, filtered by prefix and/or suffix. + + Args: + prefix: an optional prefix for filtering the variables to return. + suffix: an optional suffix for filtering the variables to return. + + Returns: + a list of variables with prefix and suffix. 
+ """ + candidates = tf.get_collection(VARIABLES_COLLECTION, prefix) + if suffix is not None: + candidates = [var for var in candidates if var.op.name.endswith(suffix)] + return candidates + + +def get_variables_by_name(given_name, prefix=None): + """Gets the list of variables were given that name. + + Args: + given_name: name given to the variable without scope. + prefix: an optional prefix for filtering the variables to return. + + Returns: + a list of variables with prefix and suffix. + """ + return tf.get_collection(VARIABLES_COLLECTION + given_name, prefix) + + +def get_unique_variable(name): + """Gets the variable uniquely identified by that name. + + Args: + name: a name that uniquely identifies the variable. + + Returns: + a tensorflow variable. + + Raises: + ValueError: if no variable uniquely identified by the name exists. + """ + candidates = tf.get_collection(tf.GraphKeys.VARIABLES, name) + if not candidates: + raise ValueError('Couldnt find variable %s' % name) + + for candidate in candidates: + if candidate.op.name == name: + return candidate + raise ValueError('Variable %s does not uniquely identify a variable', name) + + +@scopes.add_arg_scope +def variable(name, shape=None, dtype=tf.float32, initializer=None, + regularizer=None, trainable=True, collections=None, device='', + restore=True): + """Gets an existing variable with these parameters or creates a new one. + + It also add itself to a group with its name. + + Args: + name: the name of the new or existing variable. + shape: shape of the new or existing variable. + dtype: type of the new or existing variable (defaults to `DT_FLOAT`). + initializer: initializer for the variable if one is created. + regularizer: a (Tensor -> Tensor or None) function; the result of + applying it on a newly created variable will be added to the collection + GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. 
+ trainable: If `True` also add the variable to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). + collections: A list of collection names to which the Variable will be added. + Note that the variable is always also added to the tf.GraphKeys.VARIABLES + collection. + device: Optional device to place the variable. It can be an string or a + function that is called to get the device for the variable. + restore: whether the variable should be added to the + VARIABLES_TO_RESTORE collection. + + Returns: + The created or existing variable. + """ + # Instantiate the device for this variable if it is passed as a function. + if device and callable(device): + device = device() + collections = set(list(collections or []) + default_collections(name, + restore)) + with tf.device(device): + return tf.get_variable(name, shape=shape, dtype=dtype, + initializer=initializer, regularizer=regularizer, + trainable=trainable, collections=collections) diff --git a/inception/slim/variables_test.py b/inception/slim/variables_test.py new file mode 100644 index 000000000..2718265cf --- /dev/null +++ b/inception/slim/variables_test.py @@ -0,0 +1,200 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Tests for slim.variables."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+from inception.slim import scopes
+from inception.slim import variables
+
+
+# Exercises slim.variables: creation/scoping, lookup helpers
+# (get_variables, get_variables_by_name, get_unique_variable), the
+# VARIABLES_TO_RESTORE collection, device placement, and interaction
+# with scopes.arg_scope.
+class VariablesTest(tf.test.TestCase):
+
+  # NOTE(review): assertEquals is a deprecated alias of assertEqual in
+  # unittest; works here but assertEqual is the preferred spelling.
+  def testCreateVariable(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+        self.assertEquals(a.op.name, 'A/a')
+        self.assertListEqual(a.get_shape().as_list(), [5])
+
+  def testGetVariableGivenName(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+      with tf.variable_scope('B'):
+        b = variables.variable('a', [5])
+      self.assertEquals('a', variables.get_variable_given_name(a))
+      self.assertEquals('a', variables.get_variable_given_name(b))
+
+  # NOTE(review): name says "GivenNameScoped" but the body exercises
+  # get_variables_by_name, not get_variable_given_name — confirm intent.
+  def testGetVariableGivenNameScoped(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+        b = variables.variable('b', [5])
+        self.assertEquals([a], variables.get_variables_by_name('a'))
+        self.assertEquals([b], variables.get_variables_by_name('b'))
+
+  def testGetVariables(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+      with tf.variable_scope('B'):
+        b = variables.variable('a', [5])
+      self.assertEquals([a], variables.get_variables('A'))
+      self.assertEquals([b], variables.get_variables('B'))
+
+  def testGetVariablesSuffix(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+      with tf.variable_scope('A'):
+        b = variables.variable('b', [5])
+      self.assertEquals([a], variables.get_variables(suffix='a'))
+      self.assertEquals([b], variables.get_variables(suffix='b'))
+
+  def testGetVariableWithSingleVar(self):
+    with self.test_session():
+      with tf.variable_scope('parent'):
+        a = variables.variable('child', [5])
+      self.assertEquals(a, variables.get_unique_variable('parent/child'))
+
+  def testGetVariableWithDistractors(self):
+    with self.test_session():
+      with tf.variable_scope('parent'):
+        a = variables.variable('child', [5])
+        # Nested scope 'parent/child/...' must not match 'parent/child'.
+        with tf.variable_scope('child'):
+          variables.variable('grandchild1', [7])
+          variables.variable('grandchild2', [9])
+      self.assertEquals(a, variables.get_unique_variable('parent/child'))
+
+  def testGetVariableThrowsExceptionWithNoMatch(self):
+    var_name = 'cant_find_me'
+    with self.test_session():
+      with self.assertRaises(ValueError):
+        variables.get_unique_variable(var_name)
+
+  def testGetThrowsExceptionWithChildrenButNoMatch(self):
+    var_name = 'parent/child'
+    with self.test_session():
+      # Only descendants of 'parent/child' exist; the exact name does not.
+      with tf.variable_scope(var_name):
+        variables.variable('grandchild1', [7])
+        variables.variable('grandchild2', [9])
+      with self.assertRaises(ValueError):
+        variables.get_unique_variable(var_name)
+
+  def testGetVariablesToRestore(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+      with tf.variable_scope('B'):
+        b = variables.variable('b', [5])
+      self.assertListEqual([a, b],
+                           tf.get_collection(variables.VARIABLES_TO_RESTORE))
+
+  def testGetVariablesToRestorePartial(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+      with tf.variable_scope('B'):
+        # restore=False keeps 'b' out of VARIABLES_TO_RESTORE but it is
+        # still a regular variable.
+        b = variables.variable('b', [5], restore=False)
+      self.assertListEqual([a, b], variables.get_variables())
+      self.assertListEqual([a],
+                           tf.get_collection(variables.VARIABLES_TO_RESTORE))
+
+  def testReuseVariable(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [])
+      with tf.variable_scope('A', reuse=True):
+        b = variables.variable('a', [])
+      self.assertEquals(a, b)
+      self.assertListEqual([a], variables.get_variables())
+
+  def testVariableWithDevice(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [], device='cpu:0')
+        b = variables.variable('b', [], device='cpu:1')
+      self.assertDeviceEqual(a.device, 'cpu:0')
+      self.assertDeviceEqual(b.device, 'cpu:1')
+
+  def testVariableWithDeviceFromScope(self):
+    with self.test_session():
+      # Explicit device argument overrides the enclosing tf.device scope.
+      with tf.device('/cpu:0'):
+        a = variables.variable('a', [])
+        b = variables.variable('b', [], device='cpu:1')
+      self.assertDeviceEqual(a.device, 'cpu:0')
+      self.assertDeviceEqual(b.device, 'cpu:1')
+
+  def testVariableCollection(self):
+    with self.test_session():
+      # collections may be a single string, not only a list.
+      a = variables.variable('a', [], collections='A')
+      b = variables.variable('b', [], collections='B')
+      self.assertEquals(a, tf.get_collection('A')[0])
+      self.assertEquals(b, tf.get_collection('B')[0])
+
+  def testVariableCollections(self):
+    with self.test_session():
+      a = variables.variable('a', [], collections=['A', 'C'])
+      b = variables.variable('b', [], collections=['B', 'C'])
+      self.assertEquals(a, tf.get_collection('A')[0])
+      self.assertEquals(b, tf.get_collection('B')[0])
+
+  def testVariableCollectionsWithArgScope(self):
+    with self.test_session():
+      with scopes.arg_scope([variables.variable], collections='A'):
+        a = variables.variable('a', [])
+        b = variables.variable('b', [])
+      self.assertListEqual([a, b], tf.get_collection('A'))
+
+  def testVariableCollectionsWithArgScopeNested(self):
+    with self.test_session():
+      with scopes.arg_scope([variables.variable], collections='A'):
+        a = variables.variable('a', [])
+        # Inner arg_scope replaces (not merges) the collections default.
+        with scopes.arg_scope([variables.variable], collections='B'):
+          b = variables.variable('b', [])
+      self.assertEquals(a, tf.get_collection('A')[0])
+      self.assertEquals(b, tf.get_collection('B')[0])
+
+  def testVariableCollectionsWithArgScopeNonNested(self):
+    with self.test_session():
+      with scopes.arg_scope([variables.variable], collections='A'):
+        a = variables.variable('a', [])
+      with scopes.arg_scope([variables.variable], collections='B'):
+        b = variables.variable('b', [])
+      # 'c' is created outside both arg_scopes, so it lands in neither.
+      variables.variable('c', [])
+      self.assertListEqual([a], tf.get_collection('A'))
+      self.assertListEqual([b], tf.get_collection('B'))
+
+  def testVariableRestoreWithArgScopeNested(self):
+    with self.test_session():
+      with scopes.arg_scope([variables.variable], restore=True):
+        a = variables.variable('a', [])
+        with scopes.arg_scope([variables.variable],
+                              trainable=False,
+                              collections=['A', 'B']):
+          b = variables.variable('b', [])
+        c = variables.variable('c', [])
+      # restore=True from the outer scope applies to all three variables.
+      self.assertListEqual([a, b, c],
+                           tf.get_collection(variables.VARIABLES_TO_RESTORE))
+      self.assertListEqual([a, c], tf.trainable_variables())
+      self.assertListEqual([b], tf.get_collection('A'))
+      self.assertListEqual([b], tf.get_collection('B'))
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/third_party b/third_party
new file mode 120000
index 000000000..aab4d4d53
--- /dev/null
+++ b/third_party
@@ -0,0 +1 @@
+tensorflow/third_party
\ No newline at end of file
diff --git a/tools b/tools
new file mode 120000
index 000000000..19e3f35d4
--- /dev/null
+++ b/tools
@@ -0,0 +1 @@
+tensorflow/tools
\ No newline at end of file
-- 
GitLab