From a8a58316c6397aa67e1926b603e2d8cc7d1b51d7 Mon Sep 17 00:00:00 2001 From: zhang wenhui Date: Thu, 14 May 2020 19:45:48 +0800 Subject: [PATCH] Update paddlerec 1.8 (#4622) * update api 1.8 * update model 1.8 --- .../ctr/Paddle_baseline_KDD2019/README.md | 40 - PaddleRec/ctr/Paddle_baseline_KDD2019/args.py | 85 - .../Paddle_baseline_KDD2019/build_submit.py | 45 - .../data_set_phase1/._profiles.csv | Bin 244 -> 0 bytes .../data_set_phase1/._test_plans.csv | Bin 244 -> 0 bytes .../data_set_phase1/._test_queries.csv | Bin 244 -> 0 bytes .../data_set_phase1/._train_clicks.csv | Bin 244 -> 0 bytes .../data_set_phase1/._train_queries.csv | Bin 244 -> 0 bytes .../Paddle_baseline_KDD2019/generate_test.py | 147 -- .../ctr/Paddle_baseline_KDD2019/infer.py | 141 -- .../Paddle_baseline_KDD2019/local_train.py | 90 - .../ctr/Paddle_baseline_KDD2019/map_reader.py | 145 -- .../Paddle_baseline_KDD2019/network_confv6.py | 123 -- .../networks/network_conf.py | 106 - .../networks/network_confv4.py | 154 -- .../networks/network_confv6.py | 123 -- .../pre_process_test.py | 307 --- .../Paddle_baseline_KDD2019/pre_test_dense.py | 260 --- .../ctr/Paddle_baseline_KDD2019/preprocess.py | 262 --- .../preprocess_dense.py | 294 --- .../ctr/Paddle_baseline_KDD2019/weather.json | 1 - PaddleRec/ctr/dcn/cluster_train.py | 205 -- PaddleRec/ctr/dcn/cluster_train.sh | 58 - PaddleRec/ctr/dcn/reader.py | 4 +- PaddleRec/ctr/deepfm/cluster_train.py | 193 -- PaddleRec/ctr/deepfm/cluster_train.sh | 58 - PaddleRec/ctr/deepfm/infer.py | 2 +- PaddleRec/ctr/deepfm_dygraph/data_reader.py | 3 +- PaddleRec/ctr/din/README.md | 11 - PaddleRec/ctr/din/cluster_train.py | 172 -- PaddleRec/ctr/din/cluster_train.sh | 56 - PaddleRec/ctr/dnn/README.md | 38 +- PaddleRec/ctr/dnn/infer.py | 26 +- PaddleRec/ctr/xdeepfm/cluster_train.py | 198 -- PaddleRec/ctr/xdeepfm/cluster_train.sh | 58 - PaddleRec/ctr/xdeepfm/infer.py | 2 +- PaddleRec/gru4rec/README.md | 13 +- PaddleRec/gru4rec/cluster_train.py | 164 -- PaddleRec/gru4rec/cluster_train.sh | 62 - PaddleRec/gru4rec/utils.py | 2 +- PaddleRec/multiview_simnet/infer.py | 4 +- PaddleRec/multiview_simnet/train.py | 4 +- PaddleRec/ncf/evaluate.py | 55 +- PaddleRec/ssr/README.md | 3 - PaddleRec/ssr/cluster_train.py | 207 -- PaddleRec/ssr/cluster_train.sh | 58 - PaddleRec/ssr/infer.py | 16 +- PaddleRec/ssr/utils.py | 2 +- PaddleRec/tagspace/README.md | 8 - PaddleRec/tagspace/cluster_train.py | 137 -- PaddleRec/tagspace/cluster_train.sh | 58 - PaddleRec/tagspace/utils.py | 2 +- PaddleRec/tdm/tdm_demo/README.md | 26 +- PaddleRec/tdm/tdm_demo/dataset_generator.py | 4 +- PaddleRec/tdm/tdm_demo/infer_network.py | 58 +- PaddleRec/text_matching_on_quora/.run_ce.sh | 14 - PaddleRec/text_matching_on_quora/README.md | 177 -- PaddleRec/text_matching_on_quora/__init__.py | 0 PaddleRec/text_matching_on_quora/_ce.py | 66 - .../text_matching_on_quora/cdssm_base.log | 1834 ----------------- .../configs/__init__.py | 19 - .../configs/basic_config.py | 56 - .../text_matching_on_quora/configs/cdssm.py | 38 - .../text_matching_on_quora/configs/dec_att.py | 73 - .../configs/infer_sent.py | 63 - .../text_matching_on_quora/configs/sse.py | 43 - .../data/prepare_quora_data.sh | 33 - .../text_matching_on_quora/imgs/README.md | 1 - .../imgs/models_test_acc.png | Bin 136935 -> 0 bytes PaddleRec/text_matching_on_quora/metric.py | 37 - .../text_matching_on_quora/models/__init__.py | 18 - .../text_matching_on_quora/models/cdssm.py | 71 - .../text_matching_on_quora/models/dec_att.py | 159 -- .../models/infer_sent.py | 78 - 
.../models/match_layers.py | 55 - .../models/my_layers.py | 45 - .../text_matching_on_quora/models/pwim.py | 14 - .../text_matching_on_quora/models/sse.py | 81 - .../text_matching_on_quora/models/test.py | 13 - .../pretrained_word2vec.py | 67 - .../quora_question_pairs.py | 195 -- .../train_and_evaluate.py | 314 --- PaddleRec/text_matching_on_quora/utils.py | 210 -- PaddleRec/word2vec/README.md | 7 - PaddleRec/word2vec/cluster_train.py | 264 --- PaddleRec/word2vec/cluster_train.sh | 68 - 86 files changed, 128 insertions(+), 8275 deletions(-) delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/README.md delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/args.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/build_submit.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._profiles.csv delete mode 100755 PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._test_plans.csv delete mode 100755 PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._test_queries.csv delete mode 100755 PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._train_clicks.csv delete mode 100755 PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._train_queries.csv delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/generate_test.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/infer.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/local_train.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/map_reader.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/network_confv6.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_conf.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_confv4.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_confv6.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/pre_process_test.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/pre_test_dense.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/preprocess.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/preprocess_dense.py delete mode 100644 PaddleRec/ctr/Paddle_baseline_KDD2019/weather.json delete mode 100644 PaddleRec/ctr/dcn/cluster_train.py delete mode 100755 PaddleRec/ctr/dcn/cluster_train.sh delete mode 100644 PaddleRec/ctr/deepfm/cluster_train.py delete mode 100755 PaddleRec/ctr/deepfm/cluster_train.sh delete mode 100644 PaddleRec/ctr/din/cluster_train.py delete mode 100644 PaddleRec/ctr/din/cluster_train.sh delete mode 100644 PaddleRec/ctr/xdeepfm/cluster_train.py delete mode 100755 PaddleRec/ctr/xdeepfm/cluster_train.sh delete mode 100644 PaddleRec/gru4rec/cluster_train.py delete mode 100644 PaddleRec/gru4rec/cluster_train.sh delete mode 100644 PaddleRec/ssr/cluster_train.py delete mode 100644 PaddleRec/ssr/cluster_train.sh delete mode 100644 PaddleRec/tagspace/cluster_train.py delete mode 100644 PaddleRec/tagspace/cluster_train.sh delete mode 100755 PaddleRec/text_matching_on_quora/.run_ce.sh delete mode 100644 PaddleRec/text_matching_on_quora/README.md delete mode 100644 PaddleRec/text_matching_on_quora/__init__.py delete mode 100644 PaddleRec/text_matching_on_quora/_ce.py delete mode 100644 PaddleRec/text_matching_on_quora/cdssm_base.log delete mode 100755 PaddleRec/text_matching_on_quora/configs/__init__.py delete mode 100755 PaddleRec/text_matching_on_quora/configs/basic_config.py delete mode 100755 PaddleRec/text_matching_on_quora/configs/cdssm.py delete mode 100755 PaddleRec/text_matching_on_quora/configs/dec_att.py 
delete mode 100755 PaddleRec/text_matching_on_quora/configs/infer_sent.py
delete mode 100755 PaddleRec/text_matching_on_quora/configs/sse.py
delete mode 100755 PaddleRec/text_matching_on_quora/data/prepare_quora_data.sh
delete mode 100644 PaddleRec/text_matching_on_quora/imgs/README.md
delete mode 100644 PaddleRec/text_matching_on_quora/imgs/models_test_acc.png
delete mode 100755 PaddleRec/text_matching_on_quora/metric.py
delete mode 100755 PaddleRec/text_matching_on_quora/models/__init__.py
delete mode 100755 PaddleRec/text_matching_on_quora/models/cdssm.py
delete mode 100755 PaddleRec/text_matching_on_quora/models/dec_att.py
delete mode 100644 PaddleRec/text_matching_on_quora/models/infer_sent.py
delete mode 100755 PaddleRec/text_matching_on_quora/models/match_layers.py
delete mode 100755 PaddleRec/text_matching_on_quora/models/my_layers.py
delete mode 100644 PaddleRec/text_matching_on_quora/models/pwim.py
delete mode 100644 PaddleRec/text_matching_on_quora/models/sse.py
delete mode 100644 PaddleRec/text_matching_on_quora/models/test.py
delete mode 100755 PaddleRec/text_matching_on_quora/pretrained_word2vec.py
delete mode 100755 PaddleRec/text_matching_on_quora/quora_question_pairs.py
delete mode 100755 PaddleRec/text_matching_on_quora/train_and_evaluate.py
delete mode 100755 PaddleRec/text_matching_on_quora/utils.py
delete mode 100644 PaddleRec/word2vec/cluster_train.py
delete mode 100644 PaddleRec/word2vec/cluster_train.sh
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/README.md b/PaddleRec/ctr/Paddle_baseline_KDD2019/README.md
deleted file mode 100644
index 4555bb2a..00000000
--- a/PaddleRec/ctr/Paddle_baseline_KDD2019/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Paddle_baseline_KDD2019
-Paddle baseline for the KDD2019 competition "Context-Aware Multi-Modal Transportation Recommendation" (https://dianshi.baidu.com/competition/29/question)
-
-This repository contains the demo code for the KDD2019 "Context-Aware Multi-Modal Transportation Recommendation" competition, written in Python on top of PaddlePaddle. Note that this repository is under development and contributions are welcome. The current baseline solution scores 0.68 - 0.69 on online submission; as an example, my submission based on these networks scored 0.6898.
-This baseline is published to encourage the use of PaddlePaddle for building stronger recommendation models.
-
-The example code runs on Linux, Python 2.7, on a single machine with CPU. Distributed training options are not provided here; to learn more about them, please check the other model examples at https://github.com/PaddlePaddle/models. Regarding training speed: one epoch with batch size 1000 over all training instances generated from the raw data takes about 8 minutes with the SGD optimizer (relatively longer with the Adam optimizer).
-
-The configuration and structure of the networks are basic; many optimizations can be built on top of them for better results, e.g. a better cost function, more powerful feature engineering, carefully designed model validation, and NN optimization tricks.
-
-The code is rough and comes from my daily use; it will be cleaned up over time.
-## Install PaddlePaddle
-Please visit the official PaddlePaddle site (http://www.paddlepaddle.org/documentation/docs/zh/1.4/beginners_guide/install/index_cn.html).
-## preprocess feature
-```python
-python preprocess_dense.py # change for different feature strategy
-python pre_test_dense.py
-```
-preprocess.py and preprocess_dense.py are the scripts for preprocessing the raw data; two versions are provided, one for all-sparse features and one for sparse plus dense features. Correspondingly, pre_process_test.py and pre_test_dense.py are the scripts to preprocess the raw test data. The training instances are saved in JSON, which makes it easy to add new features. In our demo, all features are generated from the provided raw data except the weather feature, which is generated from open weather records.
-Note that the features generated in this step need to match the input definition of the model, so make sure you use the right version. In the demo code, the sparse plus dense features are used for network_confv6.
-
-## build the network
-The main network logic is in network_confv?.py. The networks are based on FM & deep related algorithms. I tried several networks and publish some of them here. There may be some defects in the networks, but all of them are functional.
-
-## train the network
-```python
-python local_train.py
-```
-In local_train.py and map_reader.py, I use the dataset API, so you need to download the corresponding .whl package or build PaddlePaddle from the develop branch. The reason for using the dataset API is that it feeds data much faster; a sketch of the pattern follows this README.
-Note that the input format fed into the network is self-defined; make sure training and test build the same format.
-
-## test results
-```python
-python generate_test.py
-python build_submit.py
-```
-In generate_test.py and build_submit.py, for convenience, I train the network on the whole training data and test it on the provided data without labels.
-
-
-
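For reference, here is a minimal, hypothetical sketch of the dataset-pipe pattern that local_train.py and map_reader.py use; the class name, slots, and comma-separated input format are illustrative only (the real generator is MapDataset in map_reader.py, invoked through the command set by dataset.set_pipe_command):

```python
import paddle.fluid.incubate.data_generator as dg


class DemoDataset(dg.MultiSlotDataGenerator):
    # Paddle's dataset workers pipe each input line through this generator;
    # it must yield (slot_name, value_list) pairs for every declared slot.
    def generate_sample(self, line):
        def data_iter():
            fields = line.strip().split(",")
            dense_feature = [float(x) for x in fields[:3]]
            label = [int(fields[3])]
            yield [("dense_feature", dense_feature), ("label", label)]

        return data_iter


if __name__ == "__main__":
    # consume training lines from stdin, as map_reader.py does
    DemoDataset().run_from_stdin()
```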
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/args.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/args.py
deleted file mode 100644
index 55745918..00000000
--- a/PaddleRec/ctr/Paddle_baseline_KDD2019/args.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import argparse
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="PaddlePaddle CTR example")
-    parser.add_argument(
-        '--train_data_path',
-        type=str,
-        default='./data/raw/train.txt',
-        help="The path of training dataset")
-    parser.add_argument(
-        '--test_data_path',
-        type=str,
-        default='./data/raw/valid.txt',
-        help="The path of testing dataset")
-    parser.add_argument(
-        '--batch_size',
-        type=int,
-        default=1000,
-        help="The size of mini-batch (default: 1000)")
-    parser.add_argument(
-        '--embedding_size',
-        type=int,
-        default=16,
-        help="The size for embedding layer (default: 16)")
-    parser.add_argument(
-        '--num_passes',
-        type=int,
-        default=10,
-        help="The number of passes to train (default: 10)")
-    parser.add_argument(
-        '--model_output_dir',
-        type=str,
-        default='models',
-        help='The path for model to store (default: models)')
-    parser.add_argument(
-        '--sparse_feature_dim',
-        type=int,
-        default=1000001,
-        help='sparse feature hashing space for index processing')
-    parser.add_argument(
-        '--is_local',
-        type=int,
-        default=1,
-        help='Local train or distributed train (default: 1)')
-    parser.add_argument(
-        '--cloud_train',
-        type=int,
-        default=0,
-        help='Local train or distributed train on paddlecloud (default: 0)')
-    parser.add_argument(
-        '--async_mode',
-        action='store_true',
-        default=False,
-        help='Whether to start pserver in async mode to support ASGD')
-    parser.add_argument(
-        '--no_split_var',
-        action='store_true',
-        default=False,
-        help='Whether to split variables into blocks when update_method is pserver')
-    parser.add_argument(
-        '--role',
-        type=str,
-        default='pserver',  # trainer or pserver
-        help='The role of this node: trainer or pserver (default: pserver)')
-    parser.add_argument(
-        '--endpoints',
-        type=str,
-        default='127.0.0.1:6000',
-        help='The pserver endpoints, like: 127.0.0.1:6000,127.0.0.1:6001')
-    parser.add_argument(
-        '--current_endpoint',
-        type=str,
-        default='127.0.0.1:6000',
-        help='The endpoint of the current pserver (default: 127.0.0.1:6000)')
-    parser.add_argument(
-        '--trainer_id',
-        type=int,
-        default=0,
-        help='The id of the current trainer (default: 0)')
-    parser.add_argument(
-        '--trainers',
-        type=int,
-        default=1,
-        help='The number of trainers (default: 1)')
-    return parser.parse_args()
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/build_submit.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/build_submit.py
deleted file mode 100644
index b4ffc498..00000000
--- a/PaddleRec/ctr/Paddle_baseline_KDD2019/build_submit.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import json
-import csv
-import io
-
-
-def build():
-    submit_map = {}
-    with io.open('./submit/submit.csv', 'wb') as csv_file:
-        writer = csv.writer(csv_file, delimiter=',')
-        writer.writerow(['sid', 'recommend_mode'])
-        with open('./out/normed_test_session.txt', 'r') as f1:
-            with open('./testres/res8', 'r') as f2:
-                cur_session = ''
-                for x, y in zip(f1.readlines(), f2.readlines()):
-                    m1 = json.loads(x)
-                    session_id = m1["session_id"]
-                    if cur_session == '':
-                        cur_session = session_id
-
-                    transport_mode = m1["plan"]["transport_mode"]
-
-                    # a new session begins: flush the best mode of the finished one
-                    if cur_session != session_id:
-                        writer.writerow([str(cur_session), str(submit_map[cur_session]["transport_mode"])])
-                        cur_session = session_id
-                    if session_id not in submit_map:
-                        submit_map[session_id] = {}
-                        submit_map[session_id]["transport_mode"] = transport_mode
-                        submit_map[session_id]["probability"] = y
-                        #if int(submit_map[session_id]["transport_mode"]) == 0 and submit_map[session_id]["probability"] > 0.02:
-                        #submit_map[session_id]["probability"] = 0.99
-                    else:
-                        # keep the transport mode with the highest predicted probability
-                        if float(y) > float(submit_map[session_id]["probability"]):
-                            submit_map[session_id]["transport_mode"] = transport_mode
-                            submit_map[session_id]["probability"] = y
-                            #if int(submit_map[session_id]["transport_mode"]) == 0 and submit_map[session_id]["probability"] > 0.02:
-                            #submit_map[session_id]["transport_mode"] = 0
-                            #submit_map[session_id]["probability"] = 0.99
-
-                writer.writerow([cur_session, submit_map[cur_session]["transport_mode"]])
-
-
-if __name__ == "__main__":
-    build()
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._profiles.csv b/PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._profiles.csv
deleted file mode 100644
index 3e7a69fbd3cccda1242c8056ce0f887f97775226..0000000000000000000000000000000000000000
(binary AppleDouble resource-fork file; patch data omitted)
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._test_plans.csv b/PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._test_plans.csv
deleted file mode 100755
index 6070f2260eb3d5a7069741ec4ebb7e0ef57d13f3..0000000000000000000000000000000000000000
(binary AppleDouble resource-fork file; patch data omitted)
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._test_queries.csv b/PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._test_queries.csv
deleted file mode 100755
index 98060354f5bf046fff792ca359aecac750cce85e..0000000000000000000000000000000000000000
(binary AppleDouble resource-fork file; patch data omitted)
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._train_clicks.csv b/PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._train_clicks.csv
deleted file mode 100755
index 35f23f78ab5c254ccc1e631b8736a2854f5dfefa..0000000000000000000000000000000000000000
(binary AppleDouble resource-fork file; patch data omitted)
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._train_queries.csv b/PaddleRec/ctr/Paddle_baseline_KDD2019/data_set_phase1/._train_queries.csv
deleted file mode 100755
(binary AppleDouble resource-fork file; patch data omitted)
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/generate_test.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/generate_test.py
deleted file mode 100644
index 66bf13d2..00000000
--- a/PaddleRec/ctr/Paddle_baseline_KDD2019/generate_test.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
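# A standalone sketch of the cumulative-offset bookkeeping performed by the
# to_lodtensor() helper defined below: variable-length sequences are flattened
# and described by a level-of-detail (LoD) offset list. build_lod() is a
# hypothetical helper added here for illustration only.
def build_lod(sequences):
    lod = [0]
    for seq in sequences:
        lod.append(lod[-1] + len(seq))  # each entry is a cumulative length
    return lod

# e.g. build_lod([[1, 2], [3], [4, 5, 6]]) returns [0, 2, 3, 6]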
- -import argparse -import logging -import numpy as np -# disable gpu training for this example -import os - -os.environ["CUDA_VISIBLE_DEVICES"] = "" -import paddle -import paddle.fluid as fluid -logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s') -logger = logging.getLogger("fluid") -logger.setLevel(logging.INFO) -num_context_feature = 22 - - -def parse_args(): - parser = argparse.ArgumentParser(description="PaddlePaddle DeepFM example") - parser.add_argument( - '--model_path', - type=str, - #required=True, - default='models', - help="The path of model parameters gz file") - parser.add_argument( - '--data_path', - type=str, - required=False, - help="The path of the dataset to infer") - parser.add_argument( - '--embedding_size', - type=int, - default=16, - help="The size for embedding layer (default:10)") - parser.add_argument( - '--sparse_feature_dim', - type=int, - default=1000001, - help="The size for embedding layer (default:1000001)") - parser.add_argument( - '--batch_size', - type=int, - default=1000, - help="The size of mini-batch (default:1000)") - - return parser.parse_args() - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - - return res - - -def data2tensor(data, place): - feed_dict = {} - dense = data[0] - sparse = data[1:-1] - y = data[-1] - #user_data = np.array([x[0] for x in data]).astype("float32") - #user_data = user_data.reshape([-1, 10]) - #feed_dict["user_profile"] = user_data - dense_data = np.array([x[0] for x in data]).astype("float32") - dense_data = dense_data.reshape([-1, 3]) - feed_dict["dense_feature"] = dense_data - for i in range(num_context_feature): - sparse_data = to_lodtensor([x[1 + i] for x in data], place) - feed_dict["context" + str(i)] = sparse_data - - context_fm = to_lodtensor( - np.array([x[-2] for x in data]).astype("float32"), place) - - feed_dict["context_fm"] = context_fm - y_data = np.array([x[-1] for x in data]).astype("int64") - y_data = y_data.reshape([-1, 1]) - feed_dict["label"] = y_data - return feed_dict - - -def test(): - args = parse_args() - - place = fluid.CPUPlace() - test_scope = fluid.core.Scope() - - # filelist = ["%s/%s" % (args.data_path, x) for x in os.listdir(args.data_path)] - from map_reader import MapDataset - map_dataset = MapDataset() - map_dataset.setup(args.sparse_feature_dim) - exe = fluid.Executor(place) - - whole_filelist = ["./out/normed_test_session.txt"] - test_files = whole_filelist[int(0.0 * len(whole_filelist)):int(1.0 * len( - whole_filelist))] - - epochs = 1 - - for i in range(epochs): - cur_model_path = os.path.join(args.model_path, - "epoch" + str(1) + ".model") - with open("./testres/res" + str(i), 'w') as r: - with fluid.scope_guard(test_scope): - [inference_program, feed_target_names, fetch_targets] = \ - fluid.io.load_inference_model(cur_model_path, exe) - - test_reader = map_dataset.test_reader(test_files, 1000, 100000) - k = 0 - for batch_id, data in enumerate(test_reader()): - print(len(data[0])) - feed_dict = data2tensor(data, place) - loss_val, auc_val, accuracy, predict, _ = exe.run( - inference_program, - feed=feed_dict, - fetch_list=fetch_targets, - return_numpy=False) - - x = np.array(predict) - for j in range(x.shape[0]): - r.write(str(x[j][1])) - 
r.write("\n") - - -if __name__ == '__main__': - test() diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/infer.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/infer.py deleted file mode 100644 index c218ce0f..00000000 --- a/PaddleRec/ctr/Paddle_baseline_KDD2019/infer.py +++ /dev/null @@ -1,141 +0,0 @@ -import argparse -import logging - -import numpy as np -# disable gpu training for this example -import os - -os.environ["CUDA_VISIBLE_DEVICES"] = "" -import paddle -import paddle.fluid as fluid - -import map_reader -from network_conf import ctr_deepfm_dataset - -logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s') -logger = logging.getLogger("fluid") -logger.setLevel(logging.INFO) - - -def parse_args(): - parser = argparse.ArgumentParser(description="PaddlePaddle DeepFM example") - parser.add_argument( - '--model_path', - type=str, - #required=True, - default='models', - help="The path of model parameters gz file") - parser.add_argument( - '--data_path', - type=str, - required=False, - help="The path of the dataset to infer") - parser.add_argument( - '--embedding_size', - type=int, - default=16, - help="The size for embedding layer (default:10)") - parser.add_argument( - '--sparse_feature_dim', - type=int, - default=1000001, - help="The size for embedding layer (default:1000001)") - parser.add_argument( - '--batch_size', - type=int, - default=1000, - help="The size of mini-batch (default:1000)") - - return parser.parse_args() - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def data2tensor(data, place): - feed_dict = {} - test_dict = {} - dense = data[0] - sparse = data[1:-1] - y = data[-1] - dense_data = np.array([x[0] for x in data]).astype("float32") - dense_data = dense_data.reshape([-1, 65]) - feed_dict["user_profile"] = dense_data - for i in range(10): - sparse_data = to_lodtensor([x[1 + i] for x in data], place) - feed_dict["context" + str(i)] = sparse_data - - y_data = np.array([x[-1] for x in data]).astype("int64") - y_data = y_data.reshape([-1, 1]) - feed_dict["label"] = y_data - test_dict["test"] = [1] - return feed_dict, test_dict - - -def infer(): - args = parse_args() - - place = fluid.CPUPlace() - inference_scope = fluid.core.Scope() - - filelist = [ - "%s/%s" % (args.data_path, x) for x in os.listdir(args.data_path) - ] - from map_reader import MapDataset - map_dataset = MapDataset() - map_dataset.setup(args.sparse_feature_dim) - exe = fluid.Executor(place) - - whole_filelist = [ - "raw_data/part-%d" % x for x in range(len(os.listdir("raw_data"))) - ] - #whole_filelist = ["./out/normed_train09", "./out/normed_train10", "./out/normed_train11"] - test_files = whole_filelist[int(0.0 * len(whole_filelist)):int(1.0 * len( - whole_filelist))] - - # file_groups = [whole_filelist[i:i+train_thread_num] for i in range(0, len(whole_filelist), train_thread_num)] - - def set_zero(var_name): - param = inference_scope.var(var_name).get_tensor() - param_array = np.zeros(param._get_dims()).astype("int64") - param.set(param_array, place) - - epochs = 2 - for i in range(epochs): - cur_model_path = os.path.join(args.model_path, - "epoch" + str(i + 1) + ".model") - with fluid.scope_guard(inference_scope): - [inference_program, 
feed_target_names, fetch_targets] = \
-                fluid.io.load_inference_model(cur_model_path, exe)
-            auc_states_names = ['_generated_var_2', '_generated_var_3']
-            for name in auc_states_names:
-                set_zero(name)
-
-            test_reader = map_dataset.infer_reader(test_files, 1000, 100000)
-            for batch_id, data in enumerate(test_reader()):
-                loss_val, auc_val, accuracy, predict, label = exe.run(
-                    inference_program,
-                    feed=data2tensor(data, place),
-                    fetch_list=fetch_targets,
-                    return_numpy=False)
-
-                #print(np.array(predict))
-                #x = np.array(predict)
-                #print(x.shape)
-            #print("train_pass_%d, test_pass_%d\t%f\t" % (i - 1, i, auc_val))
-
-
-if __name__ == '__main__':
-    infer()
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/local_train.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/local_train.py
deleted file mode 100644
index 9d7e9452..00000000
--- a/PaddleRec/ctr/Paddle_baseline_KDD2019/local_train.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from __future__ import print_function
-
-from args import parse_args
-import os
-import paddle.fluid as fluid
-import sys
-from network_confv6 import ctr_deepfm_dataset
-
-NUM_CONTEXT_FEATURE = 22
-DIM_USER_PROFILE = 10
-DIM_DENSE_FEATURE = 3
-PYTHON_PATH = "/home/yaoxuefeng/whls/paddle_release_home/python/bin/python"  # this is mine; change it to yours
-
-
-def train():
-    args = parse_args()
-    if not os.path.isdir(args.model_output_dir):
-        os.mkdir(args.model_output_dir)
-
-    # set the input format for our model; note that you need to carefully
-    # modify these when you define a new network
-    #user_profile = fluid.layers.data(
-    #name="user_profile", shape=[DIM_USER_PROFILE], dtype='int64', lod_level=1)
-    dense_feature = fluid.layers.data(
-        name="dense_feature", shape=[DIM_DENSE_FEATURE], dtype='float32')
-    context_feature = [
-        fluid.layers.data(
-            name="context" + str(i), shape=[1], lod_level=1, dtype="int64")
-        for i in range(0, NUM_CONTEXT_FEATURE)
-    ]
-    context_feature_fm = fluid.layers.data(
-        name="context_fm", shape=[1], dtype='int64', lod_level=1)
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-
-    print("ready to network")
-    # self-defined network
-    loss, auc_var, batch_auc_var, accuracy, predict = ctr_deepfm_dataset(
-        dense_feature, context_feature, context_feature_fm, label,
-        args.embedding_size, args.sparse_feature_dim)
-
-    print("ready to optimize")
-    optimizer = fluid.optimizer.SGD(learning_rate=1e-4)
-    optimizer.minimize(loss)
-    # single-machine CPU training; for more options on training, please visit
-    # the PaddlePaddle site
-    exe = fluid.Executor(fluid.CPUPlace())
-    exe.run(fluid.default_startup_program())
-    # use the dataset API for much faster data feeding
-    dataset = fluid.DatasetFactory().create_dataset()
-    dataset.set_use_var([dense_feature] + context_feature +
-                        [context_feature_fm] + [label])
-    # self-defined processing of the generated training instances, in map_reader.py
-    pipe_command = PYTHON_PATH + " map_reader.py %d" % args.sparse_feature_dim
-    dataset.set_pipe_command(pipe_command)
-    dataset.set_batch_size(args.batch_size)
-    thread_num = 1
-    dataset.set_thread(thread_num)
-    # split the training files yourself, for example:
-    # "split -a 2 -d -l 200000 normed_train.txt normed_train"
-    whole_filelist = [
-        "./out/normed_train%d" % x for x in range(len(os.listdir("out")))
-    ]
-    whole_filelist = [
-        "./out/normed_train00", "./out/normed_train01", "./out/normed_train02",
-        "./out/normed_train03", "./out/normed_train04", "./out/normed_train05",
-        "./out/normed_train06", "./out/normed_train07", "./out/normed_train08",
-        "./out/normed_train09", "./out/normed_train10", "./out/normed_train11"
-    ]
-    print("ready to epochs")
-    epochs = 10
-    for i in range(epochs):
-        print("start %dth epoch" % i)
-        dataset.set_filelist(whole_filelist[:int(len(whole_filelist))])
-        # print the information you want by setting fetch_list and fetch_info
-        exe.train_from_dataset(
-            program=fluid.default_main_program(),
-            dataset=dataset,
-            fetch_list=[auc_var, accuracy, predict, label],
-            fetch_info=["auc", "accuracy", "predict", "label"],
-            debug=False)
-        model_dir = os.path.join(args.model_output_dir,
-                                 'epoch' + str(i + 1) + ".model")
-        sys.stderr.write("epoch%d finished" % (i + 1))
-        # save model
-        fluid.io.save_inference_model(
-            model_dir,
-            [dense_feature.name] + [x.name for x in context_feature] +
-            [context_feature_fm.name] + [label.name],
-            [loss, auc_var, accuracy, predict, label], exe)
-
-
-if __name__ == '__main__':
-    train()
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/map_reader.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/map_reader.py
deleted file mode 100644
index 4a07e512..00000000
--- a/PaddleRec/ctr/Paddle_baseline_KDD2019/map_reader.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
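# A standalone sketch of the feature-hashing trick that _process_line() below
# applies to every categorical feature. hash_feature() is a hypothetical
# helper for illustration; the real hash space comes from sys.argv[1]
# (args.sparse_feature_dim, 1000001 by default).
HASH_DIM = 1000001

def hash_feature(name, value):
    # Prefixing the feature name keeps equal values from different fields
    # apart; note that Python 3 salts hash() per process, so set
    # PYTHONHASHSEED if you need a reproducible mapping.
    return hash(name + str(value)) % HASH_DIM

# e.g. hash_feature("transport_mode", 3) yields an id in [0, 1000001)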
-
-import sys
-import json
-import paddle.fluid.incubate.data_generator as dg
-
-
-class MapDataset(dg.MultiSlotDataGenerator):
-    def setup(self, sparse_feature_dim):
-        self.profile_length = 65
-        self.dense_length = 3
-        # feature names
-        self.dense_feature_list = ["distance", "price", "eta"]
-
-        self.pid_list = ["pid"]
-        self.query_feature_list = ["weekday", "hour", "o1", "o2", "d1", "d2"]
-        self.plan_feature_list = ["transport_mode"]
-        self.rank_feature_list = ["plan_rank", "whole_rank", "price_rank", "eta_rank", "distance_rank"]
-        self.rank_whole_pic_list = ["mode_rank1", "mode_rank2", "mode_rank3", "mode_rank4",
-                                    "mode_rank5"]
-        self.weather_feature_list = ["max_temp", "min_temp", "wea", "wind"]
-        self.hash_dim = 1000001
-        self.train_idx_ = 2000000
-        # carefully set if you change the features
-        self.categorical_range_ = range(0, 22)
-
-    # process one instance
-    def _process_line(self, line):
-        instance = json.loads(line)
-        """
-        profile = instance["profile"]
-        len_profile = len(profile)
-        if len_profile >= 10:
-            user_profile_feature = profile[0:10]
-        else:
-            profile.extend([0]*(10-len_profile))
-            user_profile_feature = profile
-
-        if len(profile) > 1 or (len(profile) == 1 and profile[0] != 0):
-            for p in profile:
-                if p >= 1 and p <= 65:
-                    user_profile_feature[p - 1] = 1
-        """
-        context_feature = []
-        context_feature_fm = []
-        dense_feature = [0] * self.dense_length
-        plan = instance["plan"]
-        for i, val in enumerate(self.dense_feature_list):
-            dense_feature[i] = plan[val]
-
-        if (instance["pid"] == ""):
-            instance["pid"] = 0
-
-        query = instance["query"]
-        weather_dic = instance["weather"]
-        for fea in self.pid_list:
-            context_feature.append([hash(fea + str(instance[fea])) % self.hash_dim])
-            context_feature_fm.append(hash(fea + str(instance[fea])) % self.hash_dim)
-        for fea in self.query_feature_list:
-            context_feature.append([hash(fea + str(query[fea])) % self.hash_dim])
-            context_feature_fm.append(hash(fea + str(query[fea])) % self.hash_dim)
-        for fea in self.plan_feature_list:
-            context_feature.append([hash(fea + str(plan[fea])) % self.hash_dim])
-            context_feature_fm.append(hash(fea + str(plan[fea])) % self.hash_dim)
-        for fea in self.rank_feature_list:
-            context_feature.append([hash(fea + str(instance[fea])) % self.hash_dim])
-            context_feature_fm.append(hash(fea + str(instance[fea])) % self.hash_dim)
-        for fea in self.rank_whole_pic_list:
-            context_feature.append([hash(fea + str(instance[fea])) % self.hash_dim])
-            context_feature_fm.append(hash(fea + str(instance[fea])) % self.hash_dim)
-        for fea in self.weather_feature_list:
-            context_feature.append([hash(fea + str(weather_dic[fea])) % self.hash_dim])
-            context_feature_fm.append(hash(fea + str(weather_dic[fea])) % self.hash_dim)
-
-        label = [int(instance["label"])]
-
-        return dense_feature, context_feature, context_feature_fm, label
-
-    def infer_reader(self, filelist, batch, buf_size):
-        print(filelist)
-
-        def local_iter():
-            for fname in filelist:
-                with open(fname.strip(), "r") as fin:
-                    for line in fin:
-                        dense_feature, sparse_feature, sparse_feature_fm, label = self._process_line(line)
-                        yield [dense_feature] + sparse_feature + [sparse_feature_fm] + [label]
-
-        import paddle
-        batch_iter = paddle.batch(
-            paddle.reader.shuffle(
-                local_iter, buf_size=buf_size),
-            batch_size=batch)
-        return batch_iter
-
-    # generate inputs for testing
-    def test_reader(self, filelist, batch, buf_size):
-        print(filelist)
-
-        def local_iter():
-            for fname in filelist:
-                with open(fname.strip(), "r") as fin:
-                    for line in fin:
-                        dense_feature, sparse_feature, sparse_feature_fm, label = self._process_line(line)
-                        yield [dense_feature] + sparse_feature + [sparse_feature_fm] + [label]
-
-        import paddle
-        batch_iter = paddle.batch(
-            paddle.reader.buffered(
-                local_iter, size=buf_size),
-            batch_size=batch)
-        return batch_iter
-
-    # generate inputs for training
-    def generate_sample(self, line):
-        def data_iter():
-            dense_feature, sparse_feature, sparse_feature_fm, label = self._process_line(line)
-            #feature_name = ["user_profile"]
-            feature_name = []
-            feature_name.append("dense_feature")
-            for idx in self.categorical_range_:
-                feature_name.append("context" + str(idx))
-            feature_name.append("context_fm")
-            feature_name.append("label")
-            yield zip(feature_name, [dense_feature] + sparse_feature + [sparse_feature_fm] + [label])
-
-        return data_iter
-
-
-if __name__ == "__main__":
-    map_dataset = MapDataset()
-    map_dataset.setup(int(sys.argv[1]))
-    map_dataset.run_from_stdin()
diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/network_confv6.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/network_confv6.py
deleted file mode 100644
index 5a9a5d4b..00000000
--- a/PaddleRec/ctr/Paddle_baseline_KDD2019/network_confv6.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
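# A standalone NumPy sketch of the factorization-machine identity that
# dense_fm_layer() and sparse_fm_layer() below implement; x and V are
# illustrative stand-ins for the input batch and the factor table. Summing
# 0.5 * ((xV)^2 - x^2 V^2) over the factor axis equals the pairwise
# second-order FM term sum_{i<j} <V_i, V_j> x_i x_j.
import numpy as np

x = np.random.rand(4, 3)   # batch of 4 samples, 3 dense features
V = np.random.rand(3, 16)  # factor table: one 16-dim factor per feature
second_order = 0.5 * (np.square(np.matmul(x, V)) -
                      np.matmul(np.square(x), np.square(V)))
print(second_order.shape)  # (4, 16), matching the layer's output shape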
- -import paddle.fluid as fluid -import math - -user_profile_dim = 65 -dense_feature_dim = 3 - -def ctr_deepfm_dataset(dense_feature, context_feature, context_feature_fm, label, - embedding_size, sparse_feature_dim): - def dense_fm_layer(input, emb_dict_size, factor_size, fm_param_attr): - - first_order = fluid.layers.fc(input=input, size=1) - emb_table = fluid.layers.create_parameter(shape=[emb_dict_size, factor_size], - dtype='float32', attr=fm_param_attr) - - input_mul_factor = fluid.layers.matmul(input, emb_table) - input_mul_factor_square = fluid.layers.square(input_mul_factor) - input_square = fluid.layers.square(input) - factor_square = fluid.layers.square(emb_table) - input_square_mul_factor_square = fluid.layers.matmul(input_square, factor_square) - - second_order = 0.5 * (input_mul_factor_square - input_square_mul_factor_square) - return first_order, second_order - - - dense_fm_param_attr = fluid.param_attr.ParamAttr(name="DenseFeatFactors", - initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(dense_feature_dim))) - dense_fm_first, dense_fm_second = dense_fm_layer( - dense_feature, dense_feature_dim, 16, dense_fm_param_attr) - - - def sparse_fm_layer(input, emb_dict_size, factor_size, fm_param_attr): - - first_embeddings = fluid.layers.embedding( - input=input, dtype='float32', size=[emb_dict_size, 1], is_sparse=True) - first_order = fluid.layers.sequence_pool(input=first_embeddings, pool_type='sum') - - nonzero_embeddings = fluid.layers.embedding( - input=input, dtype='float32', size=[emb_dict_size, factor_size], - param_attr=fm_param_attr, is_sparse=True) - summed_features_emb = fluid.layers.sequence_pool(input=nonzero_embeddings, pool_type='sum') - summed_features_emb_square = fluid.layers.square(summed_features_emb) - - squared_features_emb = fluid.layers.square(nonzero_embeddings) - squared_sum_features_emb = fluid.layers.sequence_pool( - input=squared_features_emb, pool_type='sum') - - second_order = 0.5 * (summed_features_emb_square - squared_sum_features_emb) - return first_order, second_order - - sparse_fm_param_attr = fluid.param_attr.ParamAttr(name="SparseFeatFactors", - initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(sparse_feature_dim))) - - #data = fluid.layers.data(name='ids', shape=[1], dtype='float32') - sparse_fm_first, sparse_fm_second = sparse_fm_layer( - context_feature_fm, sparse_feature_dim, 16, sparse_fm_param_attr) - - def embedding_layer(input): - return fluid.layers.embedding( - input=input, - is_sparse=True, - # you need to patch https://github.com/PaddlePaddle/Paddle/pull/14190 - # if you want to set is_distributed to True - is_distributed=False, - size=[sparse_feature_dim, embedding_size], - param_attr=fluid.ParamAttr(name="SparseFeatFactors", - initializer=fluid.initializer.Uniform())) - - sparse_embed_seq = list(map(embedding_layer, context_feature)) - - concated_ori = fluid.layers.concat(sparse_embed_seq + [dense_feature], axis=1) - concated = fluid.layers.batch_norm(input=concated_ori, name="bn", epsilon=1e-4) - - deep = deep_net(concated) - - predict = fluid.layers.fc(input=[deep, sparse_fm_first, sparse_fm_second, dense_fm_first, dense_fm_second], size=2, act="softmax", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(deep.shape[1])), learning_rate=0.01)) - - #similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(predict, min=-15.0, max=15.0), name="similarity_norm") - - cost = fluid.layers.cross_entropy(input=predict, label=label) - - avg_cost = fluid.layers.reduce_sum(cost) - 
accuracy = fluid.layers.accuracy(input=predict, label=label) - auc_var, batch_auc_var, auc_states = \ - fluid.layers.auc(input=predict, label=label, num_thresholds=2 ** 12, slide_steps=20) - return avg_cost, auc_var, batch_auc_var, accuracy, predict - - -def deep_net(concated, lr_x=0.0001): - fc_layers_input = [concated] - fc_layers_size = [400, 400, 400] - fc_layers_act = ["relu"] * (len(fc_layers_size)) - - for i in range(len(fc_layers_size)): - fc = fluid.layers.fc( - input=fc_layers_input[-1], - size=fc_layers_size[i], - act=fc_layers_act[i], - param_attr=fluid.ParamAttr(learning_rate=lr_x * 0.5)) - - fc_layers_input.append(fc) - #w_res = fluid.layers.create_parameter(shape=[353, 16], dtype='float32', name="w_res") - #high_path = fluid.layers.matmul(concated, w_res) - - #return fluid.layers.elementwise_add(high_path, fc_layers_input[-1]) - return fc_layers_input[-1] diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_conf.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_conf.py deleted file mode 100644 index ef6d0103..00000000 --- a/PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_conf.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
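# A standalone NumPy sketch of the plain feed-forward stack that deep_net()
# below builds with fluid.layers.fc; the random weights are placeholders for
# the learned parameters, so this only illustrates the shapes and the
# fc + relu structure.
import numpy as np

def mlp(x, layer_sizes):
    for size in layer_sizes:
        W = np.random.randn(x.shape[1], size) * 0.01  # placeholder weights
        x = np.maximum(np.matmul(x, W), 0.0)  # fc followed by relu
    return x

# dim_concated = 65 + 16 * 25 = 465 (user profile plus embedded context)
h = mlp(np.random.rand(2, 465), [128, 64, 32, 16])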
- - -import paddle.fluid as fluid -import math - -user_profile_dim = 65 -num_context = 25 -dim_fm_vector = 16 -dim_concated = user_profile_dim + dim_fm_vector * (num_context) - - -def ctr_deepfm_dataset(user_profile, context_feature, label, - embedding_size, sparse_feature_dim): - def embedding_layer(input): - return fluid.layers.embedding( - input=input, - is_sparse=True, - # you need to patch https://github.com/PaddlePaddle/Paddle/pull/14190 - # if you want to set is_distributed to True - is_distributed=False, - size=[sparse_feature_dim, embedding_size], - param_attr=fluid.ParamAttr(name="SparseFeatFactors", - initializer=fluid.initializer.Uniform())) - - sparse_embed_seq = list(map(embedding_layer, context_feature)) - - w = fluid.layers.create_parameter( - shape=[65, 65], dtype='float32', - name="w_fm") - user_profile_emb = fluid.layers.matmul(user_profile, w) - - concated_ori = fluid.layers.concat(sparse_embed_seq + [user_profile_emb], axis=1) - concated = fluid.layers.batch_norm(input=concated_ori, name="bn", epsilon=1e-4) - - deep = deep_net(concated) - linear_term, second_term = fm(concated, dim_concated, 8) #depend on the number of context feature - - predict = fluid.layers.fc(input=[deep, linear_term, second_term], size=2, act="softmax", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(deep.shape[1])), learning_rate=0.01)) - - #similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(predict, min=-15.0, max=15.0), name="similarity_norm") - - - cost = fluid.layers.cross_entropy(input=predict, label=label) - - avg_cost = fluid.layers.reduce_sum(cost) - accuracy = fluid.layers.accuracy(input=predict, label=label) - auc_var, batch_auc_var, auc_states = \ - fluid.layers.auc(input=predict, label=label, num_thresholds=2 ** 12, slide_steps=20) - return avg_cost, auc_var, batch_auc_var, accuracy, predict - - -def deep_net(concated, lr_x=0.0001): - fc_layers_input = [concated] - fc_layers_size = [128, 64, 32, 16] - fc_layers_act = ["relu"] * (len(fc_layers_size)) - - for i in range(len(fc_layers_size)): - fc = fluid.layers.fc( - input=fc_layers_input[-1], - size=fc_layers_size[i], - act=fc_layers_act[i], - param_attr=fluid.ParamAttr(learning_rate=lr_x * 0.5)) - - fc_layers_input.append(fc) - - return fc_layers_input[-1] - - -def fm(concated, emb_dict_size, factor_size, lr_x=0.0001): - linear_term = fluid.layers.fc(input=concated, size=8, act=None, param_attr=fluid.ParamAttr(learning_rate=lr_x)) - - emb_table = fluid.layers.create_parameter(shape=[emb_dict_size, factor_size], - dtype='float32') - - input_mul_factor = fluid.layers.matmul(concated, emb_table) - input_mul_factor_square = fluid.layers.square(input_mul_factor) - input_square = fluid.layers.square(concated) - factor_square = fluid.layers.square(emb_table) - input_square_mul_factor_square = fluid.layers.matmul(input_square, factor_square) - - second_term = 0.5 * (input_mul_factor_square - input_square_mul_factor_square) - - return linear_term, second_term - - - - - - - - diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_confv4.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_confv4.py deleted file mode 100644 index 97000168..00000000 --- a/PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_confv4.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle.fluid as fluid -import math - -user_profile_dim = 65 -slot_1 = [0, 1, 2, 3, 4, 5] -slot_2 = [6] -slot_3 = [7, 8, 9, 10, 11] -slot_4 = [12, 13, 14, 15, 16] -slot_5 = [17, 18, 19, 20] -num_context = 25 -num_slots_pair = 5 -dim_fm_vector = 16 -dim_concated = user_profile_dim + dim_fm_vector * (num_context + num_slots_pair) - -def ctr_deepfm_dataset(user_profile, dense_feature, context_feature, label, - embedding_size, sparse_feature_dim): - def embedding_layer(input): - return fluid.layers.embedding( - input=input, - is_sparse=True, - # you need to patch https://github.com/PaddlePaddle/Paddle/pull/14190 - # if you want to set is_distributed to True - is_distributed=False, - size=[sparse_feature_dim, embedding_size], - param_attr=fluid.ParamAttr(name="SparseFeatFactors", - initializer=fluid.initializer.Uniform())) - - sparse_embed_seq = list(map(embedding_layer, context_feature)) - - w = fluid.layers.create_parameter( - shape=[65, 65], dtype='float32', - name="w_fm") - - user_emb_list = [] - user_profile_emb = fluid.layers.matmul(user_profile, w) - user_emb_list.append(user_profile_emb) - user_emb_list.append(dense_feature) - - w1 = fluid.layers.create_parameter(shape=[65, dim_fm_vector], dtype='float32', name="w_1") - w2 = fluid.layers.create_parameter(shape=[65, dim_fm_vector], dtype='float32', name="w_2") - w3 = fluid.layers.create_parameter(shape=[65, dim_fm_vector], dtype='float32', name="w_3") - w4 = fluid.layers.create_parameter(shape=[65, dim_fm_vector], dtype='float32', name="w_4") - w5 = fluid.layers.create_parameter(shape=[65, dim_fm_vector], dtype='float32', name="w_5") - user_profile_emb_1 = fluid.layers.matmul(user_profile, w1) - user_profile_emb_2 = fluid.layers.matmul(user_profile, w2) - user_profile_emb_3 = fluid.layers.matmul(user_profile, w3) - user_profile_emb_4 = fluid.layers.matmul(user_profile, w4) - user_profile_emb_5 = fluid.layers.matmul(user_profile, w5) - - sparse_embed_seq_1 = embedding_layer(context_feature[slot_1[0]]) - sparse_embed_seq_2 = embedding_layer(context_feature[slot_2[0]]) - sparse_embed_seq_3 = embedding_layer(context_feature[slot_3[0]]) - sparse_embed_seq_4 = embedding_layer(context_feature[slot_4[0]]) - sparse_embed_seq_5 = embedding_layer(context_feature[slot_5[0]]) - for i in slot_1[1:-1]: - sparse_embed_seq_1 = fluid.layers.elementwise_add(sparse_embed_seq_1, embedding_layer(context_feature[i])) - for i in slot_2[1:-1]: - sparse_embed_seq_2 = fluid.layers.elementwise_add(sparse_embed_seq_2, embedding_layer(context_feature[i])) - for i in slot_3[1:-1]: - sparse_embed_seq_3 = fluid.layers.elementwise_add(sparse_embed_seq_3, embedding_layer(context_feature[i])) - for i in slot_4[1:-1]: - sparse_embed_seq_4 = fluid.layers.elementwise_add(sparse_embed_seq_4, embedding_layer(context_feature[i])) - for i in slot_5[1:-1]: - sparse_embed_seq_5 = fluid.layers.elementwise_add(sparse_embed_seq_5, embedding_layer(context_feature[i])) - - ele_product_1 = fluid.layers.elementwise_mul(user_profile_emb_1, sparse_embed_seq_1) - user_emb_list.append(ele_product_1) - ele_product_2 = fluid.layers.elementwise_mul(user_profile_emb_2, 
sparse_embed_seq_2) - user_emb_list.append(ele_product_2) - ele_product_3 = fluid.layers.elementwise_mul(user_profile_emb_3, sparse_embed_seq_3) - user_emb_list.append(ele_product_3) - ele_product_4 = fluid.layers.elementwise_mul(user_profile_emb_4, sparse_embed_seq_4) - user_emb_list.append(ele_product_4) - ele_product_5 = fluid.layers.elementwise_mul(user_profile_emb_5, sparse_embed_seq_5) - user_emb_list.append(ele_product_5) - - ffm_1 = fluid.layers.reduce_sum(ele_product_1, dim=1, keep_dim=True) - ffm_2 = fluid.layers.reduce_sum(ele_product_2, dim=1, keep_dim=True) - ffm_3 = fluid.layers.reduce_sum(ele_product_3, dim=1, keep_dim=True) - ffm_4 = fluid.layers.reduce_sum(ele_product_4, dim=1, keep_dim=True) - ffm_5 = fluid.layers.reduce_sum(ele_product_5, dim=1, keep_dim=True) - - - concated_ori = fluid.layers.concat(sparse_embed_seq + user_emb_list, axis=1) - concated = fluid.layers.batch_norm(input=concated_ori, name="bn", epsilon=1e-4) - - deep = deep_net(concated) - linear_term, second_term = fm(concated, dim_concated, 8) #depend on the number of context feature - - predict = fluid.layers.fc(input=[deep, linear_term, second_term, ffm_1, ffm_2, ffm_3, ffm_4, ffm_5], size=2, act="softmax", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(deep.shape[1])), learning_rate=0.01)) - - #similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(predict, min=-15.0, max=15.0), name="similarity_norm") - - - cost = fluid.layers.cross_entropy(input=predict, label=label) - - avg_cost = fluid.layers.reduce_sum(cost) - accuracy = fluid.layers.accuracy(input=predict, label=label) - auc_var, batch_auc_var, auc_states = \ - fluid.layers.auc(input=predict, label=label, num_thresholds=2 ** 12, slide_steps=20) - return avg_cost, auc_var, batch_auc_var, accuracy, predict - - -def deep_net(concated, lr_x=0.0001): - fc_layers_input = [concated] - fc_layers_size = [256, 128, 64, 32, 16] - fc_layers_act = ["relu"] * (len(fc_layers_size)) - - for i in range(len(fc_layers_size)): - fc = fluid.layers.fc( - input=fc_layers_input[-1], - size=fc_layers_size[i], - act=fc_layers_act[i], - param_attr=fluid.ParamAttr(learning_rate=lr_x * 0.5)) - - fc_layers_input.append(fc) - w_res = fluid.layers.create_parameter(shape=[dim_concated, 16], dtype='float32', name="w_res") - high_path = fluid.layers.matmul(concated, w_res) - - return fluid.layers.elementwise_add(high_path, fc_layers_input[-1]) - #return fc_layers_input[-1] - - -def fm(concated, emb_dict_size, factor_size, lr_x=0.0001): - linear_term = fluid.layers.fc(input=concated, size=8, act=None, param_attr=fluid.ParamAttr(learning_rate=lr_x)) - - emb_table = fluid.layers.create_parameter(shape=[emb_dict_size, factor_size], - dtype='float32') - - input_mul_factor = fluid.layers.matmul(concated, emb_table) - input_mul_factor_square = fluid.layers.square(input_mul_factor) - input_square = fluid.layers.square(concated) - factor_square = fluid.layers.square(emb_table) - input_square_mul_factor_square = fluid.layers.matmul(input_square, factor_square) - - second_term = 0.5 * (input_mul_factor_square - input_square_mul_factor_square) - - return linear_term, second_term \ No newline at end of file diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_confv6.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_confv6.py deleted file mode 100644 index ed638e97..00000000 --- a/PaddleRec/ctr/Paddle_baseline_KDD2019/networks/network_confv6.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle.fluid as fluid -import math - -user_profile_dim = 65 -dense_feature_dim = 3 - -def ctr_deepfm_dataset(dense_feature, context_feature, context_feature_fm, label, - embedding_size, sparse_feature_dim): - def dense_fm_layer(input, emb_dict_size, factor_size, fm_param_attr): - - first_order = fluid.layers.fc(input=input, size=1) - emb_table = fluid.layers.create_parameter(shape=[emb_dict_size, factor_size], - dtype='float32', attr=fm_param_attr) - - input_mul_factor = fluid.layers.matmul(input, emb_table) - input_mul_factor_square = fluid.layers.square(input_mul_factor) - input_square = fluid.layers.square(input) - factor_square = fluid.layers.square(emb_table) - input_square_mul_factor_square = fluid.layers.matmul(input_square, factor_square) - - second_order = 0.5 * (input_mul_factor_square - input_square_mul_factor_square) - return first_order, second_order - - - dense_fm_param_attr = fluid.param_attr.ParamAttr(name="DenseFeatFactors", - initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(dense_feature_dim))) - dense_fm_first, dense_fm_second = dense_fm_layer( - dense_feature, dense_feature_dim, 16, dense_fm_param_attr) - - - def sparse_fm_layer(input, emb_dict_size, factor_size, fm_param_attr): - - first_embeddings = fluid.layers.embedding( - input=input, dtype='float32', size=[emb_dict_size, 1], is_sparse=True) - first_order = fluid.layers.sequence_pool(input=first_embeddings, pool_type='sum') - - nonzero_embeddings = fluid.layers.embedding( - input=input, dtype='float32', size=[emb_dict_size, factor_size], - param_attr=fm_param_attr, is_sparse=True) - summed_features_emb = fluid.layers.sequence_pool(input=nonzero_embeddings, pool_type='sum') - summed_features_emb_square = fluid.layers.square(summed_features_emb) - - squared_features_emb = fluid.layers.square(nonzero_embeddings) - squared_sum_features_emb = fluid.layers.sequence_pool( - input=squared_features_emb, pool_type='sum') - - second_order = 0.5 * (summed_features_emb_square - squared_sum_features_emb) - return first_order, second_order - - sparse_fm_param_attr = fluid.param_attr.ParamAttr(name="SparseFeatFactors", - initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(sparse_feature_dim))) - - #data = fluid.layers.data(name='ids', shape=[1], dtype='float32') - sparse_fm_first, sparse_fm_second = sparse_fm_layer( - context_feature_fm, sparse_feature_dim, 16, sparse_fm_param_attr) - - def embedding_layer(input): - return fluid.layers.embedding( - input=input, - is_sparse=True, - # you need to patch https://github.com/PaddlePaddle/Paddle/pull/14190 - # if you want to set is_distributed to True - is_distributed=False, - size=[sparse_feature_dim, embedding_size], - param_attr=fluid.ParamAttr(name="SparseFeatFactors", - initializer=fluid.initializer.Uniform())) - - sparse_embed_seq = list(map(embedding_layer, context_feature)) - - concated_ori = fluid.layers.concat(sparse_embed_seq + [dense_feature], axis=1) - concated = 
fluid.layers.batch_norm(input=concated_ori, name="bn", epsilon=1e-4) - - deep = deep_net(concated) - - predict = fluid.layers.fc(input=[deep, sparse_fm_first, sparse_fm_second, dense_fm_first, dense_fm_second], size=2, act="softmax", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(deep.shape[1])), learning_rate=0.01)) - - #similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(predict, min=-15.0, max=15.0), name="similarity_norm") - - cost = fluid.layers.cross_entropy(input=predict, label=label) - - avg_cost = fluid.layers.reduce_sum(cost) - accuracy = fluid.layers.accuracy(input=predict, label=label) - auc_var, batch_auc_var, auc_states = \ - fluid.layers.auc(input=predict, label=label, num_thresholds=2 ** 12, slide_steps=20) - return avg_cost, auc_var, batch_auc_var, accuracy, predict - - -def deep_net(concated, lr_x=0.0001): - fc_layers_input = [concated] - fc_layers_size = [400, 400, 400] - fc_layers_act = ["relu"] * (len(fc_layers_size)) - - for i in range(len(fc_layers_size)): - fc = fluid.layers.fc( - input=fc_layers_input[-1], - size=fc_layers_size[i], - act=fc_layers_act[i], - param_attr=fluid.ParamAttr(learning_rate=lr_x * 0.5)) - - fc_layers_input.append(fc) - #w_res = fluid.layers.create_parameter(shape=[353, 16], dtype='float32', name="w_res") - #high_path = fluid.layers.matmul(concated, w_res) - - #return fluid.layers.elementwise_add(high_path, fc_layers_input[-1]) - return fc_layers_input[-1] \ No newline at end of file diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/pre_process_test.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/pre_process_test.py deleted file mode 100644 index 44462a99..00000000 --- a/PaddleRec/ctr/Paddle_baseline_KDD2019/pre_process_test.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
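Both the dense and sparse FM layers above rely on the standard factorization-machine identity: the sum of pairwise interactions ⟨v_i, v_j⟩·x_i·x_j equals 0.5 * ((xV)^2 - (x^2)(V^2)) summed over the factor dimension, which is why no explicit i < j loop appears in the code. A minimal NumPy sketch (illustrative only, with hypothetical shapes) checking the equivalence:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=5)        # one dense input row, hypothetical dim 5
V = rng.normal(size=(5, 16))  # factor table, factor_size=16 as in the layer

# brute force: sum of <v_i, v_j> * x_i * x_j over all pairs i < j
brute = sum(V[i] @ V[j] * x[i] * x[j]
            for i in range(5) for j in range(i + 1, 5))

# vectorized form used above, before the final reduce over factors
second_order = 0.5 * ((x @ V) ** 2 - (x ** 2) @ (V ** 2))

assert np.isclose(brute, second_order.sum())
```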
- -import os, sys, time, random, csv, datetime, json -import pandas as pd -import numpy as np -import argparse -import logging -import time - -logging.basicConfig( - format='%(asctime)s - %(levelname)s - %(message)s') -logger = logging.getLogger("preprocess") -logger.setLevel(logging.INFO) - -TEST_QUERIES_PATH = "./data_set_phase1/test_queries.csv" -TEST_PLANS_PATH = "./data_set_phase1/test_plans.csv" -TRAIN_CLICK_PATH = "./data_set_phase1/train_clicks.csv" -PROFILES_PATH = "./data_set_phase1/profiles.csv" -OUT_NORM_TEST_PATH = "./out/normed_test_session.txt" -OUT_RAW_TEST_PATH = "./out/test_session.txt" - -O1_MIN = 115.47 -O1_MAX = 117.29 - -O2_MIN = 39.46 -O2_MAX = 40.97 - -D1_MIN = 115.44 -D1_MAX = 117.37 - -D2_MIN = 39.46 -D2_MAX = 40.96 -SCALE_OD = 0.02 - -DISTANCE_MIN = 1.0 -DISTANCE_MAX = 225864.0 -THRESHOLD_DIS = 40000.0 -SCALE_DIS = 500 - -PRICE_MIN = 200.0 -PRICE_MAX = 92300.0 -THRESHOLD_PRICE = 20000 -SCALE_PRICE = 100 - -ETA_MIN = 1.0 -ETA_MAX = 72992.0 -THRESHOLD_ETA = 10800.0 -SCALE_ETA = 120 - - -def build_norm_feature(): - with open(OUT_NORM_TEST_PATH, 'w') as nf: - with open(OUT_RAW_TEST_PATH, 'r') as f: - for line in f: - cur_map = json.loads(line) - - if cur_map["plan"]["distance"] > THRESHOLD_DIS: - cur_map["plan"]["distance"] = int(THRESHOLD_DIS) - elif cur_map["plan"]["distance"] > 0: - cur_map["plan"]["distance"] = int(cur_map["plan"]["distance"] / SCALE_DIS) - - if cur_map["plan"]["price"] and cur_map["plan"]["price"] > THRESHOLD_PRICE: - cur_map["plan"]["price"] = int(THRESHOLD_PRICE) - elif not cur_map["plan"]["price"] or cur_map["plan"]["price"] < 0: - cur_map["plan"]["price"] = 0 - else: - cur_map["plan"]["price"] = int(cur_map["plan"]["price"] / SCALE_PRICE) - - if cur_map["plan"]["eta"] > THRESHOLD_ETA: - cur_map["plan"]["eta"] = int(THRESHOLD_ETA) - elif cur_map["plan"]["eta"] > 0: - cur_map["plan"]["eta"] = int(cur_map["plan"]["eta"] / SCALE_ETA) - - # o1 - if cur_map["query"]["o1"] > O1_MAX: - cur_map["query"]["o1"] = int((O1_MAX - O1_MIN) / SCALE_OD + 1) - elif cur_map["query"]["o1"] < O1_MIN: - cur_map["query"]["o1"] = 0 - else: - cur_map["query"]["o1"] = int((cur_map["query"]["o1"] - O1_MIN) / 0.02) - - # o2 - if cur_map["query"]["o2"] > O2_MAX: - cur_map["query"]["o2"] = int((O2_MAX - O2_MIN) / SCALE_OD + 1) - elif cur_map["query"]["o2"] < O2_MIN: - cur_map["query"]["o2"] = 0 - else: - cur_map["query"]["o2"] = int((cur_map["query"]["o2"] - O2_MIN) / 0.02) - - # d1 - if cur_map["query"]["d1"] > D1_MAX: - cur_map["query"]["d1"] = int((D1_MAX - D1_MIN) / SCALE_OD + 1) - elif cur_map["query"]["d1"] < D1_MIN: - cur_map["query"]["d1"] = 0 - else: - cur_map["query"]["d1"] = int((cur_map["query"]["d1"] - D1_MIN) / SCALE_OD) - - # d2 - if cur_map["query"]["d2"] > D2_MAX: - cur_map["query"]["d2"] = int((D2_MAX - D2_MIN) / SCALE_OD + 1) - elif cur_map["query"]["d2"] < D2_MIN: - cur_map["query"]["d2"] = 0 - else: - cur_map["query"]["d2"] = int((cur_map["query"]["d2"] - D2_MIN) / SCALE_OD) - - cur_json_instance = json.dumps(cur_map) - nf.write(cur_json_instance + '\n') - - -def preprocess(): - """ - Construct the train data indexed by session id and mode id jointly. Convert some of the raw features (user profile, - od pair, req time, click time, eta, price, distance, transport mode) to one-hot ids used for - embedding. We split the one-hot features into two categories: user feature and context feature for - better understanding of FM algorithm. 
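build_norm_feature above applies one clamp-and-bucket rule to every query coordinate: values above the range map to the sentinel bucket (hi - lo) / scale + 1, values below map to 0, and in-range values fall into evenly spaced 0.02-degree cells (the o1/o2 branches hard-code 0.02, which is the same value as SCALE_OD). A standalone sketch of the rule:

```python
def bucketize(value, lo, hi, scale):
    """Clamp-and-bucket a raw coordinate into an integer id, mirroring the
    o1/o2/d1/d2 handling above (constants copied from this file)."""
    if value > hi:
        return int((hi - lo) / scale + 1)  # overflow sentinel bucket
    if value < lo:
        return 0                           # underflow sentinel bucket
    return int((value - lo) / scale)

print(bucketize(116.3, 115.47, 117.29, 0.02))  # o1 -> bucket 41
```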
- Note that the user profile is already provided by one-hot encoded form, we convert it back to the - ids for unity with the context feature and easily using of PaddlePaddle embedding layer. Given the - train clicks data, we label each train instance with 1 or 0 depend on if this instance is clicked or - not. - :return: - """ - - train_data_dict = {} - with open("./weather.json", 'r') as f: - weather_dict = json.load(f) - - with open(TEST_QUERIES_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - train_index_list = [] - for k, line in enumerate(csv_reader): - if k == 0: continue - if line[0] == "": continue - if line[1] == "": - train_index_list.append(line[0] + "_0") - else: - train_index_list.append(line[0] + "_" + line[1]) - - train_index = line[0] - train_data_dict[train_index] = {} - train_data_dict[train_index]["pid"] = line[1] - train_data_dict[train_index]["query"] = {} - - reqweekday = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%w") - reqhour = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%H") - - date_key = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%m-%d") - train_data_dict[train_index]["weather"] = {} - train_data_dict[train_index]["weather"].update({"max_temp": weather_dict[date_key]["max_temp"]}) - train_data_dict[train_index]["weather"].update({"min_temp": weather_dict[date_key]["min_temp"]}) - train_data_dict[train_index]["weather"].update({"wea": weather_dict[date_key]["weather"]}) - train_data_dict[train_index]["weather"].update({"wind": weather_dict[date_key]["wind"]}) - - train_data_dict[train_index]["query"].update({"weekday":reqweekday}) - train_data_dict[train_index]["query"].update({"hour":reqhour}) - - o = line[3].split(',') - o_first = o[0] - o_second = o[1] - train_data_dict[train_index]["query"].update({"o1":float(o_first)}) - train_data_dict[train_index]["query"].update({"o2":float(o_second)}) - - d = line[4].split(',') - d_first = d[0] - d_second = d[1] - train_data_dict[train_index]["query"].update({"d1":float(d_first)}) - train_data_dict[train_index]["query"].update({"d2":float(d_second)}) - - plan_map = {} - plan_data = pd.read_csv(TEST_PLANS_PATH) - for index, row in plan_data.iterrows(): - plans_str = row['plans'] - plans_list = json.loads(plans_str) - session_id = str(row['sid']) - # train_data_dict[session_id]["plans"] = [] - plan_map[session_id] = plans_list - - profile_map = {} - with open(PROFILES_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - for k, line in enumerate(csv_reader): - if k == 0: continue - profile_map[line[0]] = [i for i in range(len(line)) if line[i] == "1.0"] - - session_click_map = {} - with open(TRAIN_CLICK_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - for k, line in enumerate(csv_reader): - if k == 0: continue - if line[0] == "" or line[1] == "" or line[2] == "": - continue - session_click_map[line[0]] = line[2] - #return train_data_dict, profile_map, session_click_map, plan_map - generate_sparse_features(train_data_dict, profile_map, session_click_map, plan_map) - - -def generate_sparse_features(train_data_dict, profile_map, session_click_map, plan_map): - if not os.path.isdir("./out/"): - os.mkdir("./out/") - with open(os.path.join("./out/", "test_session.txt"), 'w') as f_train: - for session_id, plan_list in plan_map.items(): - if session_id not in train_data_dict: - continue - cur_map = train_data_dict[session_id] - cur_map["session_id"] = session_id - if cur_map["pid"] != "": - cur_map["profile"] = 
profile_map[cur_map["pid"]] - else: - cur_map["profile"] = [0] - del cur_map["pid"] - whole_rank = 0 - for plan in plan_list: - whole_rank += 1 - cur_map["mode_rank" + str(whole_rank)] = plan["transport_mode"] - - if whole_rank < 5: - for r in range(whole_rank + 1, 6): - cur_map["mode_rank" + str(r)] = -1 - - cur_map["whole_rank"] = whole_rank - flag_click = False - rank = 1 - - price_list = [] - eta_list = [] - distance_list = [] - for plan in plan_list: - if not plan["price"]: - price_list.append(0) - else: - price_list.append(int(plan["price"])) - eta_list.append(int(plan["eta"])) - distance_list.append(int(plan["distance"])) - price_list.sort(reverse=False) - eta_list.sort(reverse=False) - distance_list.sort(reverse=False) - - for plan in plan_list: - if plan["price"] and int(plan["price"]) == price_list[0]: - cur_map["mode_min_price"] = plan["transport_mode"] - if plan["price"] and int(plan["price"]) == price_list[-1]: - cur_map["mode_max_price"] = plan["transport_mode"] - if int(plan["eta"]) == eta_list[0]: - cur_map["mode_min_eta"] = plan["transport_mode"] - if int(plan["eta"]) == eta_list[-1]: - cur_map["mode_max_eta"] = plan["transport_mode"] - if int(plan["distance"]) == distance_list[0]: - cur_map["mode_min_distance"] = plan["transport_mode"] - if int(plan["distance"]) == distance_list[-1]: - cur_map["mode_max_distance"] = plan["transport_mode"] - if "mode_min_price" not in cur_map: - cur_map["mode_min_price"] = -1 - if "mode_max_price" not in cur_map: - cur_map["mode_max_price"] = -1 - - - for plan in plan_list: - cur_price = int(plan["price"]) if plan["price"] else 0 - cur_eta = int(plan["eta"]) - cur_distance = int(plan["distance"]) - cur_map["price_rank"] = price_list.index(cur_price) + 1 - cur_map["eta_rank"] = eta_list.index(cur_eta) + 1 - cur_map["distance_rank"] = distance_list.index(cur_distance) + 1 - - if ("transport_mode" in plan) and (session_id in session_click_map) and ( - int(plan["transport_mode"]) == int(session_click_map[session_id])): - cur_map["plan"] = plan - cur_map["label"] = 1 - flag_click = True - # print("label is 1") - else: - cur_map["plan"] = plan - cur_map["label"] = 0 - - cur_map["plan_rank"] = rank - rank += 1 - cur_json_instance = json.dumps(cur_map) - f_train.write(cur_json_instance + '\n') - - cur_map["plan"]["distance"] = -1 - cur_map["plan"]["price"] = -1 - cur_map["plan"]["eta"] = -1 - cur_map["plan"]["transport_mode"] = 0 - cur_map["plan_rank"] = 0 - cur_map["price_rank"] = 0 - cur_map["eta_rank"] = 0 - cur_map["plan_rank"] = 0 - cur_map["label"] = 1 - cur_json_instance = json.dumps(cur_map) - f_train.write(cur_json_instance + '\n') - - build_norm_feature() - - -if __name__ == "__main__": - preprocess() \ No newline at end of file diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/pre_test_dense.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/pre_test_dense.py deleted file mode 100644 index 58fb483d..00000000 --- a/PaddleRec/ctr/Paddle_baseline_KDD2019/pre_test_dense.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import os, sys, time, random, csv, datetime, json -import pandas as pd -import numpy as np -import argparse -import logging -import time - -logging.basicConfig( - format='%(asctime)s - %(levelname)s - %(message)s') -logger = logging.getLogger("preprocess") -logger.setLevel(logging.INFO) - -TRAIN_QUERIES_PATH = "./data_set_phase1/test_queries.csv" -TRAIN_PLANS_PATH = "./data_set_phase1/test_plans.csv" -TRAIN_CLICK_PATH = "./data_set_phase1/train_clicks.csv" -PROFILES_PATH = "./data_set_phase1/profiles.csv" - -O1_MIN = 115.47 -O1_MAX = 117.29 - -O2_MIN = 39.46 -O2_MAX = 40.97 - -D1_MIN = 115.44 -D1_MAX = 117.37 - -D2_MIN = 39.46 -D2_MAX = 40.96 - -DISTANCE_MIN = 1.0 -DISTANCE_MAX = 225864.0 -THRESHOLD_DIS = 200000.0 - -PRICE_MIN = 200.0 -PRICE_MAX = 92300.0 -THRESHOLD_PRICE = 20000 - -ETA_MIN = 1.0 -ETA_MAX = 72992.0 -THRESHOLD_ETA = 10800.0 - - -def build_norm_feature(): - with open("./out/normed_test_session.txt", 'w') as nf: - with open("./out/test_session.txt", 'r') as f: - for line in f: - cur_map = json.loads(line) - - cur_map["plan"]["distance"] = (cur_map["plan"]["distance"] - DISTANCE_MIN) / (DISTANCE_MAX - DISTANCE_MIN) - - if cur_map["plan"]["price"]: - cur_map["plan"]["price"] = (cur_map["plan"]["price"] - PRICE_MIN) / (PRICE_MAX - PRICE_MIN) - else: - cur_map["plan"]["price"] = 0.0 - - cur_map["plan"]["eta"] = (cur_map["plan"]["eta"] - ETA_MIN) / (ETA_MAX - ETA_MIN) - - cur_json_instance = json.dumps(cur_map) - nf.write(cur_json_instance + '\n') - - -def preprocess(): - """ - Construct the train data indexed by session id and mode id jointly. Convert all the raw features (user profile, - od pair, req time, click time, eta, price, distance, transport mode) to one-hot ids used for - embedding. We split the one-hot features into two categories: user feature and context feature for - better understanding of FFM algorithm. - Note that the user profile is already provided by one-hot encoded form, we convert it back to the - ids for unity with the context feature and easily using of PaddlePaddle embedding layer. Given the - train clicks data, we label each train instance with 1 or 0 depend on if this instance is clicked or - not. 
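In contrast to the bucketized pipeline, this dense variant keeps distance, price, and eta continuous: build_norm_feature above applies plain min-max scaling and maps missing prices to 0.0. A minimal sketch with the constants from this file:

```python
def min_max(value, lo, hi):
    return (value - lo) / (hi - lo)

plan = {"distance": 3200.0, "price": None, "eta": 960.0}    # hypothetical plan
plan["distance"] = min_max(plan["distance"], 1.0, 225864.0)  # DISTANCE_MIN/MAX
plan["price"] = (min_max(plan["price"], 200.0, 92300.0)      # PRICE_MIN/MAX
                 if plan["price"] else 0.0)
plan["eta"] = min_max(plan["eta"], 1.0, 72992.0)             # ETA_MIN/MAX
```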
- :return: - """ - #args = parse_args() - - train_data_dict = {} - with open("./weather.json", 'r') as f: - weather_dict = json.load(f) - - with open(TRAIN_QUERIES_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - train_index_list = [] - for k, line in enumerate(csv_reader): - if k == 0: continue - if line[0] == "": continue - if line[1] == "": - train_index_list.append(line[0] + "_0") - else: - train_index_list.append(line[0] + "_" + line[1]) - - train_index = line[0] - train_data_dict[train_index] = {} - train_data_dict[train_index]["pid"] = line[1] - train_data_dict[train_index]["query"] = {} - - reqweekday = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%w") - reqhour = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%H") - - date_key = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%m-%d") - train_data_dict[train_index]["weather"] = {} - train_data_dict[train_index]["weather"].update({"max_temp": weather_dict[date_key]["max_temp"]}) - train_data_dict[train_index]["weather"].update({"min_temp": weather_dict[date_key]["min_temp"]}) - train_data_dict[train_index]["weather"].update({"wea": weather_dict[date_key]["weather"]}) - train_data_dict[train_index]["weather"].update({"wind": weather_dict[date_key]["wind"]}) - - train_data_dict[train_index]["query"].update({"weekday":reqweekday}) - train_data_dict[train_index]["query"].update({"hour":reqhour}) - - o = line[3].split(',') - o_first = o[0] - o_second = o[1] - train_data_dict[train_index]["query"].update({"o1":float(o_first)}) - train_data_dict[train_index]["query"].update({"o2":float(o_second)}) - - d = line[4].split(',') - d_first = d[0] - d_second = d[1] - train_data_dict[train_index]["query"].update({"d1":float(d_first)}) - train_data_dict[train_index]["query"].update({"d2":float(d_second)}) - - plan_map = {} - plan_data = pd.read_csv(TRAIN_PLANS_PATH) - for index, row in plan_data.iterrows(): - plans_str = row['plans'] - plans_list = json.loads(plans_str) - session_id = str(row['sid']) - # train_data_dict[session_id]["plans"] = [] - plan_map[session_id] = plans_list - - profile_map = {} - with open(PROFILES_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - for k, line in enumerate(csv_reader): - if k == 0: continue - profile_map[line[0]] = [i for i in range(len(line)) if line[i] == "1.0"] - - session_click_map = {} - with open(TRAIN_CLICK_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - for k, line in enumerate(csv_reader): - if k == 0: continue - if line[0] == "" or line[1] == "" or line[2] == "": - continue - session_click_map[line[0]] = line[2] - #return train_data_dict, profile_map, session_click_map, plan_map - generate_sparse_features(train_data_dict, profile_map, session_click_map, plan_map) - - -def generate_sparse_features(train_data_dict, profile_map, session_click_map, plan_map): - if not os.path.isdir("./out/"): - os.mkdir("./out/") - with open(os.path.join("./out/", "test_session.txt"), 'w') as f_train: - for session_id, plan_list in plan_map.items(): - if session_id not in train_data_dict: - continue - cur_map = train_data_dict[session_id] - cur_map["session_id"] = session_id - if cur_map["pid"] != "": - cur_map["profile"] = profile_map[cur_map["pid"]] - else: - cur_map["profile"] = [0] - # del cur_map["pid"] - whole_rank = 0 - for plan in plan_list: - whole_rank += 1 - cur_map["mode_rank" + str(whole_rank)] = plan["transport_mode"] - - if whole_rank < 5: - for r in range(whole_rank + 1, 6): - cur_map["mode_rank" + 
str(r)] = -1 - - cur_map["whole_rank"] = whole_rank - rank = 1 - - price_list = [] - eta_list = [] - distance_list = [] - for plan in plan_list: - if not plan["price"]: - price_list.append(0) - else: - price_list.append(int(plan["price"])) - eta_list.append(int(plan["eta"])) - distance_list.append(int(plan["distance"])) - price_list.sort(reverse=False) - eta_list.sort(reverse=False) - distance_list.sort(reverse=False) - - for plan in plan_list: - if plan["price"] and int(plan["price"]) == price_list[0]: - cur_map["mode_min_price"] = plan["transport_mode"] - if plan["price"] and int(plan["price"]) == price_list[-1]: - cur_map["mode_max_price"] = plan["transport_mode"] - if int(plan["eta"]) == eta_list[0]: - cur_map["mode_min_eta"] = plan["transport_mode"] - if int(plan["eta"]) == eta_list[-1]: - cur_map["mode_max_eta"] = plan["transport_mode"] - if int(plan["distance"]) == distance_list[0]: - cur_map["mode_min_distance"] = plan["transport_mode"] - if int(plan["distance"]) == distance_list[-1]: - cur_map["mode_max_distance"] = plan["transport_mode"] - if "mode_min_price" not in cur_map: - cur_map["mode_min_price"] = -1 - if "mode_max_price" not in cur_map: - cur_map["mode_max_price"] = -1 - - for plan in plan_list: - cur_price = int(plan["price"]) if plan["price"] else 0 - cur_eta = int(plan["eta"]) - cur_distance = int(plan["distance"]) - cur_map["price_rank"] = price_list.index(cur_price) + 1 - cur_map["eta_rank"] = eta_list.index(cur_eta) + 1 - cur_map["distance_rank"] = distance_list.index(cur_distance) + 1 - - if ("transport_mode" in plan) and (session_id in session_click_map) and ( - int(plan["transport_mode"]) == int(session_click_map[session_id])): - cur_map["plan"] = plan - cur_map["label"] = 1 - else: - cur_map["plan"] = plan - cur_map["label"] = 0 - - cur_map["plan_rank"] = rank - rank += 1 - cur_json_instance = json.dumps(cur_map) - f_train.write(cur_json_instance + '\n') - - cur_map["plan"]["distance"] = -1 - cur_map["plan"]["price"] = -1 - cur_map["plan"]["eta"] = -1 - cur_map["plan"]["transport_mode"] = 0 - cur_map["plan_rank"] = 0 - cur_map["price_rank"] = 0 - cur_map["eta_rank"] = 0 - cur_map["plan_rank"] = 0 - cur_map["label"] = 1 - cur_json_instance = json.dumps(cur_map) - f_train.write(cur_json_instance + '\n') - - - build_norm_feature() - - -if __name__ == "__main__": - preprocess() \ No newline at end of file diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/preprocess.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/preprocess.py deleted file mode 100644 index 8d61ae55..00000000 --- a/PaddleRec/ctr/Paddle_baseline_KDD2019/preprocess.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
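The per-plan rank features written above (price_rank, eta_rank, distance_rank) compare each plan against every plan in the same session; since list.index returns the first occurrence, tied values share the lowest rank. An isolated sketch of the price case:

```python
plans = [{"price": 700}, {"price": None}, {"price": 700}]  # hypothetical session

price_list = sorted(int(p["price"]) if p["price"] else 0 for p in plans)
for plan in plans:
    cur_price = int(plan["price"]) if plan["price"] else 0
    plan["price_rank"] = price_list.index(cur_price) + 1

# -> ranks [2, 1, 2]: the missing price ranks first, the tie shares rank 2
```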
- -import os, sys, time, random, csv, datetime, json -import pandas as pd -import numpy as np -import argparse -import logging -import time - -logging.basicConfig( - format='%(asctime)s - %(levelname)s - %(message)s') -logger = logging.getLogger("preprocess") -logger.setLevel(logging.INFO) - -TRAIN_QUERIES_PATH = "./data_set_phase1/train_queries.csv" -TRAIN_PLANS_PATH = "./data_set_phase1/train_plans.csv" -TRAIN_CLICK_PATH = "./data_set_phase1/train_clicks.csv" -PROFILES_PATH = "./data_set_phase1/profiles.csv" -OUT_NORM_TRAIN_PATH = "./out/normed_train.txt" -OUT_RAW_TRAIN_PATH = "./out/train.txt" - -OUT_DIR = "./out" - - -O1_MIN = 115.47 -O1_MAX = 117.29 - -O2_MIN = 39.46 -O2_MAX = 40.97 - -D1_MIN = 115.44 -D1_MAX = 117.37 - -D2_MIN = 39.46 -D2_MAX = 40.96 -SCALE_OD = 0.02 - -DISTANCE_MIN = 1.0 -DISTANCE_MAX = 225864.0 -THRESHOLD_DIS = 40000.0 -SCALE_DIS = 500 - -PRICE_MIN = 200.0 -PRICE_MAX = 92300.0 -THRESHOLD_PRICE = 20000 -SCALE_PRICE = 100 - -ETA_MIN = 1.0 -ETA_MAX = 72992.0 -THRESHOLD_ETA = 10800.0 -SCALE_ETA = 120 - - -def build_norm_feature(): - with open(OUT_NORM_TRAIN_PATH, 'w') as nf: - with open(OUT_RAW_TRAIN_PATH, 'r') as f: - for line in f: - cur_map = json.loads(line) - - if cur_map["plan"]["distance"] > THRESHOLD_DIS: - cur_map["plan"]["distance"] = int(THRESHOLD_DIS) - elif cur_map["plan"]["distance"] > 0: - cur_map["plan"]["distance"] = int(cur_map["plan"]["distance"] / SCALE_DIS) - - if cur_map["plan"]["price"] and cur_map["plan"]["price"] > THRESHOLD_PRICE: - cur_map["plan"]["price"] = int(THRESHOLD_PRICE) - elif not cur_map["plan"]["price"] or cur_map["plan"]["price"] < 0: - cur_map["plan"]["price"] = 0 - else: - cur_map["plan"]["price"] = int(cur_map["plan"]["price"] / SCALE_PRICE) - - if cur_map["plan"]["eta"] > THRESHOLD_ETA: - cur_map["plan"]["eta"] = int(THRESHOLD_ETA) - elif cur_map["plan"]["eta"] > 0: - cur_map["plan"]["eta"] = int(cur_map["plan"]["eta"] / SCALE_ETA) - - # o1 - if cur_map["query"]["o1"] > O1_MAX: - cur_map["query"]["o1"] = int((O1_MAX - O1_MIN) / SCALE_OD + 1) - elif cur_map["query"]["o1"] < O1_MIN: - cur_map["query"]["o1"] = 0 - else: - cur_map["query"]["o1"] = int((cur_map["query"]["o1"] - O1_MIN) / 0.02) - - # o2 - if cur_map["query"]["o2"] > O2_MAX: - cur_map["query"]["o2"] = int((O2_MAX - O2_MIN) / SCALE_OD + 1) - elif cur_map["query"]["o2"] < O2_MIN: - cur_map["query"]["o2"] = 0 - else: - cur_map["query"]["o2"] = int((cur_map["query"]["o2"] - O2_MIN) / 0.02) - - # d1 - if cur_map["query"]["d1"] > D1_MAX: - cur_map["query"]["d1"] = int((D1_MAX - D1_MIN) / SCALE_OD + 1) - elif cur_map["query"]["d1"] < D1_MIN: - cur_map["query"]["d1"] = 0 - else: - cur_map["query"]["d1"] = int((cur_map["query"]["d1"] - D1_MIN) / SCALE_OD) - - # d2 - if cur_map["query"]["d2"] > D2_MAX: - cur_map["query"]["d2"] = int((D2_MAX - D2_MIN) / SCALE_OD + 1) - elif cur_map["query"]["d2"] < D2_MIN: - cur_map["query"]["d2"] = 0 - else: - cur_map["query"]["d2"] = int((cur_map["query"]["d2"] - D2_MIN) / SCALE_OD) - - cur_json_instance = json.dumps(cur_map) - nf.write(cur_json_instance + '\n') - - -def preprocess(): - """ - Construct the train data indexed by session id and mode id jointly. Convert all the raw features (user profile, - od pair, req time, click time, eta, price, distance, transport mode) to one-hot ids used for - embedding. We split the one-hot features into two categories: user feature and context feature for - better understanding of FM algorithm. 
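The request-time features extracted just below come straight from strftime: "%w" yields the weekday as a single-digit string (0 = Sunday) and "%H" the zero-padded hour, so both are ready to use as sparse ids. For example:

```python
import datetime

req = datetime.datetime.strptime("2018-10-05 08:32:00", "%Y-%m-%d %H:%M:%S")
weekday, hour = req.strftime("%w"), req.strftime("%H")  # ("5", "08"): a Friday
```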
- Note that the user profile is already provided by one-hot encoded form, we treat it as embedded vector - for unity with the context feature and easily using of PaddlePaddle embedding layer. Given the - train clicks data, we label each train instance with 1 or 0 depend on if this instance is clicked or - not include non-click case. - :return: - """ - - train_data_dict = {} - with open(TRAIN_QUERIES_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - train_index_list = [] - for k, line in enumerate(csv_reader): - if k == 0: continue - if line[0] == "": continue - if line[1] == "": - train_index_list.append(line[0] + "_0") - else: - train_index_list.append(line[0] + "_" + line[1]) - - train_index = line[0] - train_data_dict[train_index] = {} - train_data_dict[train_index]["pid"] = line[1] - train_data_dict[train_index]["query"] = {} - - reqweekday = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%w") - reqhour = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%H") - - train_data_dict[train_index]["query"].update({"weekday":reqweekday}) - train_data_dict[train_index]["query"].update({"hour":reqhour}) - - o = line[3].split(',') - o_first = o[0] - o_second = o[1] - train_data_dict[train_index]["query"].update({"o1":float(o_first)}) - train_data_dict[train_index]["query"].update({"o2":float(o_second)}) - - d = line[4].split(',') - d_first = d[0] - d_second = d[1] - train_data_dict[train_index]["query"].update({"d1":float(d_first)}) - train_data_dict[train_index]["query"].update({"d2":float(d_second)}) - - plan_map = {} - plan_data = pd.read_csv(TRAIN_PLANS_PATH) - for index, row in plan_data.iterrows(): - plans_str = row['plans'] - plans_list = json.loads(plans_str) - session_id = str(row['sid']) - # train_data_dict[session_id]["plans"] = [] - plan_map[session_id] = plans_list - - profile_map = {} - with open(PROFILES_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - for k, line in enumerate(csv_reader): - if k == 0: continue - profile_map[line[0]] = [i for i in range(len(line)) if line[i] == "1.0"] - - session_click_map = {} - with open(TRAIN_CLICK_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - for k, line in enumerate(csv_reader): - if k == 0: continue - if line[0] == "" or line[1] == "" or line[2] == "": - continue - session_click_map[line[0]] = line[2] - #return train_data_dict, profile_map, session_click_map, plan_map - generate_sparse_features(train_data_dict, profile_map, session_click_map, plan_map) - - -def generate_sparse_features(train_data_dict, profile_map, session_click_map, plan_map): - if not os.path.isdir(OUT_DIR): - os.mkdir(OUT_DIR) - with open(os.path.join("./out/", "train.txt"), 'w') as f_train: - for session_id, plan_list in plan_map.items(): - if session_id not in train_data_dict: - continue - cur_map = train_data_dict[session_id] - if cur_map["pid"] != "": - cur_map["profile"] = profile_map[cur_map["pid"]] - else: - cur_map["profile"] = [0] - del cur_map["pid"] - whole_rank = 0 - for plan in plan_list: - whole_rank += 1 - cur_map["whole_rank"] = whole_rank - flag_click = False - rank = 1 - - - for plan in plan_list: - - if ("transport_mode" in plan) and (session_id in session_click_map) and ( - int(plan["transport_mode"]) == int(session_click_map[session_id])): - cur_map["plan"] = plan - cur_map["label"] = 1 - flag_click = True - # print("label is 1") - else: - cur_map["plan"] = plan - cur_map["label"] = 0 - - cur_map["rank"] = rank - rank += 1 - cur_json_instance = json.dumps(cur_map) - 
f_train.write(cur_json_instance + '\n') - if not flag_click: - cur_map["plan"]["distance"] = -1 - cur_map["plan"]["price"] = -1 - cur_map["plan"]["eta"] = -1 - cur_map["plan"]["transport_mode"] = 0 - cur_map["rank"] = 0 - cur_map["label"] = 1 - cur_json_instance = json.dumps(cur_map) - f_train.write(cur_json_instance + '\n') - else: - cur_map["plan"]["distance"] = -1 - cur_map["plan"]["price"] = -1 - cur_map["plan"]["eta"] = -1 - cur_map["plan"]["transport_mode"] = 0 - cur_map["rank"] = 0 - cur_map["label"] = 0 - cur_json_instance = json.dumps(cur_map) - f_train.write(cur_json_instance + '\n') - - - build_norm_feature() - - -if __name__ == "__main__": - preprocess() diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/preprocess_dense.py b/PaddleRec/ctr/Paddle_baseline_KDD2019/preprocess_dense.py deleted file mode 100644 index 10d674c9..00000000 --- a/PaddleRec/ctr/Paddle_baseline_KDD2019/preprocess_dense.py +++ /dev/null @@ -1,294 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os, random, csv, datetime, json -import pandas as pd -import numpy as np -import argparse -import logging -import time - -logging.basicConfig( - format='%(asctime)s - %(levelname)s - %(message)s') -logger = logging.getLogger("preprocess") -logger.setLevel(logging.INFO) - -TRAIN_QUERIES_PATH = "./data_set_phase1/train_queries.csv" -TRAIN_PLANS_PATH = "./data_set_phase1/train_plans.csv" -TRAIN_CLICK_PATH = "./data_set_phase1/train_clicks.csv" -PROFILES_PATH = "./data_set_phase1/profiles.csv" - -OUT_DIR = "./out" -ORI_TRAIN_PATH = "train.txt" -NORM_TRAIN_PATH = "normed_train.txt" -#variable to control the ratio of positive and negative instance of transmode 0 which is original label of no click -THRESHOLD_LABEL = 0.5 - - - -O1_MIN = 115.47 -O1_MAX = 117.29 - -O2_MIN = 39.46 -O2_MAX = 40.97 - -D1_MIN = 115.44 -D1_MAX = 117.37 - -D2_MIN = 39.46 -D2_MAX = 40.96 - -DISTANCE_MIN = 1.0 -DISTANCE_MAX = 225864.0 -THRESHOLD_DIS = 200000.0 - -PRICE_MIN = 200.0 -PRICE_MAX = 92300.0 -THRESHOLD_PRICE = 20000 - -ETA_MIN = 1.0 -ETA_MAX = 72992.0 -THRESHOLD_ETA = 10800.0 - - -def build_norm_feature(): - with open(os.path.join(OUT_DIR, NORM_TRAIN_PATH), 'w') as nf: - with open(os.path.join(OUT_DIR, ORI_TRAIN_PATH), 'r') as f: - for line in f: - cur_map = json.loads(line) - - cur_map["plan"]["distance"] = (cur_map["plan"]["distance"] - DISTANCE_MIN) / (DISTANCE_MAX - DISTANCE_MIN) - - if cur_map["plan"]["price"]: - cur_map["plan"]["price"] = (cur_map["plan"]["price"] - PRICE_MIN) / (PRICE_MAX - PRICE_MIN) - else: - cur_map["plan"]["price"] = 0.0 - - cur_map["plan"]["eta"] = (cur_map["plan"]["eta"] - ETA_MIN) / (ETA_MAX - ETA_MIN) - - cur_json_instance = json.dumps(cur_map) - nf.write(cur_json_instance + '\n') - - -def preprocess(): - """ - Construct the train data indexed by session id and mode id jointly. Convert all the raw features (user profile, - od pair, req time, click time, eta, price, distance, transport mode) to one-hot ids used for - embedding. 
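Note how generate_sparse_features above closes each session: besides one instance per plan, it appends a synthetic transport-mode-0 instance whose label is 1 when no plan was clicked and 0 otherwise, so "no click" becomes a learnable outcome. Schematically:

```python
def mode0_instance(flag_click):
    """Sketch of the sentinel row appended per session above."""
    return {
        "plan": {"distance": -1, "price": -1, "eta": -1, "transport_mode": 0},
        "rank": 0,
        "label": 0 if flag_click else 1,  # positive only for no-click sessions
    }
```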
We split the one-hot features into two categories: user feature and context feature for - better understanding of FM algorithm. - Note that the user profile is already provided by one-hot encoded form, we treat it as embedded vector - for unity with the context feature and easily using of PaddlePaddle embedding layer. Given the - train clicks data, we label each train instance with 1 or 0 depend on if this instance is clicked or - not include non-click case. To Be Changed - :return: - """ - - train_data_dict = {} - - with open("./weather.json", 'r') as f: - weather_dict = json.load(f) - - with open(TRAIN_QUERIES_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - train_index_list = [] - for k, line in enumerate(csv_reader): - if k == 0: continue - if line[0] == "": continue - if line[1] == "": - train_index_list.append(line[0] + "_0") - else: - train_index_list.append(line[0] + "_" + line[1]) - - train_index = line[0] - train_data_dict[train_index] = {} - train_data_dict[train_index]["pid"] = line[1] - train_data_dict[train_index]["query"] = {} - train_data_dict[train_index]["weather"] = {} - - reqweekday = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%w") - reqhour = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%H") - - # weather related features, no big use, maybe more detailed weather information is better - date_key = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S').strftime("%m-%d") - train_data_dict[train_index]["weather"] = {} - train_data_dict[train_index]["weather"].update({"max_temp": weather_dict[date_key]["max_temp"]}) - train_data_dict[train_index]["weather"].update({"min_temp": weather_dict[date_key]["min_temp"]}) - train_data_dict[train_index]["weather"].update({"wea": weather_dict[date_key]["weather"]}) - train_data_dict[train_index]["weather"].update({"wind": weather_dict[date_key]["wind"]}) - - train_data_dict[train_index]["query"].update({"weekday":reqweekday}) - train_data_dict[train_index]["query"].update({"hour":reqhour}) - - o = line[3].split(',') - o_first = o[0] - o_second = o[1] - train_data_dict[train_index]["query"].update({"o1":float(o_first)}) - train_data_dict[train_index]["query"].update({"o2":float(o_second)}) - - d = line[4].split(',') - d_first = d[0] - d_second = d[1] - train_data_dict[train_index]["query"].update({"d1":float(d_first)}) - train_data_dict[train_index]["query"].update({"d2":float(d_second)}) - - plan_map = {} - plan_data = pd.read_csv(TRAIN_PLANS_PATH) - for index, row in plan_data.iterrows(): - plans_str = row['plans'] - plans_list = json.loads(plans_str) - session_id = str(row['sid']) - # train_data_dict[session_id]["plans"] = [] - plan_map[session_id] = plans_list - - profile_map = {} - with open(PROFILES_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - for k, line in enumerate(csv_reader): - if k == 0: continue - profile_map[line[0]] = [i for i in range(len(line)) if line[i] == "1.0"] - - session_click_map = {} - with open(TRAIN_CLICK_PATH, 'r') as f: - csv_reader = csv.reader(f, delimiter=',') - for k, line in enumerate(csv_reader): - if k == 0: continue - if line[0] == "" or line[1] == "" or line[2] == "": - continue - session_click_map[line[0]] = line[2] - #return train_data_dict, profile_map, session_click_map, plan_map - generate_sparse_features(train_data_dict, profile_map, session_click_map, plan_map) - - -def generate_sparse_features(train_data_dict, profile_map, session_click_map, plan_map): - if not os.path.isdir(OUT_DIR): - os.mkdir(OUT_DIR) - with 
open(os.path.join(OUT_DIR, ORI_TRAIN_PATH), 'w') as f_train: - for session_id, plan_list in plan_map.items(): - if session_id not in train_data_dict: - continue - cur_map = train_data_dict[session_id] - if cur_map["pid"] != "": - cur_map["profile"] = profile_map[cur_map["pid"]] - else: - cur_map["profile"] = [0] - - #rank information related feature - whole_rank = 0 - for plan in plan_list: - whole_rank += 1 - cur_map["mode_rank" + str(whole_rank)] = plan["transport_mode"] - - if whole_rank < 5: - for r in range(whole_rank + 1, 6): - cur_map["mode_rank" + str(r)] = -1 - - cur_map["whole_rank"] = whole_rank - flag_click = False - rank = 1 - - price_list = [] - eta_list = [] - distance_list = [] - for plan in plan_list: - if not plan["price"]: - price_list.append(0) - else: - price_list.append(int(plan["price"])) - eta_list.append(int(plan["eta"])) - distance_list.append(int(plan["distance"])) - price_list.sort(reverse=False) - eta_list.sort(reverse=False) - distance_list.sort(reverse=False) - - for plan in plan_list: - if plan["price"] and int(plan["price"]) == price_list[0]: - cur_map["mode_min_price"] = plan["transport_mode"] - if plan["price"] and int(plan["price"]) == price_list[-1]: - cur_map["mode_max_price"] = plan["transport_mode"] - if int(plan["eta"]) == eta_list[0]: - cur_map["mode_min_eta"] = plan["transport_mode"] - if int(plan["eta"]) == eta_list[-1]: - cur_map["mode_max_eta"] = plan["transport_mode"] - if int(plan["distance"]) == distance_list[0]: - cur_map["mode_min_distance"] = plan["transport_mode"] - if int(plan["distance"]) == distance_list[-1]: - cur_map["mode_max_distance"] = plan["transport_mode"] - if "mode_min_price" not in cur_map: - cur_map["mode_min_price"] = -1 - if "mode_max_price" not in cur_map: - cur_map["mode_max_price"] = -1 - - for plan in plan_list: - if ("transport_mode" in plan) and (session_id in session_click_map) and ( - int(plan["transport_mode"]) == int(session_click_map[session_id])): - flag_click = True - if flag_click: - - for plan in plan_list: - cur_price = int(plan["price"]) if plan["price"] else 0 - cur_eta = int(plan["eta"]) - cur_distance = int(plan["distance"]) - cur_map["price_rank"] = price_list.index(cur_price) + 1 - cur_map["eta_rank"] = eta_list.index(cur_eta) + 1 - cur_map["distance_rank"] = distance_list.index(cur_distance) + 1 - - if ("transport_mode" in plan) and (session_id in session_click_map) and ( - int(plan["transport_mode"]) == int(session_click_map[session_id])): - cur_map["plan"] = plan - cur_map["label"] = 1 - else: - cur_map["plan"] = plan - cur_map["label"] = 0 - - cur_map["plan_rank"] = rank - rank += 1 - cur_json_instance = json.dumps(cur_map) - f_train.write(cur_json_instance + '\n') - - cur_map["plan"] = {} - #since we define a new ctr task from original task, we use a basic way to generate instances of transport mode 0. 
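# Unlike preprocess.py, this dense variant does not always emit the mode-0
# row as a negative: when the session did have a click, the row is kept only
# with probability THRESHOLD_LABEL (0.5 above), roughly balancing positive
# and negative mode-0 instances, as the branch below shows.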
- #There should be a optimal strategy to generate instances of transport mode 0 - if not flag_click: - cur_map["plan"]["distance"] = -1 - cur_map["plan"]["price"] = -1 - cur_map["plan"]["eta"] = -1 - cur_map["plan"]["transport_mode"] = 0 - cur_map["plan_rank"] = 0 - cur_map["price_rank"] = 0 - cur_map["eta_rank"] = 0 - cur_map["distance_rank"] = 0 - cur_map["label"] = 1 - cur_json_instance = json.dumps(cur_map) - f_train.write(cur_json_instance + '\n') - else: - if random.random() < THRESHOLD_LABEL: - cur_map["plan"]["distance"] = -1 - cur_map["plan"]["price"] = -1 - cur_map["plan"]["eta"] = -1 - cur_map["plan"]["transport_mode"] = 0 - cur_map["plan_rank"] = 0 - cur_map["price_rank"] = 0 - cur_map["eta_rank"] = 0 - cur_map["distance_rank"] = 0 - cur_map["label"] = 0 - cur_json_instance = json.dumps(cur_map) - f_train.write(cur_json_instance + '\n') - - - - build_norm_feature() - - -if __name__ == "__main__": - preprocess() diff --git a/PaddleRec/ctr/Paddle_baseline_KDD2019/weather.json b/PaddleRec/ctr/Paddle_baseline_KDD2019/weather.json deleted file mode 100644 index 0d0f17f9..00000000 --- a/PaddleRec/ctr/Paddle_baseline_KDD2019/weather.json +++ /dev/null @@ -1 +0,0 @@ -{"10-01": {"max_temp": "24", "min_temp": "12", "weather": "q", "wind": "45"}, "10-02": {"max_temp": "24", "min_temp": "11", "weather": "q", "wind": "12"}, "10-03": {"max_temp": "25", "min_temp": "10", "weather": "q", "wind": "12"}, "10-04": {"max_temp": "25", "min_temp": "12", "weather": "q", "wind": "12"}, "10-05": {"max_temp": "24", "min_temp": "14", "weather": "dy", "wind": "12"}, "10-06": {"max_temp": "20", "min_temp": "8", "weather": "q", "wind": "45"}, "10-07": {"max_temp": "21", "min_temp": "7", "weather": "q", "wind": "12"}, "10-08": {"max_temp": "21", "min_temp": "8", "weather": "dy", "wind": "12"}, "10-09": {"max_temp": "15", "min_temp": "4", "weather": "dyq", "wind": "45"}, "10-10": {"max_temp": "17", "min_temp": "4", "weather": "dyq", "wind": "12"}, "10-11": {"max_temp": "18", "min_temp": "5", "weather": "qdy", "wind": "12"}, "10-12": {"max_temp": "20", "min_temp": "5", "weather": "dyq", "wind": "12"}, "10-13": {"max_temp": "20", "min_temp": "8", "weather": "dy", "wind": "12"}, "10-14": {"max_temp": "21", "min_temp": "10", "weather": "dy", "wind": "12"}, "10-15": {"max_temp": "17", "min_temp": "11", "weather": "xq", "wind": "12"}, "10-16": {"max_temp": "17", "min_temp": "7", "weather": "dyq", "wind": "12"}, "10-17": {"max_temp": "17", "min_temp": "5", "weather": "q", "wind": "12"}, "10-18": {"max_temp": "18", "min_temp": "5", "weather": "q", "wind": "12"}, "10-19": {"max_temp": "19", "min_temp": "7", "weather": "dy", "wind": "12"}, "10-20": {"max_temp": "18", "min_temp": "7", "weather": "dy", "wind": "12"}, "10-21": {"max_temp": "18", "min_temp": "7", "weather": "dy", "wind": "12"}, "10-22": {"max_temp": "19", "min_temp": "5", "weather": "dyq", "wind": "12"}, "10-23": {"max_temp": "19", "min_temp": "4", "weather": "q", "wind": "34"}, "10-24": {"max_temp": "20", "min_temp": "6", "weather": "qdy", "wind": "12"}, "10-25": {"max_temp": "15", "min_temp": "8", "weather": "dy", "wind": "12"}, "10-26": {"max_temp": "14", "min_temp": "3", "weather": "q", "wind": "45"}, "10-27": {"max_temp": "17", "min_temp": "5", "weather": "dy", "wind": "12"}, "10-28": {"max_temp": "17", "min_temp": "4", "weather": "dyq", "wind": "45"}, "10-29": {"max_temp": "15", "min_temp": "3", "weather": "q", "wind": "34"}, "10-30": {"max_temp": "16", "min_temp": "1", "weather": "q", "wind": "12"}, "10-31": {"max_temp": "17", "min_temp": "3", 
"weather": "q", "wind": "12"}, "11-01": {"max_temp": "17", "min_temp": "3", "weather": "q", "wind": "12"}, "11-02": {"max_temp": "18", "min_temp": "4", "weather": "q", "wind": "12"}, "11-03": {"max_temp": "16", "min_temp": "6", "weather": "dy", "wind": "12"}, "11-04": {"max_temp": "10", "min_temp": "2", "weather": "xydy", "wind": "34"}, "11-05": {"max_temp": "10", "min_temp": "2", "weather": "dy", "wind": "12"}, "11-06": {"max_temp": "12", "min_temp": "0", "weather": "dy", "wind": "12"}, "11-07": {"max_temp": "13", "min_temp": "3", "weather": "dy", "wind": "12"}, "11-08": {"max_temp": "14", "min_temp": "2", "weather": "dy", "wind": "12"}, "11-09": {"max_temp": "15", "min_temp": "1", "weather": "qdy", "wind": "34"}, "11-10": {"max_temp": "11", "min_temp": "0", "weather": "dy", "wind": "12"}, "11-11": {"max_temp": "13", "min_temp": "1", "weather": "dyq", "wind": "12"}, "11-12": {"max_temp": "14", "min_temp": "2", "weather": "q", "wind": "12"}, "11-13": {"max_temp": "13", "min_temp": "5", "weather": "dy", "wind": "12"}, "11-14": {"max_temp": "13", "min_temp": "5", "weather": "dy", "wind": "12"}, "11-15": {"max_temp": "8", "min_temp": "1", "weather": "xydy", "wind": "34"}, "11-16": {"max_temp": "8", "min_temp": "-1", "weather": "q", "wind": "12"}, "11-17": {"max_temp": "9", "min_temp": "-2", "weather": "dyq", "wind": "12"}, "11-18": {"max_temp": "11", "min_temp": "-3", "weather": "q", "wind": "34"}, "11-19": {"max_temp": "10", "min_temp": "-2", "weather": "qdy", "wind": "12"}, "11-20": {"max_temp": "9", "min_temp": "-1", "weather": "dy", "wind": "12"}, "11-21": {"max_temp": "9", "min_temp": "-3", "weather": "q", "wind": "2"}, "11-22": {"max_temp": "8", "min_temp": "-3", "weather": "qdy", "wind": "1"}, "11-23": {"max_temp": "7", "min_temp": "0", "weather": "dy", "wind": "2"}, "11-24": {"max_temp": "9", "min_temp": "-3", "weather": "qdy", "wind": "2"}, "11-25": {"max_temp": "10", "min_temp": "-3", "weather": "q", "wind": "1"}, "11-26": {"max_temp": "10", "min_temp": "0", "weather": "dy", "wind": "1"}, "11-27": {"max_temp": "9", "min_temp": "-3", "weather": "qdy", "wind": "2"}, "11-28": {"max_temp": "8", "min_temp": "-3", "weather": "q", "wind": "1"}, "11-29": {"max_temp": "7", "min_temp": "-4", "weather": "q", "wind": "1"}, "11-30": {"max_temp": "8", "min_temp": "-3", "weather": "q", "wind": "1"}, "12-01": {"max_temp": "7", "min_temp": "0", "weather": "dy", "wind": "1"}, "12-02": {"max_temp": "9", "min_temp": "2", "weather": "dy", "wind": "1"}, "12-03": {"max_temp": "8", "min_temp": "-3", "weather": "dyq", "wind": "3"}, "12-04": {"max_temp": "4", "min_temp": "-6", "weather": "qdy", "wind": "2"}, "12-05": {"max_temp": "1", "min_temp": "-4", "weather": "dy", "wind": "1"}, "12-06": {"max_temp": "-2", "min_temp": "-9", "weather": "q", "wind": "3"}, "12-07": {"max_temp": "-4", "min_temp": "-10", "weather": "q", "wind": "3"}, "12-08": {"max_temp": "-2", "min_temp": "-10", "weather": "qdy", "wind": "2"}, "12-09": {"max_temp": "-1", "min_temp": "-10", "weather": "dyq", "wind": "1"}} \ No newline at end of file diff --git a/PaddleRec/ctr/dcn/cluster_train.py b/PaddleRec/ctr/dcn/cluster_train.py deleted file mode 100644 index e791727b..00000000 --- a/PaddleRec/ctr/dcn/cluster_train.py +++ /dev/null @@ -1,205 +0,0 @@ -import argparse -import os -import sys -import time -from collections import OrderedDict - -import paddle.fluid as fluid - -from network import DCN -import utils - - -def boolean_string(s): - if s.lower() not in {'false', 'true'}: - raise ValueError('Not a valid boolean string') - return 
s.lower() == 'true' - - -def parse_args(): - parser = argparse.ArgumentParser("dcn cluster train.") - parser.add_argument( - '--train_data_dir', - type=str, - default='dist_data/dist_train_data', - help='The path of train data') - parser.add_argument( - '--test_valid_data_dir', - type=str, - default='dist_data/dist_test_valid_data', - help='The path of test and valid data') - parser.add_argument( - '--vocab_dir', - type=str, - default='dist_data/vocab', - help='The path of generated vocabs') - parser.add_argument( - '--cat_feat_num', - type=str, - default='dist_data/cat_feature_num.txt', - help='The path of generated cat_feature_num.txt') - parser.add_argument( - '--batch_size', type=int, default=512, help="Batch size") - parser.add_argument('--num_epoch', type=int, default=10, help="train epoch") - parser.add_argument( - '--model_output_dir', - type=str, - default='models', - help='The path for model to store') - parser.add_argument( - '--num_thread', type=int, default=1, help='The number of threads') - parser.add_argument('--test_epoch', type=str, default='1') - parser.add_argument( - '--dnn_hidden_units', - nargs='+', - type=int, - default=[1024, 1024], - help='DNN layers and hidden units') - parser.add_argument( - '--cross_num', - type=int, - default=6, - help='The number of Cross network layers') - parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate') - parser.add_argument( - '--l2_reg_cross', - type=float, - default=1e-5, - help='Cross net l2 regularizer coefficient') - parser.add_argument( - '--use_bn', - type=boolean_string, - default=True, - help='Whether use batch norm in dnn part') - parser.add_argument( - '--is_sparse', - action='store_true', - required=False, - default=False, - help='embedding will use sparse or not, (default: False)') - parser.add_argument( - '--clip_by_norm', type=float, default=100.0, help="gradient clip norm") - parser.add_argument('--print_steps', type=int, default=5) - parser.add_argument('--use_gpu', type=int, default=1) - - # dist params - parser.add_argument('--is_local', type=int, default=1, help='whether local') - parser.add_argument( - '--num_devices', type=int, default=1, help='Number of GPU devices') - parser.add_argument( - '--role', type=str, default='pserver', help='trainer or pserver') - parser.add_argument( - '--endpoints', - type=str, - default='127.0.0.1:6000', - help='The pserver endpoints, like: 127.0.0.1:6000, 127.0.0.1:6001') - parser.add_argument( - '--current_endpoint', - type=str, - default='127.0.0.1:6000', - help='The current_endpoint') - parser.add_argument( - '--trainer_id', - type=int, - default=0, - help='trainer id ,only trainer_id=0 save model') - parser.add_argument( - '--trainers', - type=int, - default=1, - help='The num of trianers, (default: 1)') - args = parser.parse_args() - return args - - -def train(): - """ do training """ - args = parse_args() - print(args) - - if args.trainer_id == 0 and not os.path.isdir(args.model_output_dir): - os.mkdir(args.model_output_dir) - - cat_feat_dims_dict = OrderedDict() - for line in open(args.cat_feat_num): - spls = line.strip().split() - assert len(spls) == 2 - cat_feat_dims_dict[spls[0]] = int(spls[1]) - - dcn_model = DCN(args.cross_num, args.dnn_hidden_units, args.l2_reg_cross, - args.use_bn, args.clip_by_norm, cat_feat_dims_dict, - args.is_sparse) - dcn_model.build_network() - optimizer = fluid.optimizer.Adam(learning_rate=args.lr) - optimizer.minimize(dcn_model.loss) - - def train_loop(main_program): - """ train network """ - start_time = time.time() - 
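# The calls below wire up Paddle's Dataset pipeline: each training file is
# parsed by a separate `python reader.py <vocab_dir>` subprocess declared via
# set_pipe_command, batching and threading are configured on the dataset
# itself, and train_from_dataset later consumes it without a Python feed loop.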
dataset = fluid.DatasetFactory().create_dataset() - dataset.set_use_var(dcn_model.data_list) - pipe_command = 'python reader.py {}'.format(args.vocab_dir) - dataset.set_pipe_command(pipe_command) - dataset.set_batch_size(args.batch_size) - dataset.set_thread(args.num_thread) - train_filelist = [ - os.path.join(args.train_data_dir, fname) - for fname in next(os.walk(args.train_data_dir))[2] - ] - dataset.set_filelist(train_filelist) - - if args.use_gpu == 1: - exe = fluid.Executor(fluid.CUDAPlace(0)) - dataset.set_thread(1) - else: - exe = fluid.Executor(fluid.CPUPlace()) - dataset.set_thread(args.num_thread) - exe.run(fluid.default_startup_program()) - - for epoch_id in range(args.num_epoch): - start = time.time() - sys.stderr.write('\nepoch%d start ...\n' % (epoch_id + 1)) - exe.train_from_dataset( - program=main_program, - dataset=dataset, - fetch_list=[ - dcn_model.loss, dcn_model.avg_logloss, dcn_model.auc_var - ], - fetch_info=['total_loss', 'avg_logloss', 'auc'], - debug=False, - print_period=args.print_steps) - model_dir = os.path.join(args.model_output_dir, - 'epoch_' + str(epoch_id + 1), "checkpoint") - sys.stderr.write('epoch%d is finished and takes %f s\n' % ( - (epoch_id + 1), time.time() - start)) - if args.trainer_id == 0: # only trainer 0 save model - print("save model in {}".format(model_dir)) - fluid.save(main_program, model_dir) - - print("train time cost {:.4f}".format(time.time() - start_time)) - print("finish training") - - if args.is_local: - print("run local training") - train_loop(fluid.default_main_program()) - else: - print("run distribute training") - t = fluid.DistributeTranspiler() - t.transpile( - args.trainer_id, pservers=args.endpoints, trainers=args.trainers) - if args.role == "pserver": - print("run psever") - pserver_prog, pserver_startup = t.get_pserver_programs( - args.current_endpoint) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif args.role == "trainer": - print("run trainer") - train_loop(t.get_trainer_program()) - - -if __name__ == "__main__": - utils.check_version() - train() diff --git a/PaddleRec/ctr/dcn/cluster_train.sh b/PaddleRec/ctr/dcn/cluster_train.sh deleted file mode 100755 index 4088a8af..00000000 --- a/PaddleRec/ctr/dcn/cluster_train.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -#export GLOG_v=30 -#export GLOG_logtostderr=1 - -# start pserver0 -python -u cluster_train.py \ - --train_data_dir dist_data/dist_train_data \ - --model_output_dir cluster_model \ - --is_local 0 \ - --is_sparse \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6000 \ - --trainers 2 \ - > pserver0.log 2>&1 & - -# start pserver1 -python -u cluster_train.py \ - --train_data_dir dist_data/dist_train_data \ - --model_output_dir cluster_model \ - --is_local 0 \ - --is_sparse \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6001 \ - --trainers 2 \ - > pserver1.log 2>&1 & - -# start trainer0 -#CUDA_VISIBLE_DEVICES=1 python cluster_train.py \ -python -u cluster_train.py \ - --train_data_dir dist_data/dist_train_data \ - --model_output_dir cluster_model \ - --use_gpu 0 \ - --is_local 0 \ - --is_sparse \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 0 \ - > trainer0.log 2>&1 & - -# start trainer1 -#CUDA_VISIBLE_DEVICES=2 python cluster_train.py \ -python -u cluster_train.py \ - --train_data_dir dist_data/dist_train_data \ - --model_output_dir cluster_model \ - --use_gpu 
0 \ - --is_local 0 \ - --is_sparse \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 1 \ - > trainer1.log 2>&1 & - -echo "2 pservers and 2 trainers started." \ No newline at end of file diff --git a/PaddleRec/ctr/dcn/reader.py b/PaddleRec/ctr/dcn/reader.py index 291fc988..d121f9fd 100644 --- a/PaddleRec/ctr/dcn/reader.py +++ b/PaddleRec/ctr/dcn/reader.py @@ -72,8 +72,8 @@ class CriteoDataset(dg.MultiSlotDataGenerator): yield label_feat_list import paddle - batch_iter = paddle.batch( - paddle.reader.buffered( + batch_iter = fluid.io.batch( + fluid.io.buffered( local_iter, size=buf_size), batch_size=batch) return batch_iter diff --git a/PaddleRec/ctr/deepfm/cluster_train.py b/PaddleRec/ctr/deepfm/cluster_train.py deleted file mode 100644 index da565172..00000000 --- a/PaddleRec/ctr/deepfm/cluster_train.py +++ /dev/null @@ -1,193 +0,0 @@ -import argparse -import os -import sys -import time -from network_conf import ctr_deepfm_model - -import paddle.fluid as fluid -import utils - - -def parse_args(): - parser = argparse.ArgumentParser("deepfm cluster train.") - - parser.add_argument( - '--train_data_dir', - type=str, - default='dist_data/dist_train_data', - help='The path of train data (default: data/train_data)') - parser.add_argument( - '--test_data_dir', - type=str, - default='dist_data/dist_test_data', - help='The path of test data (default: models)') - parser.add_argument( - '--feat_dict', - type=str, - default='dist_data/aid_data/feat_dict_10.pkl2', - help='The path of feat_dict') - parser.add_argument( - '--batch_size', - type=int, - default=100, - help="The size of mini-batch (default:100)") - parser.add_argument( - '--embedding_size', - type=int, - default=10, - help="The size for embedding layer (default:10)") - parser.add_argument( - '--num_epoch', - type=int, - default=10, - help="The number of epochs to train (default: 50)") - parser.add_argument( - '--model_output_dir', - type=str, - required=True, - help='The path for model to store (default: models)') - parser.add_argument( - '--num_thread', - type=int, - default=1, - help='The number of threads (default: 1)') - parser.add_argument('--test_epoch', type=str, default='1') - parser.add_argument( - '--layer_sizes', - nargs='+', - type=int, - default=[400, 400, 400], - help='The size of each layers (default: [10, 10, 10])') - parser.add_argument( - '--act', - type=str, - default='relu', - help='The activation of each layers (default: relu)') - parser.add_argument( - '--is_sparse', - action='store_true', - required=False, - default=False, - help='embedding will use sparse or not, (default: False)') - parser.add_argument( - '--lr', type=float, default=1e-4, help='Learning rate (default: 1e-4)') - parser.add_argument( - '--reg', type=float, default=1e-4, help=' (default: 1e-4)') - parser.add_argument('--num_field', type=int, default=39) - parser.add_argument('--num_feat', type=int, default=141443) - parser.add_argument('--use_gpu', type=int, default=1) - - # dist params - parser.add_argument('--is_local', type=int, default=1, help='whether local') - parser.add_argument( - '--num_devices', type=int, default=1, help='Number of GPU devices') - parser.add_argument( - '--role', type=str, default='pserver', help='trainer or pserver') - parser.add_argument( - '--endpoints', - type=str, - default='127.0.0.1:6000', - help='The pserver endpoints, like: 127.0.0.1:6000, 127.0.0.1:6001') - parser.add_argument( - '--current_endpoint', - type=str, - default='127.0.0.1:6000', - help='The 
current_endpoint') - parser.add_argument( - '--trainer_id', - type=int, - default=0, - help='trainer id ,only trainer_id=0 save model') - parser.add_argument( - '--trainers', - type=int, - default=1, - help='The num of trianers, (default: 1)') - args = parser.parse_args() - return args - - -def train(): - """ do training """ - args = parse_args() - print(args) - - if args.trainer_id == 0 and not os.path.isdir(args.model_output_dir): - os.mkdir(args.model_output_dir) - - loss, auc, data_list, auc_states = ctr_deepfm_model( - args.embedding_size, args.num_field, args.num_feat, args.layer_sizes, - args.act, args.reg, args.is_sparse) - optimizer = fluid.optimizer.SGD( - learning_rate=args.lr, - regularization=fluid.regularizer.L2DecayRegularizer(args.reg)) - optimizer.minimize(loss) - - def train_loop(main_program): - """ train network """ - start_time = time.time() - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_use_var(data_list) - pipe_command = 'python criteo_reader.py {}'.format(args.feat_dict) - dataset.set_pipe_command(pipe_command) - dataset.set_batch_size(args.batch_size) - dataset.set_thread(args.num_thread) - train_filelist = [ - os.path.join(args.train_data_dir, x) - for x in os.listdir(args.train_data_dir) - ] - - if args.use_gpu == 1: - exe = fluid.Executor(fluid.CUDAPlace(0)) - dataset.set_thread(1) - else: - exe = fluid.Executor(fluid.CPUPlace()) - dataset.set_thread(args.num_thread) - exe.run(fluid.default_startup_program()) - - for epoch_id in range(args.num_epoch): - start = time.time() - sys.stderr.write('\nepoch%d start ...\n' % (epoch_id + 1)) - dataset.set_filelist(train_filelist) - exe.train_from_dataset( - program=main_program, - dataset=dataset, - fetch_list=[loss, auc], - fetch_info=['epoch %d batch loss' % (epoch_id + 1), "auc"], - print_period=5, - debug=False) - model_dir = os.path.join(args.model_output_dir, - 'epoch_' + str(epoch_id + 1)) - sys.stderr.write('epoch%d is finished and takes %f s\n' % ( - (epoch_id + 1), time.time() - start)) - if args.trainer_id == 0: # only trainer 0 save model - print("save model in {}".format(model_dir)) - fluid.save(main_program, model_dir) - - print("train time cost {:.4f}".format(time.time() - start_time)) - print("finish training") - - if args.is_local: - print("run local training") - train_loop(fluid.default_main_program()) - else: - print("run distribute training") - t = fluid.DistributeTranspiler() - t.transpile( - args.trainer_id, pservers=args.endpoints, trainers=args.trainers) - if args.role == "pserver": - print("run psever") - pserver_prog, pserver_startup = t.get_pserver_programs( - args.current_endpoint) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif args.role == "trainer": - print("run trainer") - train_loop(t.get_trainer_program()) - - -if __name__ == "__main__": - utils.check_version() - train() diff --git a/PaddleRec/ctr/deepfm/cluster_train.sh b/PaddleRec/ctr/deepfm/cluster_train.sh deleted file mode 100755 index 4088a8af..00000000 --- a/PaddleRec/ctr/deepfm/cluster_train.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -#export GLOG_v=30 -#export GLOG_logtostderr=1 - -# start pserver0 -python -u cluster_train.py \ - --train_data_dir dist_data/dist_train_data \ - --model_output_dir cluster_model \ - --is_local 0 \ - --is_sparse \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6000 \ - --trainers 2 \ - > pserver0.log 2>&1 & - -# start pserver1 -python -u cluster_train.py \ - --train_data_dir 
dist_data/dist_train_data \ - --model_output_dir cluster_model \ - --is_local 0 \ - --is_sparse \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6001 \ - --trainers 2 \ - > pserver1.log 2>&1 & - -# start trainer0 -#CUDA_VISIBLE_DEVICES=1 python cluster_train.py \ -python -u cluster_train.py \ - --train_data_dir dist_data/dist_train_data \ - --model_output_dir cluster_model \ - --use_gpu 0 \ - --is_local 0 \ - --is_sparse \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 0 \ - > trainer0.log 2>&1 & - -# start trainer1 -#CUDA_VISIBLE_DEVICES=2 python cluster_train.py \ -python -u cluster_train.py \ - --train_data_dir dist_data/dist_train_data \ - --model_output_dir cluster_model \ - --use_gpu 0 \ - --is_local 0 \ - --is_sparse \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 1 \ - > trainer1.log 2>&1 & - -echo "2 pservers and 2 trainers started." \ No newline at end of file diff --git a/PaddleRec/ctr/deepfm/infer.py b/PaddleRec/ctr/deepfm/infer.py index 9ff58af7..22b9a95e 100644 --- a/PaddleRec/ctr/deepfm/infer.py +++ b/PaddleRec/ctr/deepfm/infer.py @@ -30,7 +30,7 @@ def infer(): ] criteo_dataset = CriteoDataset() criteo_dataset.setup(args.feat_dict) - test_reader = paddle.batch( + test_reader = fluid.io.batch( criteo_dataset.test(test_files), batch_size=args.batch_size) startup_program = fluid.framework.Program() diff --git a/PaddleRec/ctr/deepfm_dygraph/data_reader.py b/PaddleRec/ctr/deepfm_dygraph/data_reader.py index 7c9d9abc..0cd800cb 100644 --- a/PaddleRec/ctr/deepfm_dygraph/data_reader.py +++ b/PaddleRec/ctr/deepfm_dygraph/data_reader.py @@ -6,6 +6,7 @@ import pickle import random import paddle +import paddle.fluid as fluid class DataGenerator(object): @@ -58,7 +59,7 @@ class DataGenerator(object): if not cycle: break - return paddle.batch(_reader, batch_size=batch_size) + return fluid.io.batch(_reader, batch_size=batch_size) def data_reader(batch_size, diff --git a/PaddleRec/ctr/din/README.md b/PaddleRec/ctr/din/README.md index 8644a75c..ea8585c0 100644 --- a/PaddleRec/ctr/din/README.md +++ b/PaddleRec/ctr/din/README.md @@ -8,8 +8,6 @@ ├── train.py # 训练脚本 ├── infer.py # 预测脚本 ├── network.py # 网络结构 -├── cluster_train.py # 多机训练 -├── cluster_train.sh # 多机训练脚本 ├── reader.py # 和读取数据相关的函数 ├── data/ ├── build_dataset.py # 文本数据转化为paddle数据 @@ -129,12 +127,3 @@ CUDA_VISIBLE_DEVICES=3 python infer.py --model_path 'din_amazon/global_step_4000 ```text 2019-02-22 11:22:58,804 - INFO - TEST --> loss: [0.47005194] auc:0.863794952818 ``` - - -## 多机训练 -可参考cluster_train.py 配置多机环境 - -运行命令本地模拟多机场景 -``` -sh cluster_train.sh -``` diff --git a/PaddleRec/ctr/din/cluster_train.py b/PaddleRec/ctr/din/cluster_train.py deleted file mode 100644 index 8cd7e15c..00000000 --- a/PaddleRec/ctr/din/cluster_train.py +++ /dev/null @@ -1,172 +0,0 @@ -import sys -import logging -import time -import numpy as np -import argparse -import paddle.fluid as fluid -import paddle -import time -import network -import reader -import random - -logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s') -logger = logging.getLogger("fluid") -logger.setLevel(logging.INFO) - - -def parse_args(): - parser = argparse.ArgumentParser("din") - parser.add_argument( - '--config_path', - type=str, - default='data/config.txt', - help='dir of config') - parser.add_argument( - '--train_dir', - type=str, - default='data/paddle_train.txt', - help='dir of train file') - parser.add_argument( - 
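
(A note on the `paddle.batch` → `fluid.io.batch` swap applied above — and the matching `paddle.reader.shuffle` → `fluid.io.shuffle` swap later in this patch. A self-contained sketch of the 1.8-style composition, using a toy generator:)

```python
import paddle.fluid as fluid

def sample_reader():
    """Toy sample generator standing in for the dataset readers."""
    for i in range(8):
        yield [i]

# fluid.io.shuffle buffers up to buf_size samples and re-emits them in
# random order; fluid.io.batch then groups samples into mini-batches.
# The pair mirrors the older paddle.reader.shuffle / paddle.batch 1:1.
reader = fluid.io.batch(
    fluid.io.shuffle(sample_reader, buf_size=8), batch_size=4)

for mini_batch in reader():
    print(mini_batch)  # e.g. [[3], [0], [6], [1]]
```
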
'--model_dir', - type=str, - default='din_amazon/', - help='dir of saved model') - parser.add_argument( - '--batch_size', type=int, default=16, help='number of batch size') - parser.add_argument( - '--epoch_num', type=int, default=200, help='number of epoch') - parser.add_argument( - '--use_cuda', type=int, default=0, help='whether to use gpu') - parser.add_argument( - '--parallel', - type=int, - default=0, - help='whether to use parallel executor') - parser.add_argument( - '--base_lr', type=float, default=0.85, help='based learning rate') - parser.add_argument( - '--role', type=str, default='pserver', help='trainer or pserver') - parser.add_argument( - '--endpoints', - type=str, - default='127.0.0.1:6000', - help='The pserver endpoints, like: 127.0.0.1:6000, 127.0.0.1:6001') - parser.add_argument( - '--current_endpoint', - type=str, - default='127.0.0.1:6000', - help='The current_endpoint') - parser.add_argument( - '--trainer_id', - type=int, - default=0, - help='trainer id ,only trainer_id=0 save model') - parser.add_argument( - '--trainers', - type=int, - default=1, - help='The num of trianers, (default: 1)') - args = parser.parse_args() - return args - - -def train(): - args = parse_args() - - config_path = args.config_path - train_path = args.train_dir - epoch_num = args.epoch_num - use_cuda = True if args.use_cuda else False - use_parallel = True if args.parallel else False - - logger.info("reading data begins") - user_count, item_count, cat_count = reader.config_read(config_path) - #data_reader, max_len = reader.prepare_reader(train_path, args.batch_size) - logger.info("reading data completes") - - avg_cost, pred = network.network(item_count, cat_count, 433) - base_lr = args.base_lr - boundaries = [410000] - values = [base_lr, 0.2] - sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.piecewise_decay( - boundaries=boundaries, values=values)) - sgd_optimizer.minimize(avg_cost) - - def train_loop(main_program): - data_reader, max_len = reader.prepare_reader(train_path, - args.batch_size) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - feed_list = [ - "hist_item_seq", "hist_cat_seq", "target_item", "target_cat", - "label", "mask", "target_item_seq", "target_cat_seq" - ] - loader = fluid.io.DataLoader.from_generator( - feed_list=feed_list, capacity=10000, iterable=True) - loader.set_sample_list_generator(data_reader, places=place) - if use_parallel: - train_exe = fluid.ParallelExecutor( - use_cuda=use_cuda, - loss_name=avg_cost.name, - main_program=main_program) - else: - train_exe = exe - logger.info("train begins") - global_step = 0 - PRINT_STEP = 1000 - - start_time = time.time() - loss_sum = 0.0 - for id in range(epoch_num): - epoch = id + 1 - for data in loader(): - global_step += 1 - results = train_exe.run(main_program, - feed=data, - fetch_list=[avg_cost.name, pred.name], - return_numpy=True) - loss_sum += results[0].mean() - - if global_step % PRINT_STEP == 0: - logger.info( - "epoch: %d\tglobal_step: %d\ttrain_loss: %.4f\t\ttime: %.2f" - % (epoch, global_step, loss_sum / PRINT_STEP, - time.time() - start_time)) - start_time = time.time() - loss_sum = 0.0 - - if (global_step > 400000 and - global_step % PRINT_STEP == 0) or ( - global_step < 400000 and - global_step % 50000 == 0): - save_dir = args.model_dir + "/global_step_" + str( - global_step) - feed_var_name = [ - "hist_item_seq", "hist_cat_seq", "target_item", - "target_cat", "label", "mask", "target_item_seq", - 
"target_cat_seq" - ] - fetch_vars = [avg_cost, pred] - fluid.io.save_inference_model(save_dir, feed_var_name, - fetch_vars, exe) - train_exe.close() - - t = fluid.DistributeTranspiler() - t.transpile( - args.trainer_id, pservers=args.endpoints, trainers=args.trainers) - if args.role == "pserver": - logger.info("run psever") - prog, startup = t.get_pserver_programs(args.current_endpoint) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(startup) - exe.run(prog) - elif args.role == "trainer": - logger.info("run trainer") - train_loop(t.get_trainer_program()) - - -if __name__ == "__main__": - train() diff --git a/PaddleRec/ctr/din/cluster_train.sh b/PaddleRec/ctr/din/cluster_train.sh deleted file mode 100644 index 76115c82..00000000 --- a/PaddleRec/ctr/din/cluster_train.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -#export GLOG_v=30 -#export GLOG_logtostderr=1 - -python -u cluster_train.py \ ---config_path 'data/config.txt' \ ---train_dir 'data/paddle_train.txt' \ ---batch_size 32 \ ---epoch_num 100 \ ---use_cuda 0 \ ---parallel 0 \ ---role pserver \ ---endpoints 127.0.0.1:6000,127.0.0.1:6001 \ ---current_endpoint 127.0.0.1:6000 \ ---trainers 2 \ -> pserver0.log 2>&1 & - -python -u cluster_train.py \ ---config_path 'data/config.txt' \ ---train_dir 'data/paddle_train.txt' \ ---batch_size 32 \ ---epoch_num 100 \ ---use_cuda 0 \ ---parallel 0 \ ---role pserver \ ---endpoints 127.0.0.1:6000,127.0.0.1:6001 \ ---current_endpoint 127.0.0.1:6001 \ ---trainers 2 \ -> pserver1.log 2>&1 & - -python -u cluster_train.py \ ---config_path 'data/config.txt' \ ---train_dir 'data/paddle_train.txt' \ ---batch_size 32 \ ---epoch_num 100 \ ---use_cuda 0 \ ---parallel 0 \ ---role trainer \ ---endpoints 127.0.0.1:6000,127.0.0.1:6001 \ ---trainers 2 \ ---trainer_id 0 \ -> trainer0.log 2>&1 & - -python -u cluster_train.py \ ---config_path 'data/config.txt' \ ---train_dir 'data/paddle_train.txt' \ ---batch_size 32 \ ---epoch_num 100 \ ---use_cuda 0 \ ---parallel 0 \ ---role trainer \ ---endpoints 127.0.0.1:6000,127.0.0.1:6001 \ ---trainers 2 \ ---trainer_id 1 \ -> trainer1.log 2>&1 & diff --git a/PaddleRec/ctr/dnn/README.md b/PaddleRec/ctr/dnn/README.md index fdfdb1c4..989de5a0 100644 --- a/PaddleRec/ctr/dnn/README.md +++ b/PaddleRec/ctr/dnn/README.md @@ -154,7 +154,7 @@ def embedding_layer(input): return fluid.layers.embedding( input=input, is_sparse=True, - size=[args.sparse_feature_dim, + size=[args.sparse_feature_dim, args.embedding_size], param_attr=fluid.ParamAttr( name="SparseFeatFactors", @@ -168,7 +168,7 @@ sparse_embed_seq = list(map(embedding_layer, inputs[1:-1])) # [C1~C26] 将离散数据通过embedding查表得到的值,与连续数据的输入进行`concat`操作,合为一个整体输入,作为全链接层的原始输入。我们共设计了3层FC,每层FC的输出维度都为400,每层FC都后接一个`relu`激活函数,每层FC的初始化方式为符合正态分布的随机初始化,标准差与上一层的输出维度的平方根成反比。 ```python concated = fluid.layers.concat(sparse_embed_seq + inputs[0:1], axis=1) - + fc1 = fluid.layers.fc( input=concated, size=400, @@ -271,9 +271,9 @@ continuous_range_ = range(1, 14) categorical_range_ = range(14, 40) class CriteoDataset(dg.MultiSlotDataGenerator): - + def generate_sample(self, line): - + def reader(): features = line.rstrip('\n').split('\t') dense_feature = [] @@ -363,12 +363,12 @@ fleet.init(role) #必不可少的步骤,初始化节点! > PaddleCloudRoleMaker()是怎样判断当前节点所扮演的角色的? 
-> 
+>
> In Paddle's parameter-server mode, the environment variables of each node's machine are used to determine the role of the current node. To assign roles accurately, the following environment variables need to be specified on every node:
> #### Environment variables shared by all nodes
> - export PADDLE_TRAINERS_NUM=2 # number of trainer nodes
> - export PADDLE_PSERVERS_IP_PORT_LIST="127.0.0.1:36011,127.0.0.1:36012" # string composed of each pserver's ip:port pair
-> 
+>
> #### Pserver-specific environment variables
> - export TRAINING_ROLE=PSERVER # the current node's role is PSERVER
> - export PADDLE_PORT=36011 # communication port of the current PSERVER
@@ -376,7 +376,7 @@ fleet.init(role) #必不可少的步骤,初始化节点!
> #### Trainer-specific environment variables
> - export TRAINING_ROLE=TRAINER # the current node's role is TRAINER
> - export PADDLE_TRAINER_ID=0 # index of the current Trainer node, in the range [0, PADDLE_TRAINERS_NUM)
-> 
+>
> Once the environment variables above are specified, `PaddleCloudRoleMaker()` can run properly and decide the role of the current node.

@@ -388,7 +388,7 @@ Paddle的`参数服务器`模式分布式训练有很多种类型,根据通信
ctr_model = CTR()
inputs = ctr_model.input_data(args)
avg_cost, auc_var, batch_auc_var = ctr_model.net(inputs,args)
- 
+
# choose the optimization strategy for the backward update
optimizer = fluid.optimizer.Adam(args.learning_rate)
optimizer.minimize(avg_cost)
@@ -431,7 +431,7 @@ if fleet.is_server():
     fleet.run_server()
```
- Launching the Worker
- 
+
  To launch a trainer node: the node first calls `init_worker()` to complete its initialization, then executes `fleet.startup_program` to synchronize the initial parameter values from the servers. After that, exactly as in local training, it runs `fleet.main_program` to carry out the whole training process and saves the model. Finally it calls `fleet.stop_worker()` to shut the trainer node down.
```python
elif fleet.is_worker():
@@ -441,7 +441,7 @@ elif fleet.is_worker():
    # initialize fleet.startup_program, which contains the distributed logic
    exe.run(fleet.startup_program)
- 
+
    # bring in the dataset for data reading
    dataset = get_dataset(inputs,params)
@@ -458,10 +458,10 @@ elif fleet.is_worker():
        # by default, node 0 saves the model
        if params.test and fleet.is_first_worker():
            model_path = (str(params.model_path) + "/"+"epoch_" + str(epoch))
-            fluid.io.save_persistables(executor=exe, dirname=model_path)
-    
+            fleet.save_persistables(executor=exe, dirname=model_path)
+
    # training is finished; call stop_worker() to notify the pservers
-    fleet.stop_worker() 
+    fleet.stop_worker()
    logger.info("Distribute Train Success!")
    return train_result
```
@@ -504,7 +504,7 @@ sh local_cluster.sh
launches the simulated distributed run, which uses a 2x2 setup (2 trainers, 2 pservers) by default. The Trainer and Pserver logs are stored in the `./log/` folder and the saved models under `./models/`; with the default configuration, the expected output is:
- pserver.0.log
```bash
-get_pserver_program() is deprecated, call get_pserver_programs() to get pserver main and startup in a single call.
+
I1126 07:37:49.952580 15056 grpc_server.cc:477] Server listening on 127.0.0.1:36011 successful, selected port: 36011
```
@@ -558,9 +558,9 @@ I1126 07:38:28.947571 14715 communicator.cc:363] Communicator stop done
2. In many application scenarios, the model produced by distributed training is not the model actually deployed online: only the trained parameter values are reused, plugged into a different network for prediction. In such scenarios there is even less need to save the model structure.

> What are persistable (long-lived) variables?
-> 
+>
> In Paddle Fluid, model variables can be divided into the following types:
-> 
+>
> 1. Model parameters: the quantities that a deep learning model trains and learns. They are produced by `fluid.framework.Parameter()`, a subclass of `fluid.framework.Variable()`.
> 2. Persistable variables: variables that persist through the whole training process and are not destroyed when an iteration ends. All model parameters are persistable variables, but not all persistable variables are model parameters. A persistable variable is declared by setting the `persistable` attribute of a `fluid.framework.Variable()` to `True`. Persistable variables are the core parameters of the model.
> 3. Temporary variables: all variables outside the two categories above are temporary. They exist only within one training iteration, are destroyed at the end of each iteration, and are created anew when the next iteration starts — for example the input training data and the outputs of intermediate layers.
@@ -632,7 +632,7 @@ with fluid.framework.program_guard(test_program, startup_program):
```
This is easy to understand: at test time we want to start from scratch and keep the inference program clean, with no other influences.
- When creating the inference network, we add `with fluid.unique_name.guard():`, whose effect is to restart the automatic numbering of newly created parameters from zero. Paddle tells parameter `Variable`s apart by name: as long as the names match, the corresponding parameters can be located in the saved model.
- 
+
  The numbering of temporary variables created by paddle keeps incrementing; you can observe this when no variable name is specified, e.g. `fc_1.w_0`->`fc_2.w_0`. To share the same parameters, the numbering must be made to correspond.

### Reading the test data

@@ -774,14 +774,14 @@ python -u train.py --is_cloud=1

When running this command, if the pservers are not ready yet, the following appears in the log output:
> server not ready, wait 3 sec to retry...
-> 
+>
> not ready endpoints:['10.89.176.11:36000', '10.89.176.12:36000']

The worker process keeps waiting until the server starts listening, or until the wait times out.

Once the pservers are all ready, the log output shows:
> I0317 11:38:48.099179 16719 communicator.cc:271] Communicator start
-> 
+>
> I0317 11:38:49.838711 16719 rpc_client.h:107] init rpc client with trainer_id 0

At this point, distributed training is fully launched and training will begin. Good luck.
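
(To make the role-detection rules above concrete, here is a minimal single-process sketch. The module paths are the Paddle 1.8 incubate fleet paths; the port and ids are illustrative:)

```python
import os
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.base import role_maker
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet

# Pretend this process is trainer 0 of a job with 2 trainers and one
# pserver, using exactly the environment variables documented above.
os.environ["PADDLE_TRAINERS_NUM"] = "2"
os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36011"
os.environ["TRAINING_ROLE"] = "TRAINER"
os.environ["PADDLE_TRAINER_ID"] = "0"

role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)             # the mandatory initialization step
print(fleet.is_worker())     # True: TRAINING_ROLE says TRAINER
print(fleet.worker_index())  # 0: read from PADDLE_TRAINER_ID
```
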
diff --git a/PaddleRec/ctr/dnn/infer.py b/PaddleRec/ctr/dnn/infer.py
index 11bbdbad..19a97e30 100644
--- a/PaddleRec/ctr/dnn/infer.py
+++ b/PaddleRec/ctr/dnn/infer.py
@@ -30,8 +30,7 @@ logger.setLevel(logging.INFO)

def parse_args():
-    parser = argparse.ArgumentParser(
-        description="PaddlePaddle CTR-DNN example")
+    parser = argparse.ArgumentParser(description="PaddlePaddle CTR-DNN example")
     # -------------Data & Model Path-------------
     parser.add_argument(
         '--test_files_path',
@@ -54,8 +53,7 @@ def parse_args():
         '--infer_epoch',
         type=int,
         default=0,
-        help='Specify which epoch to run infer'
-    )
+        help='Specify which epoch to run infer')
     # -------------Network parameter-------------
     parser.add_argument(
         '--embedding_size',
@@ -68,10 +66,7 @@ def parse_args():
         default=1000001,
         help='sparse feature hashing space for index processing')
     parser.add_argument(
-        '--dense_feature_dim',
-        type=int,
-        default=13,
-        help='dense feature shape')
+        '--dense_feature_dim', type=int, default=13, help='dense feature shape')

     # -------------device parameter-------------
     parser.add_argument(
@@ -102,10 +97,11 @@ def run_infer(args, model_path):
     place = fluid.CPUPlace()
     train_generator = generator.CriteoDataset(args.sparse_feature_dim)
     file_list = [
-        os.path.join(args.test_files_path, x) for x in os.listdir(args.test_files_path)
+        os.path.join(args.test_files_path, x)
+        for x in os.listdir(args.test_files_path)
     ]
-    test_reader = paddle.batch(train_generator.test(file_list),
-                               batch_size=args.batch_size)
+    test_reader = fluid.io.batch(
+        train_generator.test(file_list), batch_size=args.batch_size)
     startup_program = fluid.framework.Program()
     test_program = fluid.framework.Program()
     ctr_model = CTR()
@@ -171,13 +167,15 @@ if __name__ == "__main__":
     model_list = []
     for _, dir, _ in os.walk(args.model_path):
         for model in dir:
-            if "epoch" in model and args.infer_epoch == int(model.split('_')[-1]):
+            if "epoch" in model and args.infer_epoch == int(
+                    model.split('_')[-1]):
                 path = os.path.join(args.model_path, model)
                 model_list.append(path)

     if len(model_list) == 0:
-        logger.info("There is no satisfactory model {} at path {}, please check your start command & env. ".format(
-            str("epoch_")+str(args.infer_epoch), args.model_path))
+        logger.info(
+            "There is no satisfactory model {} at path {}, please check your start command & env. ".
+ format(str("epoch_") + str(args.infer_epoch), args.model_path)) for model in model_list: logger.info("Test model {}".format(model)) diff --git a/PaddleRec/ctr/xdeepfm/cluster_train.py b/PaddleRec/ctr/xdeepfm/cluster_train.py deleted file mode 100644 index 77e1e152..00000000 --- a/PaddleRec/ctr/xdeepfm/cluster_train.py +++ /dev/null @@ -1,198 +0,0 @@ -import argparse -import os -import sys -import time -import network_conf - -import paddle.fluid as fluid -import utils - - -def parse_args(): - parser = argparse.ArgumentParser("xdeepfm cluster train.") - - parser.add_argument( - '--train_data_dir', - type=str, - default='data/train_data', - help='The path of train data (default: data/train_data)') - parser.add_argument( - '--test_data_dir', - type=str, - default='data/test_data', - help='The path of test data (default: models)') - parser.add_argument( - '--batch_size', - type=int, - default=100, - help="The size of mini-batch (default:100)") - parser.add_argument( - '--embedding_size', - type=int, - default=10, - help="The size for embedding layer (default:10)") - parser.add_argument( - '--num_epoch', - type=int, - default=10, - help="The number of epochs to train (default: 10)") - parser.add_argument( - '--model_output_dir', - type=str, - required=True, - help='The path for model to store (default: models)') - parser.add_argument( - '--num_thread', - type=int, - default=1, - help='The number of threads (default: 1)') - parser.add_argument('--test_epoch', type=str, default='1') - parser.add_argument( - '--layer_sizes_dnn', - nargs='+', - type=int, - default=[10, 10, 10], - help='The size of each layers') - parser.add_argument( - '--layer_sizes_cin', - nargs='+', - type=int, - default=[10, 10], - help='The size of each layers') - parser.add_argument( - '--act', - type=str, - default='relu', - help='The activation of each layers (default: relu)') - parser.add_argument( - '--lr', type=float, default=1e-1, help='Learning rate (default: 1e-4)') - parser.add_argument( - '--reg', type=float, default=1e-4, help=' (default: 1e-4)') - parser.add_argument('--num_field', type=int, default=39) - parser.add_argument('--num_feat', type=int, default=28651) - parser.add_argument( - '--model_name', - type=str, - default='ctr_xdeepfm_model', - help='The name of model (default: ctr_xdeepfm_model)') - parser.add_argument('--use_gpu', type=int, default=1) - parser.add_argument('--print_steps', type=int, default=50) - parser.add_argument('--is_local', type=int, default=1, help='whether local') - parser.add_argument( - '--is_sparse', - action='store_true', - required=False, - default=False, - help='embedding will use sparse or not, (default: False)') - - # dist params - parser.add_argument( - '--num_devices', type=int, default=1, help='Number of GPU devices') - parser.add_argument( - '--role', type=str, default='pserver', help='trainer or pserver') - parser.add_argument( - '--endpoints', - type=str, - default='127.0.0.1:6000', - help='The pserver endpoints, like: 127.0.0.1:6000, 127.0.0.1:6001') - parser.add_argument( - '--current_endpoint', - type=str, - default='127.0.0.1:6000', - help='The current_endpoint') - parser.add_argument( - '--trainer_id', - type=int, - default=0, - help='trainer id ,only trainer_id=0 save model') - parser.add_argument( - '--trainers', - type=int, - default=1, - help='The num of trianers, (default: 1)') - args = parser.parse_args() - return args - - -def train(): - """ do training """ - args = parse_args() - print(args) - - if not os.path.isdir(args.model_output_dir): - 
os.mkdir(args.model_output_dir) - - loss, auc, data_list, auc_states = eval('network_conf.' + args.model_name)( - args.embedding_size, args.num_field, args.num_feat, - args.layer_sizes_dnn, args.act, args.reg, args.layer_sizes_cin, - args.is_sparse) - optimizer = fluid.optimizer.SGD( - learning_rate=args.lr, - regularization=fluid.regularizer.L2DecayRegularizer(args.reg)) - optimizer.minimize(loss) - - def train_loop(main_program): - """ train network """ - start_time = time.time() - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_use_var(data_list) - dataset.set_pipe_command('python criteo_reader.py') - dataset.set_batch_size(args.batch_size) - dataset.set_filelist([ - os.path.join(args.train_data_dir, x) - for x in os.listdir(args.train_data_dir) - ]) - - if args.use_gpu == 1: - exe = fluid.Executor(fluid.CUDAPlace(0)) - dataset.set_thread(1) - else: - exe = fluid.Executor(fluid.CPUPlace()) - dataset.set_thread(args.num_thread) - exe.run(fluid.default_startup_program()) - - for epoch_id in range(args.num_epoch): - start = time.time() - sys.stderr.write('\nepoch%d start ...\n' % (epoch_id + 1)) - exe.train_from_dataset( - program=main_program, - dataset=dataset, - fetch_list=[loss, auc], - fetch_info=['loss', 'auc'], - debug=False, - print_period=args.print_steps) - model_dir = os.path.join(args.model_output_dir, - 'epoch_' + str(epoch_id + 1), "checkpoint") - sys.stderr.write('epoch%d is finished and takes %f s\n' % ( - (epoch_id + 1), time.time() - start)) - if args.trainer_id == 0: # only trainer 0 save model - print("save model in {}".format(model_dir)) - fluid.save(main_program, model_dir) - - print("train time cost {:.4f}".format(time.time() - start_time)) - print("finish training") - - if args.is_local: - print("run local training") - train_loop(fluid.default_main_program()) - else: - print("run distribute training") - t = fluid.DistributeTranspiler() - t.transpile( - args.trainer_id, pservers=args.endpoints, trainers=args.trainers) - if args.role == "pserver": - print("run psever") - pserver_prog, pserver_startup = t.get_pserver_programs( - args.current_endpoint) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif args.role == "trainer": - print("run trainer") - train_loop(t.get_trainer_program()) - - -if __name__ == "__main__": - utils.check_version() - train() diff --git a/PaddleRec/ctr/xdeepfm/cluster_train.sh b/PaddleRec/ctr/xdeepfm/cluster_train.sh deleted file mode 100755 index c818a01e..00000000 --- a/PaddleRec/ctr/xdeepfm/cluster_train.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -#export GLOG_v=30 -#export GLOG_logtostderr=1 - -# start pserver0 -python -u cluster_train.py \ - --train_data_dir data/train_data \ - --model_output_dir cluster_model \ - --is_local 0 \ - --is_sparse \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6000 \ - --trainers 2 \ - > pserver0.log 2>&1 & - -# start pserver1 -python -u cluster_train.py \ - --train_data_dir data/train_data \ - --model_output_dir cluster_model \ - --is_local 0 \ - --is_sparse \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6001 \ - --trainers 2 \ - > pserver1.log 2>&1 & - -# start trainer0 -#CUDA_VISIBLE_DEVICES=1 python cluster_train.py \ -python -u cluster_train.py \ - --train_data_dir data/train_data \ - --model_output_dir cluster_model \ - --use_gpu 0 \ - --is_local 0 \ - --is_sparse \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - 
--trainers 2 \ - --trainer_id 0 \ - > trainer0.log 2>&1 & - -# start trainer1 -#CUDA_VISIBLE_DEVICES=2 python cluster_train.py \ -python -u cluster_train.py \ - --train_data_dir data/train_data \ - --model_output_dir cluster_model \ - --use_gpu 0 \ - --is_local 0 \ - --is_sparse \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 1 \ - > trainer1.log 2>&1 & - -echo "2 pservers and 2 trainers started." \ No newline at end of file diff --git a/PaddleRec/ctr/xdeepfm/infer.py b/PaddleRec/ctr/xdeepfm/infer.py index 2b0ac131..cd809fb8 100644 --- a/PaddleRec/ctr/xdeepfm/infer.py +++ b/PaddleRec/ctr/xdeepfm/infer.py @@ -30,7 +30,7 @@ def infer(): for x in os.listdir(args.test_data_dir) ] criteo_dataset = CriteoDataset() - test_reader = paddle.batch( + test_reader = fluid.io.batch( criteo_dataset.test(test_files), batch_size=args.batch_size) startup_program = fluid.framework.Program() diff --git a/PaddleRec/gru4rec/README.md b/PaddleRec/gru4rec/README.md index b070cd76..40ffb90c 100644 --- a/PaddleRec/gru4rec/README.md +++ b/PaddleRec/gru4rec/README.md @@ -11,8 +11,6 @@ ├── infer_sample_neg.py # 预测脚本 sample负例 ├── net.py # 网络结构 ├── text2paddle.py # 文本数据转paddle数据 -├── cluster_train.py # 多机训练 -├── cluster_train.sh # 多机训练脚本 ├── utils # 通用函数 ├── convert_format.py # 转换数据格式 ├── vocab.txt # 小样本字典 @@ -168,7 +166,7 @@ CUDA_VISIBLE_DEVICES=0 python train_sample_neg.py --loss ce --use_cuda 1 可在[net.py](./net.py) `network` 函数中调整网络结构,当前的网络结构如下: ```python -emb = fluid.layers.embedding( +emb = fluid.embedding( input=src, size=[vocab_size, hid_size], param_attr=fluid.ParamAttr( @@ -278,12 +276,3 @@ model:model_r@20/epoch_10 recall@20:0.681 time_cost(s):12.2 ## 多机训练 厂内用户可以参考[wiki](http://wiki.baidu.com/pages/viewpage.action?pageId=628300529)利用paddlecloud 配置多机环境 - -可参考cluster_train.py 配置其他多机环境 - -运行命令本地模拟多机场景, 暂不支持windows -``` -sh cluster_train.sh -``` - -注意本地模拟需要关闭代理 diff --git a/PaddleRec/gru4rec/cluster_train.py b/PaddleRec/gru4rec/cluster_train.py deleted file mode 100644 index f50542bf..00000000 --- a/PaddleRec/gru4rec/cluster_train.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import sys -import time -import six -import numpy as np -import math -import argparse -import paddle.fluid as fluid -import paddle -import time -import utils -import net - -SEED = 102 - - -def parse_args(): - parser = argparse.ArgumentParser("gru4rec benchmark.") - parser.add_argument( - '--train_dir', - type=str, - default='train_data', - help='train file address') - parser.add_argument( - '--vocab_path', - type=str, - default='vocab.txt', - help='vocab file address') - parser.add_argument('--is_local', type=int, default=1, help='whether local') - parser.add_argument('--hid_size', type=int, default=100, help='hid size') - parser.add_argument( - '--model_dir', type=str, default='model_recall20', help='model dir') - parser.add_argument( - '--batch_size', type=int, default=5, help='num of batch size') - parser.add_argument('--pass_num', type=int, default=10, help='num of epoch') - parser.add_argument( - '--print_batch', type=int, default=10, help='num of print batch') - parser.add_argument( - '--use_cuda', type=int, default=0, help='whether use gpu') - parser.add_argument( - '--base_lr', type=float, default=0.01, help='learning rate') - parser.add_argument( - '--num_devices', type=int, default=1, help='Number of GPU devices') - parser.add_argument( - '--role', type=str, default='pserver', help='trainer or pserver') - parser.add_argument( - '--endpoints', - type=str, - default='127.0.0.1:6000', 
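
(The `fluid.layers.embedding` → `fluid.embedding` change in the gru4rec README above also changes shape handling: the 1.8 API consumes the id tensor as declared and appends the embedding width to it. A runnable sketch, with illustrative names and sizes:)

```python
import numpy as np
import paddle.fluid as fluid

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    # fluid.data declares the batch dimension explicitly as None.
    ids = fluid.data(name="ids", shape=[None, 1], dtype="int64")
    # fluid.embedding appends the embedding width to the input shape.
    emb = fluid.embedding(input=ids, size=[100, 16], is_sparse=True)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)
out, = exe.run(main_prog,
               feed={"ids": np.array([[3], [7]], dtype="int64")},
               fetch_list=[emb])
print(out.shape)  # (2, 1, 16)
```
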
- help='The pserver endpoints, like: 127.0.0.1:6000, 127.0.0.1:6001') - parser.add_argument( - '--current_endpoint', - type=str, - default='127.0.0.1:6000', - help='The current_endpoint') - parser.add_argument( - '--trainer_id', - type=int, - default=0, - help='trainer id ,only trainer_id=0 save model') - parser.add_argument( - '--trainers', - type=int, - default=1, - help='The num of trianers, (default: 1)') - args = parser.parse_args() - return args - - -def get_cards(args): - return args.num_devices - - -def train(): - """ do training """ - args = parse_args() - hid_size = args.hid_size - train_dir = args.train_dir - vocab_path = args.vocab_path - use_cuda = True if args.use_cuda else False - print("use_cuda:", use_cuda) - batch_size = args.batch_size - vocab_size, train_reader = utils.prepare_data( - train_dir, vocab_path, batch_size=batch_size * get_cards(args),\ - buffer_size=1000, word_freq_threshold=0, is_train=True) - - # Train program - src_wordseq, dst_wordseq, avg_cost, acc = net.all_vocab_network( - vocab_size=vocab_size, hid_size=hid_size) - - # Optimization to minimize lost - sgd_optimizer = fluid.optimizer.SGD(learning_rate=args.base_lr) - sgd_optimizer.minimize(avg_cost) - - def train_loop(main_program): - """ train network """ - pass_num = args.pass_num - model_dir = args.model_dir - fetch_list = [avg_cost.name] - - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - total_time = 0.0 - for pass_idx in six.moves.xrange(pass_num): - epoch_idx = pass_idx + 1 - print("epoch_%d start" % epoch_idx) - - t0 = time.time() - i = 0 - newest_ppl = 0 - for data in train_reader(): - i += 1 - lod_src_wordseq = utils.to_lodtensor([dat[0] for dat in data], - place) - lod_dst_wordseq = utils.to_lodtensor([dat[1] for dat in data], - place) - ret_avg_cost = exe.run(main_program, - feed={ - "src_wordseq": lod_src_wordseq, - "dst_wordseq": lod_dst_wordseq - }, - fetch_list=fetch_list) - avg_ppl = np.exp(ret_avg_cost[0]) - newest_ppl = np.mean(avg_ppl) - if i % args.print_batch == 0: - print("step:%d ppl:%.3f" % (i, newest_ppl)) - - t1 = time.time() - total_time += t1 - t0 - print("epoch:%d num_steps:%d time_cost(s):%f" % - (epoch_idx, i, total_time / epoch_idx)) - save_dir = "%s/epoch_%d" % (model_dir, epoch_idx) - feed_var_names = ["src_wordseq", "dst_wordseq"] - fetch_vars = [avg_cost, acc] - if args.trainer_id == 0: - fluid.io.save_inference_model(save_dir, feed_var_names, - fetch_vars, exe) - print("model saved in %s" % save_dir) - print("finish training") - - if args.is_local: - print("run local training") - train_loop(fluid.default_main_program()) - else: - print("run distribute training") - t = fluid.DistributeTranspiler() - t.transpile( - args.trainer_id, pservers=args.endpoints, trainers=args.trainers) - if args.role == "pserver": - print("run psever") - pserver_prog = t.get_pserver_program(args.current_endpoint) - pserver_startup = t.get_startup_program(args.current_endpoint, - pserver_prog) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif args.role == "trainer": - print("run trainer") - train_loop(t.get_trainer_program()) - - -if __name__ == "__main__": - train() diff --git a/PaddleRec/gru4rec/cluster_train.sh b/PaddleRec/gru4rec/cluster_train.sh deleted file mode 100644 index 2711ffad..00000000 --- a/PaddleRec/gru4rec/cluster_train.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -#export GLOG_v=30 -#export GLOG_logtostderr=1 - -# start pserver0 
-python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --vocab_path vocab.txt \ - --batch_size 5 \ - --is_local 0 \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6000 \ - --trainers 2 \ - > pserver0.log 2>&1 & - -# start pserver1 -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --vocab_path vocab.txt \ - --batch_size 5 \ - --is_local 0 \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6001 \ - --trainers 2 \ - > pserver1.log 2>&1 & - -# start trainer0 -#CUDA_VISIBLE_DEVICES=1 python cluster_train.py \ -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --vocab_path vocab.txt \ - --batch_size 5 \ - --print_batch 10 \ - --use_cuda 0 \ - --is_local 0 \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 0 \ - > trainer0.log 2>&1 & - -# start trainer1 -#CUDA_VISIBLE_DEVICES=2 python cluster_train.py \ -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --vocab_path vocab.txt \ - --batch_size 5 \ - --print_batch 10 \ - --use_cuda 0 \ - --is_local 0 \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 1 \ - > trainer1.log 2>&1 & diff --git a/PaddleRec/gru4rec/utils.py b/PaddleRec/gru4rec/utils.py index 424ebf78..d853770b 100644 --- a/PaddleRec/gru4rec/utils.py +++ b/PaddleRec/gru4rec/utils.py @@ -103,7 +103,7 @@ def prepare_data(file_dir, if is_train and 'ce_mode' not in os.environ: vocab_size = get_vocab_size(vocab_path) reader = sort_batch( - paddle.reader.shuffle( + fluid.io.shuffle( train( file_dir, buffer_size, data_type=DataType.SEQ), buf_size=buffer_size), diff --git a/PaddleRec/multiview_simnet/infer.py b/PaddleRec/multiview_simnet/infer.py index e9136588..89ceb8cb 100644 --- a/PaddleRec/multiview_simnet/infer.py +++ b/PaddleRec/multiview_simnet/infer.py @@ -102,8 +102,8 @@ def parse_args(): def start_infer(args, model_path): dataset = reader.SyntheticDataset(args.sparse_feature_dim, args.query_slots, args.title_slots) - test_reader = paddle.batch( - paddle.reader.shuffle( + test_reader = fluid.io.batch( + fluid.io.shuffle( dataset.valid(), buf_size=args.batch_size * 100), batch_size=args.batch_size) place = fluid.CPUPlace() diff --git a/PaddleRec/multiview_simnet/train.py b/PaddleRec/multiview_simnet/train.py index fd8de506..95c9b24f 100644 --- a/PaddleRec/multiview_simnet/train.py +++ b/PaddleRec/multiview_simnet/train.py @@ -112,8 +112,8 @@ def start_train(args): dataset = reader.SyntheticDataset(args.sparse_feature_dim, args.query_slots, args.title_slots) - train_reader = paddle.batch( - paddle.reader.shuffle( + train_reader = fluid.io.batch( + fluid.io.shuffle( dataset.train(), buf_size=args.batch_size * 100), batch_size=args.batch_size) place = fluid.CPUPlace() diff --git a/PaddleRec/ncf/evaluate.py b/PaddleRec/ncf/evaluate.py index a8becd1b..1a655e45 100644 --- a/PaddleRec/ncf/evaluate.py +++ b/PaddleRec/ncf/evaluate.py @@ -1,5 +1,5 @@ import math -import heapq # for retrieval topK +import heapq # for retrieval topK import multiprocessing import numpy as np from time import time @@ -23,30 +23,36 @@ _K = None _args = None _model_path = None + def run_infer(args, model_path, test_data_path): test_data_generator = utils.CriteoDataset() - + with fluid.scope_guard(fluid.Scope()): - test_reader = paddle.batch(test_data_generator.test(test_data_path, False), 
batch_size=args.test_batch_size) - + test_reader = fluid.io.batch( + test_data_generator.test(test_data_path, False), + batch_size=args.test_batch_size) + place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(model_path, exe) + infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model( + model_path, exe) for data in test_reader(): user_input = np.array([dat[0] for dat in data]) item_input = np.array([dat[1] for dat in data]) - pred_val = exe.run(infer_program, - feed={"user_input": user_input, - "item_input": item_input}, - fetch_list=fetch_vars, - return_numpy=True) - + pred_val = exe.run( + infer_program, + feed={"user_input": user_input, + "item_input": item_input}, + fetch_list=fetch_vars, + return_numpy=True) + return pred_val[0].reshape(1, -1).tolist()[0] + def evaluate_model(args, testRatings, testNegatives, K, model_path): """ Evaluate the performance (Hit_Ratio, NDCG) of top-K recommendation @@ -56,22 +62,23 @@ def evaluate_model(args, testRatings, testNegatives, K, model_path): global _testRatings global _testNegatives global _K - global _model_path + global _model_path global _args - + _args = args - _model_path= model_path + _model_path = model_path _testRatings = testRatings _testNegatives = testNegatives _K = K - - hits, ndcgs = [],[] + + hits, ndcgs = [], [] for idx in range(len(_testRatings)): - (hr,ndcg) = eval_one_rating(idx) + (hr, ndcg) = eval_one_rating(idx) hits.append(hr) - ndcgs.append(ndcg) + ndcgs.append(ndcg) return (hits, ndcgs) + def eval_one_rating(idx): rating = _testRatings[idx] items = _testNegatives[idx] @@ -80,9 +87,9 @@ def eval_one_rating(idx): items.append(gtItem) # Get prediction scores map_item_score = {} - users = np.full(len(items), u, dtype = 'int32') - users = users.reshape(-1,1) - items_array = np.array(items).reshape(-1,1) + users = np.full(len(items), u, dtype='int32') + users = users.reshape(-1, 1) + items_array = np.array(items).reshape(-1, 1) temp = np.hstack((users, items_array)) np.savetxt("Data/test.txt", temp, fmt='%d', delimiter=',') predictions = run_infer(_args, _model_path, _args.test_data_path) @@ -91,7 +98,7 @@ def eval_one_rating(idx): item = items[i] map_item_score[item] = predictions[i] items.pop() - + # Evaluate top rank list ranklist = heapq.nlargest(_K, map_item_score, key=map_item_score.get) hr = getHitRatio(ranklist, gtItem) @@ -99,15 +106,17 @@ def eval_one_rating(idx): return (hr, ndcg) + def getHitRatio(ranklist, gtItem): for item in ranklist: if item == gtItem: return 1 return 0 + def getNDCG(ranklist, gtItem): for i in range(len(ranklist)): item = ranklist[i] if item == gtItem: - return math.log(2) / math.log(i+2) + return math.log(2) / math.log(i + 2) return 0 diff --git a/PaddleRec/ssr/README.md b/PaddleRec/ssr/README.md index 6abc5240..57b3503b 100644 --- a/PaddleRec/ssr/README.md +++ b/PaddleRec/ssr/README.md @@ -43,9 +43,6 @@ cpu 单机多卡训练 CPU_NUM=10 python train.py --train_dir train_data --use_cuda 0 --parallel 1 --batch_size 50 --model_dir model_output --num_devices 10 ``` -本地模拟多机训练, 不支持windows. -``` bash -sh cluster_train.sh ``` ## Inference diff --git a/PaddleRec/ssr/cluster_train.py b/PaddleRec/ssr/cluster_train.py deleted file mode 100644 index 0b76934f..00000000 --- a/PaddleRec/ssr/cluster_train.py +++ /dev/null @@ -1,207 +0,0 @@ -#Copyright (c) 2016 PaddlePaddle Authors. 
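
(The ncf `getHitRatio`/`getNDCG` helpers reformatted above compute the two metrics named in `evaluate_model`'s docstring; a tiny worked example of what they return:)

```python
import math

def hit_ratio(ranklist, gt_item):
    # HR@K: 1 if the ground-truth item appears anywhere in the top-K.
    return 1 if gt_item in ranklist else 0

def ndcg(ranklist, gt_item):
    # NDCG@K with one relevant item: log(2)/log(rank + 2), as above.
    for i, item in enumerate(ranklist):
        if item == gt_item:
            return math.log(2) / math.log(i + 2)
    return 0.0

top5 = [7, 9, 42, 1, 5]     # toy top-5 ranking
print(hit_ratio(top5, 42))  # 1   (the item made the list)
print(ndcg(top5, 42))       # 0.5 (ranked third: log(2)/log(4))
```
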
All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os -import sys -import time -import argparse -import logging -import paddle.fluid as fluid -import paddle -import utils -import numpy as np -from nets import SequenceSemanticRetrieval - -logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s") -logger = logging.getLogger("fluid") -logger.setLevel(logging.INFO) - - -def parse_args(): - parser = argparse.ArgumentParser("sequence semantic retrieval") - parser.add_argument( - "--train_dir", type=str, default='train_data', help="Training file") - parser.add_argument( - "--base_lr", type=float, default=0.01, help="learning rate") - parser.add_argument( - '--vocab_path', type=str, default='vocab.txt', help='vocab file') - parser.add_argument( - "--epochs", type=int, default=10, help="Number of epochs") - parser.add_argument( - '--parallel', type=int, default=0, help='whether parallel') - parser.add_argument( - '--use_cuda', type=int, default=0, help='whether use gpu') - parser.add_argument( - '--print_batch', type=int, default=10, help='num of print batch') - parser.add_argument( - '--model_dir', type=str, default='model_output', help='model dir') - parser.add_argument( - "--hidden_size", type=int, default=128, help="hidden size") - parser.add_argument( - "--batch_size", type=int, default=50, help="number of batch") - parser.add_argument( - "--embedding_dim", type=int, default=128, help="embedding dim") - parser.add_argument( - '--num_devices', type=int, default=1, help='Number of GPU devices') - parser.add_argument( - '--step_num', type=int, default=1000, help='Number of steps') - parser.add_argument( - '--enable_ce', - action='store_true', - help='If set, run the task with continuous evaluation logs.') - parser.add_argument( - '--role', type=str, default='pserver', help='trainer or pserver') - parser.add_argument( - '--endpoints', - type=str, - default='127.0.0.1:6000', - help='The pserver endpoints, like: 127.0.0.1:6000, 127.0.0.1:6001') - parser.add_argument( - '--current_endpoint', - type=str, - default='127.0.0.1:6000', - help='The current_endpoint') - parser.add_argument( - '--trainer_id', - type=int, - default=0, - help='trainer id ,only trainer_id=0 save model') - parser.add_argument( - '--trainers', - type=int, - default=1, - help='The num of trianers, (default: 1)') - return parser.parse_args() - - -def get_cards(args): - return args.num_devices - - -def train_loop(main_program, avg_cost, acc, train_input_data, place, args, - train_reader): - data_list = [var.name for var in train_input_data] - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - loader = fluid.io.DataLoader.from_generator( - feed_list=train_input_data, capacity=10000, iterable=True) - loader.set_sample_list_generator(train_reader, places=place) - train_exe = exe - - total_time = 0.0 - ce_info = [] - for pass_id in range(args.epochs): - epoch_idx = pass_id + 1 - print("epoch_%d start" % epoch_idx) - t0 = time.time() - i = 0 - for batch_id, data in 
enumerate(loader()): - i += 1 - loss_val, correct_val = train_exe.run( - feed=data, fetch_list=[avg_cost.name, acc.name]) - ce_info.append(float(np.mean(correct_val)) / args.batch_size) - if i % args.print_batch == 0: - logger.info( - "Train --> pass: {} batch_id: {} avg_cost: {}, acc: {}". - format(pass_id, batch_id, - np.mean(loss_val), - float(np.mean(correct_val)) / args.batch_size)) - if args.enable_ce and i > args.step_num: - break - t1 = time.time() - total_time += t1 - t0 - print("epoch:%d num_steps:%d time_cost(s):%f" % - (epoch_idx, i, total_time / epoch_idx)) - save_dir = "%s/epoch_%d" % (args.model_dir, epoch_idx) - fluid.save(fluid.default_main_program(), save_dir) - print("model saved in %s" % save_dir) - - # only for ce - if args.enable_ce: - ce_acc = 0 - try: - ce_acc = ce_info[-2] - except: - print("ce info error") - epoch_idx = args.epochs - device = get_device(args) - if args.use_cuda: - gpu_num = device[1] - print("kpis\teach_pass_duration_gpu%s\t%s" % - (gpu_num, total_time / epoch_idx)) - print("kpis\ttrain_acc_gpu%s\t%s" % (gpu_num, ce_acc)) - else: - cpu_num = device[1] - threads_num = device[2] - print("kpis\teach_pass_duration_cpu%s_thread%s\t%s" % - (cpu_num, threads_num, total_time / epoch_idx)) - print("kpis\ttrain_acc_cpu%s_thread%s\t%s" % - (cpu_num, threads_num, ce_acc)) - - -def train(args): - if args.enable_ce: - SEED = 102 - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED - use_cuda = True if args.use_cuda else False - parallel = True if args.parallel else False - print("use_cuda:", use_cuda, "parallel:", parallel) - train_reader, vocab_size = utils.construct_train_data( - args.train_dir, args.vocab_path, args.batch_size * get_cards(args)) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - ssr = SequenceSemanticRetrieval(vocab_size, args.embedding_dim, - args.hidden_size) - # Train program - train_input_data, cos_pos, avg_cost, acc = ssr.train() - - # Optimization to minimize lost - optimizer = fluid.optimizer.Adagrad(learning_rate=args.base_lr) - optimizer.minimize(avg_cost) - - print("run distribute training") - t = fluid.DistributeTranspiler() - t.transpile( - args.trainer_id, pservers=args.endpoints, trainers=args.trainers) - if args.role == "pserver": - print("run psever") - pserver_prog = t.get_pserver_program(args.current_endpoint) - pserver_startup = t.get_startup_program(args.current_endpoint, - pserver_prog) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif args.role == "trainer": - print("run trainer") - train_loop(t.get_trainer_program(), avg_cost, acc, train_input_data, - place, args, train_reader) - - -def get_device(args): - if args.use_cuda: - gpus = os.environ.get("CUDA_VISIBLE_DEVICES", 1) - gpu_num = len(gpus.split(',')) - return "gpu", gpu_num - else: - threads_num = os.environ.get('NUM_THREADS', 1) - cpu_num = os.environ.get('CPU_NUM', 1) - return "cpu", int(cpu_num), int(threads_num) - - -def main(): - args = parse_args() - train(args) - - -if __name__ == "__main__": - main() diff --git a/PaddleRec/ssr/cluster_train.sh b/PaddleRec/ssr/cluster_train.sh deleted file mode 100644 index aeb1d9c5..00000000 --- a/PaddleRec/ssr/cluster_train.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -#export GLOG_v=30 -#export GLOG_logtostderr=1 - -# start pserver0 -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --vocab_path vocab.txt \ - --batch_size 5 \ - --role pserver \ - --endpoints 
127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6000 \ - --trainers 2 \ - > pserver0.log 2>&1 & - -# start pserver1 -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --vocab_path vocab.txt \ - --batch_size 5 \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6001 \ - --trainers 2 \ - > pserver1.log 2>&1 & - -# start trainer0 -#CUDA_VISIBLE_DEVICES=1 python cluster_train.py \ -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --vocab_path vocab.txt \ - --batch_size 5 \ - --print_batch 10 \ - --use_cuda 0 \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 0 \ - > trainer0.log 2>&1 & - -# start trainer1 -#CUDA_VISIBLE_DEVICES=2 python cluster_train.py \ -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --vocab_path vocab.txt \ - --batch_size 5 \ - --print_batch 10 \ - --use_cuda 0 \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 1 \ - > trainer1.log 2>&1 & diff --git a/PaddleRec/ssr/infer.py b/PaddleRec/ssr/infer.py index c1bc8a71..915cea92 100644 --- a/PaddleRec/ssr/infer.py +++ b/PaddleRec/ssr/infer.py @@ -37,16 +37,16 @@ def parse_args(): def model(vocab_size, emb_size, hidden_size): - user_data = fluid.layers.data( - name="user", shape=[1], dtype="int64", lod_level=1) - all_item_data = fluid.layers.data( - name="all_item", shape=[vocab_size, 1], dtype="int64") + user_data = fluid.data( + name="user", shape=[None, 1], dtype="int64", lod_level=1) + all_item_data = fluid.data( + name="all_item", shape=[None, vocab_size, 1], dtype="int64") - user_emb = fluid.layers.embedding( + user_emb = fluid.embedding( input=user_data, size=[vocab_size, emb_size], param_attr="emb.item") - all_item_emb = fluid.layers.embedding( + all_item_emb = fluid.embedding( input=all_item_data, size=[vocab_size, emb_size], param_attr="emb.item") - all_item_emb_re = fluid.layers.reshape(x=all_item_emb, shape=[-1, emb_size]) + all_item_emb_re = all_item_emb user_encoder = net.GrnnEncoder(hidden_size=hidden_size) user_enc = user_encoder.forward(user_emb) @@ -63,7 +63,7 @@ def model(vocab_size, emb_size, hidden_size): bias_attr="item.b") cos_item = fluid.layers.cos_sim(X=all_item_hid, Y=user_re) all_pre_ = fluid.layers.reshape(x=cos_item, shape=[-1, vocab_size]) - pos_label = fluid.layers.data(name="pos_label", shape=[1], dtype="int64") + pos_label = fluid.data(name="pos_label", shape=[None, 1], dtype="int64") acc = fluid.layers.accuracy(input=all_pre_, label=pos_label, k=20) return acc diff --git a/PaddleRec/ssr/utils.py b/PaddleRec/ssr/utils.py index 65571cb0..a5b6decf 100644 --- a/PaddleRec/ssr/utils.py +++ b/PaddleRec/ssr/utils.py @@ -18,7 +18,7 @@ def construct_train_data(file_dir, vocab_path, batch_size): files = [file_dir + '/' + f for f in os.listdir(file_dir)] y_data = reader.YoochooseDataset(vocab_size) train_reader = fluid.io.batch( - paddle.reader.shuffle( + fluid.io.shuffle( y_data.train(files), buf_size=batch_size * 100), batch_size=batch_size) return train_reader, vocab_size diff --git a/PaddleRec/tagspace/README.md b/PaddleRec/tagspace/README.md index 1980b06d..2261e64f 100644 --- a/PaddleRec/tagspace/README.md +++ b/PaddleRec/tagspace/README.md @@ -9,8 +9,6 @@ ├── infer.py # 预测脚本 ├── net.py # 网络结构 ├── text2paddle.py # 文本数据转paddle数据 -├── cluster_train.py # 多机训练 -├── cluster_train.sh # 多机训练脚本 ├── utils # 通用函数 ├── vocab_text.txt # 小样本文本字典 ├── 
vocab_tag.txt # 小样本类别字典 @@ -89,9 +87,3 @@ python infer.py ``` python infer.py --model_dir big_model --vocab_tag_path big_vocab_tag.txt --test_dir test_big_data/ ``` - -## 本地模拟多机 -运行命令 -``` -sh cluster_train.py -``` diff --git a/PaddleRec/tagspace/cluster_train.py b/PaddleRec/tagspace/cluster_train.py deleted file mode 100644 index 96cdf615..00000000 --- a/PaddleRec/tagspace/cluster_train.py +++ /dev/null @@ -1,137 +0,0 @@ -import os -import sys -import time -import six -import numpy as np -import math -import argparse -import paddle -import paddle.fluid as fluid -import time -import utils -import net - -SEED = 102 - -def parse_args(): - parser = argparse.ArgumentParser("TagSpace benchmark.") - parser.add_argument( - '--neg_size', type=int, default=3, help='neg/pos ratio') - parser.add_argument( - '--train_dir', type=str, default='train_data', help='train file address') - parser.add_argument( - '--vocab_text_path', type=str, default='vocab_text.txt', help='vocab_text file address') - parser.add_argument( - '--vocab_tag_path', type=str, default='vocab_tag.txt', help='vocab_text file address') - parser.add_argument( - '--is_local', type=int, default=1, help='whether local') - parser.add_argument( - '--model_dir', type=str, default='model_', help='model dir') - parser.add_argument( - '--batch_size', type=int, default=5, help='num of batch size') - parser.add_argument( - '--print_batch', type=int, default=10, help='num of print batch') - parser.add_argument( - '--pass_num', type=int, default=10, help='num of epoch') - parser.add_argument( - '--use_cuda', type=int, default=0, help='whether use gpu') - parser.add_argument( - '--base_lr', type=float, default=0.01, help='learning rate') - parser.add_argument( - '--num_devices', type=int, default=1, help='Number of GPU devices') - parser.add_argument( - '--role', type=str, default='pserver', help='trainer or pserver') - parser.add_argument( - '--endpoints', type=str, default='127.0.0.1:6000', help='The pserver endpoints, like: 127.0.0.1:6000, 127.0.0.1:6001') - parser.add_argument( - '--current_endpoint', type=str, default='127.0.0.1:6000', help='The current_endpoint') - parser.add_argument( - '--trainer_id', type=int, default=0, help='trainer id ,only trainer_id=0 save model') - parser.add_argument( - '--trainers', type=int, default=1, help='The num of trianers, (default: 1)') - args = parser.parse_args() - return args - -def get_cards(args): - return args.num_devices - -def train(): - """ do training """ - args = parse_args() - train_dir = args.train_dir - vocab_text_path = args.vocab_text_path - vocab_tag_path = args.vocab_tag_path - use_cuda = True if args.use_cuda else False - batch_size = args.batch_size - neg_size = args.neg_size - vocab_text_size, vocab_tag_size, train_reader = utils.prepare_data( - file_dir=train_dir, vocab_text_path=vocab_text_path, - vocab_tag_path=vocab_tag_path, neg_size=neg_size, - batch_size=batch_size * get_cards(args), - buffer_size=batch_size*100, is_train=True) - """ train network """ - # Train program - avg_cost, correct, cos_pos = net.network(vocab_text_size, vocab_tag_size, neg_size=neg_size) - - # Optimization to minimize lost - sgd_optimizer = fluid.optimizer.SGD(learning_rate=args.base_lr) - sgd_optimizer.minimize(avg_cost) - - def train_loop(main_program): - # Initialize executor - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - pass_num = args.pass_num - model_dir = args.model_dir - fetch_list = [avg_cost.name] - exe.run(fluid.default_startup_program()) - 
total_time = 0.0 - for pass_idx in range(pass_num): - epoch_idx = pass_idx + 1 - print("epoch_%d start" % epoch_idx) - t0 = time.time() - for batch_id, data in enumerate(train_reader()): - lod_text_seq = utils.to_lodtensor([dat[0] for dat in data], place) - lod_pos_tag = utils.to_lodtensor([dat[1] for dat in data], place) - lod_neg_tag = utils.to_lodtensor([dat[2] for dat in data], place) - loss_val, correct_val = exe.run( - feed={ - "text": lod_text_seq, - "pos_tag": lod_pos_tag, - "neg_tag": lod_neg_tag}, - fetch_list=[avg_cost.name, correct.name]) - if batch_id % args.print_batch == 0: - print("TRAIN --> pass: {} batch_num: {} avg_cost: {}, acc: {}" - .format(pass_idx, (batch_id+10) * batch_size, np.mean(loss_val), - float(np.sum(correct_val)) / batch_size)) - t1 = time.time() - total_time += t1 - t0 - print("epoch:%d num_steps:%d time_cost(s):%f" % - (epoch_idx, batch_id, total_time / epoch_idx)) - save_dir = "%s/epoch_%d" % (model_dir, epoch_idx) - feed_var_names = ["text", "pos_tag"] - fetch_vars = [cos_pos] - fluid.io.save_inference_model(save_dir, feed_var_names, fetch_vars, exe) - print("finish training") - - if args.is_local: - print("run local training") - train_loop(fluid.default_main_program()) - else: - print("run distribute training") - t = fluid.DistributeTranspiler() - t.transpile(args.trainer_id, pservers=args.endpoints, trainers=args.trainers) - if args.role == "pserver": - print("run psever") - pserver_prog = t.get_pserver_program(args.current_endpoint) - pserver_startup = t.get_startup_program(args.current_endpoint, - pserver_prog) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif args.role == "trainer": - print("run trainer") - train_loop(t.get_trainer_program()) - -if __name__ == "__main__": - train() diff --git a/PaddleRec/tagspace/cluster_train.sh b/PaddleRec/tagspace/cluster_train.sh deleted file mode 100644 index 16698e1d..00000000 --- a/PaddleRec/tagspace/cluster_train.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -#export GLOG_v=30 -#export GLOG_logtostderr=1 - -# start pserver0 -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --batch_size 5 \ - --is_local 0 \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6000 \ - --trainers 2 \ - > pserver0.log 2>&1 & - -# start pserver1 -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --batch_size 5 \ - --is_local 0 \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6001 \ - --trainers 2 \ - > pserver1.log 2>&1 & - -# start trainer0 -#CUDA_VISIBLE_DEVICES=1 python cluster_train.py \ -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --batch_size 5 \ - --print_batch 10 \ - --use_cuda 0 \ - --is_local 0 \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 0 \ - > trainer0.log 2>&1 & - -# start trainer1 -#CUDA_VISIBLE_DEVICES=2 python cluster_train.py \ -python cluster_train.py \ - --train_dir train_data \ - --model_dir cluster_model \ - --batch_size 5 \ - --print_batch 10 \ - --use_cuda 0 \ - --is_local 0 \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 1 \ - > trainer1.log 2>&1 & diff --git a/PaddleRec/tagspace/utils.py b/PaddleRec/tagspace/utils.py index 7ae71249..5730efea 100644 --- a/PaddleRec/tagspace/utils.py +++ b/PaddleRec/tagspace/utils.py @@ -65,7 +65,7 @@ def 
prepare_data(file_dir, vocab_text_size = get_vocab_size(vocab_text_path) vocab_tag_size = get_vocab_size(vocab_tag_path) reader = sort_batch( - paddle.reader.shuffle( + fluid.io.shuffle( train( file_dir, vocab_tag_size, diff --git a/PaddleRec/tdm/tdm_demo/README.md b/PaddleRec/tdm/tdm_demo/README.md index 8e9b7f4c..42b36083 100644 --- a/PaddleRec/tdm/tdm_demo/README.md +++ b/PaddleRec/tdm/tdm_demo/README.md @@ -51,7 +51,7 @@ - **Node-Embedding**:注意,此处的Embedding,并非我们已有的item-embedding,而是构建完成的树的节点对应的Embedding,由item-embedding通过规则生成,是我们的网络主要训练的目标。ID范围为所有0->节点数-1。我们同时也需准备一个映射表,来告诉模型,item_id到node_id的映射关系。 - **Travel**:是指叶子节点从root开始直到其自身的遍历路径,如上图,14号节点的Travel:0->1->3->7->14 - **Layer**:指树的层,如上图,共有4层。 - + > Paddle-TDM在训练时,不会改动树的结构,只会改动Node-Embedding。 @@ -156,7 +156,7 @@ TDM的组网,宏观上,可以概括为三个部分 **demo模型,假设输入为两个元素:** > 一、user/query的emb表示,该emb应该来源于特征的组合在某个空间的映射(比如若干特征取emb后concat到一起),或其他预训练模型的处理结果(比如将明文query通过nlp预处理得到emb表示) - + > 二、item的正样本,是发生了实际点击/购买/浏览等行为的item_id,与输入的user/query emb强相关,是我们之后通过预测想得到的结果。 在paddle组网中,我们这样定义上面两个变量: @@ -233,9 +233,9 @@ tdm_sampler的运行逻辑如下: - 在item遍历路径上的node视为正样本,`positive_node_id`由`travel_list[item_id][i]`给出,其他同层的兄弟节点视为负样本,该层节点列表由`layer_list[i]`给出,如果`positive_node_id`不在`layer_list[i]`中,会提示错误。 - 在兄弟节点中进行随机采样,采样N个node,N由`neg_sampling_list[i]`的值决定,如果该值大于兄弟节点的数量,会提示错误。 采样结果不会重复,且不会采样到正样本。 - + - 如果`output_positive=True`,则会同时输出正负样本,否则只输出负采样的结果 - + - 生成该层`label`,shape与采样结果一致,正样本对应的label=1,负样本的label=0 - 生成该层`mask`,如果树是不平衡的,则有些item不会位于树的最后一层,所以遍历路径的实际长度会比其他item少,为了tensor维度一致,travel_list中padding了0。当遇到了padding的0时,tdm_sampler也会输出正常维度的采样结果,采样结果与label都为0。为了区分这部分虚拟的采样结果与真实采样结果,会给虚拟采样结果额外设置mask=0,如果是真实采样结果mask=1 @@ -403,23 +403,23 @@ acc = fluid.layers.accuracy(input=softmax_prob, label=labels_reshape) 在demo网络中,我们设置为从某一层的所有节点开始进行检索。paddle组网对输入定义的实现如下: ```python def input_data(self): - input_emb = fluid.layers.data( + input_emb = fluid.data( name="input_emb", - shape=[self.input_embed_size], + shape=[None, self.input_embed_size], dtype="float32", ) # first_layer 与 first_layer_mask 对应着infer起始的节点 - first_layer = fluid.layers.data( + first_layer = fluid.data( name="first_layer_node", - shape=[1], + shape=[None, 1], dtype="int64", lod_level=1, #支持变长 ) - first_layer_mask = fluid.layers.data( + first_layer_mask = fluid.data( name="first_layer_node_mask", - shape=[1], + shape=[None, 1], dtype="int64", lod_level=1, ) @@ -447,7 +447,7 @@ def create_first_layer(self, args): tdm的检索逻辑类似beamsearch,简单来说:在每一层计算打分,得到topK的节点,将这些节点的孩子节点作为下一层的输入,如此循环,得到最终的topK。但仍然有一些需要注意的细节,下面将详细介绍。 - 问题一:怎么处理`input_emb`? - + - input_emb过`input_fc`,检索中,只需过一次即可: ```python nput_trans_emb = self.input_trans_net.input_fc_infer(input_emb) @@ -663,7 +663,7 @@ if args.save_init_model or not args.load_model: ``` > 为什么每次加载模型手动Set `learning rate`? 
diff --git a/PaddleRec/tdm/tdm_demo/dataset_generator.py b/PaddleRec/tdm/tdm_demo/dataset_generator.py
index a600ed92..b186a1ae 100644
--- a/PaddleRec/tdm/tdm_demo/dataset_generator.py
+++ b/PaddleRec/tdm/tdm_demo/dataset_generator.py
@@ -35,6 +35,7 @@ class TDMDataset(dg.MultiSlotStringDataGenerator):
         """
         Read test_data line by line & yield batch
         """
+
         def local_iter():
             """Read file line by line"""
             for fname in infer_file_list:
@@ -46,13 +47,14 @@ class TDMDataset(dg.MultiSlotStringDataGenerator):
                     yield [input_emb]
 
         import paddle
-        batch_iter = paddle.batch(local_iter, batch)
+        batch_iter = fluid.io.batch(local_iter, batch)
         return batch_iter
 
     def generate_sample(self, line):
         """
         Read the data line by line and process it as a dictionary
         """
+
        def iterator():
             """
             This function needs to be implemented by the user, based on data format
diff --git a/PaddleRec/tdm/tdm_demo/infer_network.py b/PaddleRec/tdm/tdm_demo/infer_network.py
index 8c7a41b8..a78364d0 100644
--- a/PaddleRec/tdm/tdm_demo/infer_network.py
+++ b/PaddleRec/tdm/tdm_demo/infer_network.py
@@ -41,26 +41,23 @@ class TdmInferNet(object):
         self.input_trans_net = InputTransNet(args)
 
     def input_data(self):
-        input_emb = fluid.layers.data(
+        input_emb = fluid.data(
             name="input_emb",
-            shape=[self.input_embed_size],
-            dtype="float32",
-        )
+            shape=[None, self.input_embed_size],
+            dtype="float32", )
 
         # first_layer and first_layer_mask correspond to the nodes of the layer where infer starts
-        first_layer = fluid.layers.data(
+        first_layer = fluid.data(
             name="first_layer_node",
-            shape=[1],
+            shape=[None, 1],
             dtype="int64",
-            lod_level=1,
-        )
+            lod_level=1, )
 
-        first_layer_mask = fluid.layers.data(
+        first_layer_mask = fluid.data(
             name="first_layer_node_mask",
-            shape=[1],
+            shape=[None, 1],
             dtype="int64",
-            lod_level=1,
-        )
+            lod_level=1, )
 
         inputs = [input_emb] + [first_layer] + [first_layer_mask]
         return inputs
@@ -125,28 +122,27 @@ class TdmInferNet(object):
             size=[self.node_nums, self.node_embed_size],
             param_attr=fluid.ParamAttr(name="TDM_Tree_Emb"))
 
-        input_fc_out = self.input_trans_net.layer_fc_infer(
-            input_trans_emb, layer_idx)
+        input_fc_out = self.input_trans_net.layer_fc_infer(input_trans_emb,
+                                                           layer_idx)
 
         # run each layer's classifier
-        layer_classifier_res = self.layer_classifier.classifier_layer_infer(input_fc_out,
-                                                                            node_emb,
-                                                                            layer_idx)
+        layer_classifier_res = self.layer_classifier.classifier_layer_infer(
+            input_fc_out, node_emb, layer_idx)
 
         # run the final discriminative classifier
-        tdm_fc = fluid.layers.fc(input=layer_classifier_res,
-                                 size=self.label_nums,
-                                 act=None,
-                                 num_flatten_dims=2,
-                                 param_attr=fluid.ParamAttr(
-                                     name="tdm.cls_fc.weight"),
-                                 bias_attr=fluid.ParamAttr(name="tdm.cls_fc.bias"))
+        tdm_fc = fluid.layers.fc(
+            input=layer_classifier_res,
+            size=self.label_nums,
+            act=None,
+            num_flatten_dims=2,
+            param_attr=fluid.ParamAttr(name="tdm.cls_fc.weight"),
+            bias_attr=fluid.ParamAttr(name="tdm.cls_fc.bias"))
 
         prob = fluid.layers.softmax(tdm_fc)
         positive_prob = fluid.layers.slice(
             prob, axes=[2], starts=[1], ends=[2])
-        prob_re = fluid.layers.reshape(
-            positive_prob, [-1, current_layer_node_num])
+        prob_re = fluid.layers.reshape(positive_prob,
+                                       [-1, current_layer_node_num])
 
         # filter out the invalid nodes produced by padding (node_id=0)
         node_zero_mask = fluid.layers.cast(current_layer_node, 'bool')
@@ -161,11 +157,10 @@ class TdmInferNet(object):
 
         # the index_sample op gathers the values of a tensor at the given indices
         # for paddle versions > 2.0, call it as paddle.index_sample
-        top_node = fluid.contrib.layers.index_sample(
-            current_layer_node, topk_i)
+        top_node = fluid.contrib.layers.index_sample(current_layer_node,
+                                                     topk_i)
         prob_re_mask = prob_re * current_layer_child_mask  # filter out non-leaf nodes
-        topk_value = fluid.contrib.layers.index_sample(
-            prob_re_mask, topk_i)
+        topk_value = fluid.contrib.layers.index_sample(prob_re_mask, topk_i)
 
         node_score.append(topk_value)
         node_list.append(top_node)
@@ -190,7 +185,8 @@ class TdmInferNet(object):
         res_node = fluid.layers.reshape(res_layer_node, [-1, self.topK, 1])
 
         # use the Tree_info information to convert node_id to item_id
-        tree_info = fluid.default_main_program().global_block().var("TDM_Tree_Info")
+        tree_info = fluid.default_main_program().global_block().var(
+            "TDM_Tree_Info")
         res_node_emb = fluid.layers.gather_nd(tree_info, res_node)
 
         res_item = fluid.layers.slice(
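`index_sample` is the per-row gather at the heart of the retrieval loop above: for each row of a score tensor it picks the entries named by that row's topK indices. A small sketch of the call as used in the hunk, assuming Paddle 1.8's `fluid.contrib.layers.index_sample` behaves like the later `paddle.index_sample` (the input values are made up):

```python
import numpy as np
import paddle.fluid as fluid

x = fluid.data(name="x", shape=[None, 4], dtype="float32")
index = fluid.data(name="index", shape=[None, 2], dtype="int64")
# For each row i, pick x[i][index[i][j]]: the same per-row gather the
# retrieval loop uses to pull topK nodes and scores out of a layer.
sampled = fluid.contrib.layers.index_sample(x, index)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
out, = exe.run(
    feed={
        "x": np.arange(8, dtype="float32").reshape(2, 4),
        "index": np.array([[0, 3], [1, 2]], dtype="int64"),
    },
    fetch_list=[sampled])
print(out)  # expected: [[0., 3.], [5., 6.]]
```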
diff --git a/PaddleRec/text_matching_on_quora/.run_ce.sh b/PaddleRec/text_matching_on_quora/.run_ce.sh
deleted file mode 100755
index f1bb7feb..00000000
--- a/PaddleRec/text_matching_on_quora/.run_ce.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-export MKL_NUM_THREADS=1
-export OMP_NUM_THREADS=1
-
-cudaid=${text_matching_on_quora:=0} # use 0-th card as default
-export CUDA_VISIBLE_DEVICES=$cudaid
-
-FLAGS_benchmark=true python train_and_evaluate.py --model_name=cdssmNet --config=cdssm_base --enable_ce --epoch_num=5 | python _ce.py
-
-cudaid=${text_matching_on_quora_m:=0,1,2,3} # use 0,1,2,3 card as default
-export CUDA_VISIBLE_DEVICES=$cudaid
-
-FLAGS_benchmark=true python train_and_evaluate.py --model_name=cdssmNet --config=cdssm_base --enable_ce --epoch_num=5 | python _ce.py
diff --git a/PaddleRec/text_matching_on_quora/README.md b/PaddleRec/text_matching_on_quora/README.md
deleted file mode 100644
index eb1346b6..00000000
--- a/PaddleRec/text_matching_on_quora/README.md
+++ /dev/null
@@ -1,177 +0,0 @@
-# Text matching on Quora question-answer pair dataset
-
-## contents
-
-* [Introduction](#introduction)
-  * [a brief review of the Quora Question Pair (QQP) Task](#a-brief-review-of-the-quora-question-pair-qqp-task)
-  * [Our Work](#our-work)
-* [Environment Preparation](#environment-preparation)
-  * [Install Fluid release 1.0](#install-fluid-release-10)
-    * [cpu version](#cpu-version)
-    * [gpu version](#gpu-version)
-    * [Have I installed Fluid successfully?](#have-i-installed-fluid-successfully)
-* [Prepare Data](#prepare-data)
-* [Train and evaluate](#train-and-evaluate)
-* [Models](#models)
-* [Results](#results)
-
-
-## Introduction
-
-### a brief review of the Quora Question Pair (QQP) Task
-
-The [Quora Question Pair](https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs) dataset contains 400,000 question pairs from [Quora](https://www.quora.com/), where people ask and answer questions related to specific areas. Each sample in the dataset consists of two questions (both English) and a label that represents whether the questions are duplicates. The dataset is well annotated by humans.
-
-Below are two samples from the dataset. The last column indicates whether the two questions are duplicates (1) or not (0).
-
-|id | qid1 | qid2| question1| question2| is_duplicate|
-|:---:|:---:|:---:|:---:|:---:|:---:|
-|0 |1 |2 |What is the step by step guide to invest in share market in india? |What is the step by step guide to invest in share market? |0|
-|1 |3 |4 |What is the story of Kohinoor (Koh-i-Noor) Diamond? | What would happen if the Indian government stole the Kohinoor (Koh-i-Noor) diamond back? |0|
-
-A [kaggle competition](https://www.kaggle.com/c/quora-question-pairs#description) was held based on this dataset in 2017. The kagglers were given a training dataset (with labels) and asked to make predictions on a test dataset (without labels). The predictions were evaluated by the log-likelihood loss on the test data.
-
-The kaggle competition inspired much effective work. However, most of those models are rule-based and difficult to transfer to new tasks. Researchers are seeking more general models that work well on this task and on other natural language processing (NLP) tasks.
-
-[Wang _et al._](https://arxiv.org/abs/1702.03814) proposed a bilateral multi-perspective matching (BIMPM) model based on the Quora Question Pair dataset. They split the original dataset into [3 parts](https://drive.google.com/file/d/0B0PlTAo--BnaQWlsZl9FZ3l1c28/view?usp=sharing): _train.tsv_ (384,348 samples), _dev.tsv_ (10,000 samples) and _test.tsv_ (10,000 samples). The class distribution of _train.tsv_ is unbalanced (37% positive and 63% negative), while those of _dev.tsv_ and _test.tsv_ are balanced (50% positive and 50% negative). We used the same splitting method in our experiments.
-
-### Our Work
-
-Based on the Quora Question Pair dataset, we implemented some classic models from the area of neural language understanding (NLU). The accuracy of the prediction results is evaluated on the _test.tsv_ from [Wang _et al._](https://arxiv.org/abs/1702.03814).
-
-## Environment Preparation
-
-### Install Fluid release 1.0
-
-Please follow the [official document in English](http://www.paddlepaddle.org/documentation/docs/en/1.0/build_and_install/pip_install_en.html) or [official document in Chinese](http://www.paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/install/Start.html) to install the Fluid deep learning framework.
-
-#### Have I installed Fluid successfully?
-
-Run the following script from your command line:
-
-```shell
-python -c "import paddle"
-```
-
-If Fluid is installed successfully, you should see no error message. Feel free to open issues under the [PaddlePaddle repository](https://github.com/PaddlePaddle/Paddle/issues) for support.
-
-## Prepare Data
-
-Please download the Quora dataset from [Google drive](https://drive.google.com/file/d/0B0PlTAo--BnaQWlsZl9FZ3l1c28/view?usp=sharing) and unzip it to $HOME/.cache/paddle/dataset.
-
-Then run _data/prepare_quora_data.sh_ to download the pre-trained _word2vec_ embedding file -- _glove.840B.300d.zip_:
-
-```shell
-sh data/prepare_quora_data.sh
-```
-
-At this point the dataset directory ($HOME/.cache/paddle/dataset) structure should be:
-
-```shell
-$HOME/.cache/paddle/dataset
-    |- Quora_question_pair_partition
-        |- train.tsv
-        |- test.tsv
-        |- dev.tsv
-        |- readme.txt
-        |- wordvec.txt
-    |- glove.840B.300d.txt
-```
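The training scripts assume this exact layout, so verifying it up front can save a failed run. A small sketch of such a check; `check_quora_layout` is a hypothetical helper written for illustration, not part of the repo:

```python
import os

def check_quora_layout(root=os.path.expanduser("~/.cache/paddle/dataset")):
    """Return True if the files prepare_quora_data.sh should leave exist."""
    part = os.path.join(root, "Quora_question_pair_partition")
    expected = [os.path.join(part, f)
                for f in ("train.tsv", "test.tsv", "dev.tsv",
                          "readme.txt", "wordvec.txt")]
    expected.append(os.path.join(root, "glove.840B.300d.txt"))
    missing = [p for p in expected if not os.path.exists(p)]
    for p in missing:
        print("missing:", p)
    return not missing

if __name__ == "__main__":
    print("layout ok" if check_quora_layout() else "layout incomplete")
```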
-
-## Train and evaluate
-
-We provide multiple models and configurations. Details are shown in the `models` and `configs` directories. For a quick start, please run the _cdssmNet_ model with the corresponding configuration:
-
-```shell
-python train_and_evaluate.py \
-    --model_name=cdssmNet \
-    --config=cdssm_base
-```
-
-Logs will be output to the console. If everything works well, the logging information will have the same format as the content in _cdssm_base.log_.
-
-All configurations used in our experiments are as follows:
-
-|Model|Config|command|
-|:----:|:----:|:----:|
-|cdssmNet|cdssm_base|python train_and_evaluate.py --model_name=cdssmNet --config=cdssm_base|
-|DecAttNet|decatt_glove|python train_and_evaluate.py --model_name=DecAttNet --config=decatt_glove|
-|InferSentNet|infer_sent_v1|python train_and_evaluate.py --model_name=InferSentNet --config=infer_sent_v1|
-|InferSentNet|infer_sent_v2|python train_and_evaluate.py --model_name=InferSentNet --config=infer_sent_v2|
-|SSENet|sse_base|python train_and_evaluate.py --model_name=SSENet --config=sse_base|
-
-## Models
-
-We have implemented 4 models so far: the convolutional deep-structured semantic model (CDSSM, CNN-based), the InferSent model (RNN-based), the shortcut-stacked encoder (SSE, RNN-based), and the decomposed attention model (DecAtt, attention-based).
-
-|Model|features|Context Encoder|Match Layer|Classification Layer|
-|:----:|:----:|:----:|:----:|:----:|
-|CDSSM|word|1 layer conv1d|concatenation|MLP|
-|DecAtt|word|Attention|concatenation|MLP|
-|InferSent|word|1 layer Bi-LSTM|concatenation/element-wise product/absolute element-wise difference|MLP|
-|SSE|word|3 layer Bi-LSTM|concatenation/element-wise product/absolute element-wise difference|MLP|
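The Match Layer column above combines two sentence vectors by concatenating them with their element-wise product and absolute element-wise difference. A generic fluid sketch of that composition (an illustration of the layer the table describes, not code taken from the repo; the 300-dim size is arbitrary):

```python
import paddle.fluid as fluid

def match_layer(q1_repr, q2_repr):
    # concatenation / element-wise product / absolute element-wise
    # difference, as in the InferSent and SSE rows of the table above.
    prod = fluid.layers.elementwise_mul(q1_repr, q2_repr)
    diff = fluid.layers.abs(fluid.layers.elementwise_sub(q1_repr, q2_repr))
    return fluid.layers.concat([q1_repr, q2_repr, prod, diff], axis=1)

q1 = fluid.data(name="q1_repr", shape=[None, 300], dtype="float32")
q2 = fluid.data(name="q2_repr", shape=[None, 300], dtype="float32")
matched = match_layer(q1, q2)  # shape: [batch, 1200], fed to the MLP
```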
-
-### CDSSM
-
-```
-@inproceedings{shen2014learning,
-  title={Learning semantic representations using convolutional neural networks for web search},
-  author={Shen, Yelong and He, Xiaodong and Gao, Jianfeng and Deng, Li and Mesnil, Gr{\'e}goire},
-  booktitle={Proceedings of the 23rd International Conference on World Wide Web},
-  pages={373--374},
-  year={2014},
-  organization={ACM}
-}
-```
-
-### InferSent
-
-```
-@article{conneau2017supervised,
-  title={Supervised learning of universal sentence representations from natural language inference data},
-  author={Conneau, Alexis and Kiela, Douwe and Schwenk, Holger and Barrault, Loic and Bordes, Antoine},
-  journal={arXiv preprint arXiv:1705.02364},
-  year={2017}
-}
-```
-
-### SSE
-
-```
-@article{nie2017shortcut,
-  title={Shortcut-stacked sentence encoders for multi-domain inference},
-  author={Nie, Yixin and Bansal, Mohit},
-  journal={arXiv preprint arXiv:1708.02312},
-  year={2017}
-}
-```
-
-### DecAtt
-
-```
-@article{tomar2017neural,
-  title={Neural paraphrase identification of questions with noisy pretraining},
-  author={Tomar, Gaurav Singh and Duque, Thyago and T{\"a}ckstr{\"o}m, Oscar and Uszkoreit, Jakob and Das, Dipanjan},
-  journal={arXiv preprint arXiv:1704.04565},
-  year={2017}
-}
-```
-
-## Results
-
-|Model|Config|dev accuracy|test accuracy|
-|:----:|:----:|:----:|:----:|
-|cdssmNet|cdssm_base|83.56%|82.83%|
-|DecAttNet|decatt_glove|86.31%|86.22%|
-|InferSentNet|infer_sent_v1|87.15%|86.62%|
-|InferSentNet|infer_sent_v2|88.55%|88.43%|
-|SSENet|sse_base|88.35%|88.25%|
-
-In our experiments, we found that LSTM-based models outperformed convolution-based models. The DecAtt model has fewer parameters than the LSTM-based models, but is sensitive to hyper-parameters.
-

-[figure: test accuracy of the models ("test_acc")]

diff --git a/PaddleRec/text_matching_on_quora/__init__.py b/PaddleRec/text_matching_on_quora/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/PaddleRec/text_matching_on_quora/_ce.py b/PaddleRec/text_matching_on_quora/_ce.py
deleted file mode 100644
index 930d7142..00000000
--- a/PaddleRec/text_matching_on_quora/_ce.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# this file is only used for continuous evaluation test!
-
-import os
-import sys
-sys.path.append(os.environ['ceroot'])
-from kpi import CostKpi
-from kpi import DurationKpi
-
-each_pass_duration_card1_kpi = DurationKpi(
-    'each_pass_duration_card1', 0.08, 0, actived=True)
-train_avg_cost_card1_kpi = CostKpi('train_avg_cost_card1', 0.08, 0)
-train_avg_acc_card1_kpi = CostKpi('train_avg_acc_card1', 0.02, 0)
-each_pass_duration_card4_kpi = DurationKpi(
-    'each_pass_duration_card4', 0.08, 0, actived=True)
-train_avg_cost_card4_kpi = CostKpi('train_avg_cost_card4', 0.08, 0)
-train_avg_acc_card4_kpi = CostKpi('train_avg_acc_card4', 0.02, 0)
-
-tracking_kpis = [
-    each_pass_duration_card1_kpi,
-    train_avg_cost_card1_kpi,
-    train_avg_acc_card1_kpi,
-    each_pass_duration_card4_kpi,
-    train_avg_cost_card4_kpi,
-    train_avg_acc_card4_kpi,
-]
-
-
-def parse_log(log):
-    '''
-    This method should be implemented by model developers.
-
-    The suggestion:
-
-    each line in the log should be key, value, for example:
-
-    "
-    train_cost\t1.0
-    test_cost\t1.0
-    train_cost\t1.0
-    train_cost\t1.0
-    train_acc\t1.2
-    "
-    '''
-    for line in log.split('\n'):
-        fs = line.strip().split('\t')
-        print(fs)
-        if len(fs) == 3 and fs[0] == 'kpis':
-            kpi_name = fs[1]
-            kpi_value = float(fs[2])
-            yield kpi_name, kpi_value
-
-
-def log_to_ce(log):
-    kpi_tracker = {}
-    for kpi in tracking_kpis:
-        kpi_tracker[kpi.name] = kpi
-
-    for (kpi_name, kpi_value) in parse_log(log):
-        print(kpi_name, kpi_value)
-        kpi_tracker[kpi_name].add_record(kpi_value)
-        kpi_tracker[kpi_name].persist()
-
-
-if __name__ == '__main__':
-    log = sys.stdin.read()
-    log_to_ce(log)
diff --git a/PaddleRec/text_matching_on_quora/cdssm_base.log b/PaddleRec/text_matching_on_quora/cdssm_base.log
deleted file mode 100644
index ec298161..00000000
--- a/PaddleRec/text_matching_on_quora/cdssm_base.log
+++ /dev/null
@@ -1,1834 +0,0 @@
-net_name: cdssmNet
-config {'save_dirname': 'model_dir', 'optimizer_type': 'adam', 'duplicate_data': False, 'train_samples_num': 384348, 'droprate_fc': 0.1, 'fc_dim': 128, 'kernel_count': 300, 'mlp_hid_dim': [128, 128], 'OOV_fill': 'uniform', 'class_dim': 2, 'epoch_num': 50, 'lr_decay': 1, 'learning_rate': 0.001, 'batch_size': 128, 'use_lod_tensor': True, 'metric_type': ['accuracy'], 'embedding_norm': False, 'emb_dim': 300, 'droprate_conv': 0.1, 'use_pretrained_word_embedding': True, 'kernel_size': 5, 'dict_dim': 40000}
-[remaining deleted log lines omitted: word-dict generation, pretrained-embedding loading, parameter shapes, and per-batch/per-epoch cost and accuracy records]
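For reference, the CE hook in _ce.py above consumes KPI lines of the form `kpis\t<name>\t<value>` printed by the training script. A tiny self-contained demo of that contract, restating `parse_log` without its debug print (the sample log text is made up):

```python
def parse_log(log):
    # Same contract as _ce.py above: yield (kpi_name, kpi_value) for
    # every stdout line shaped like "kpis\t<name>\t<value>".
    for line in log.split('\n'):
        fs = line.strip().split('\t')
        if len(fs) == 3 and fs[0] == 'kpis':
            yield fs[1], float(fs[2])

sample = ("kpis\ttrain_avg_cost_card1\t0.4565\n"
          "some unrelated log line\n"
          "kpis\ttrain_avg_acc_card1\t0.7797")
print(list(parse_log(sample)))
# -> [('train_avg_cost_card1', 0.4565), ('train_avg_acc_card1', 0.7797)]
```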
epoch_id: 16, batch_id: 2000, cost: 0.102175, acc: 0.976562 -[Wed Oct 10 16:56:09 2018] epoch_id: 16, batch_id: 2100, cost: 0.012663, acc: 1.000000 -[Wed Oct 10 16:56:11 2018] epoch_id: 16, batch_id: 2200, cost: 0.026142, acc: 0.984375 -[Wed Oct 10 16:56:15 2018] epoch_id: 16, batch_id: 2300, cost: 0.007566, acc: 1.000000 -[Wed Oct 10 16:56:17 2018] epoch_id: 16, batch_id: 2400, cost: 0.043235, acc: 0.976562 -[Wed Oct 10 16:56:20 2018] epoch_id: 16, batch_id: 2500, cost: 0.039383, acc: 0.984375 -[Wed Oct 10 16:56:22 2018] epoch_id: 16, batch_id: 2600, cost: 0.009917, acc: 1.000000 -[Wed Oct 10 16:56:24 2018] epoch_id: 16, batch_id: 2700, cost: 0.036917, acc: 0.984375 -[Wed Oct 10 16:56:26 2018] epoch_id: 16, batch_id: 2800, cost: 0.012813, acc: 1.000000 -[Wed Oct 10 16:56:29 2018] epoch_id: 16, batch_id: 2900, cost: 0.033933, acc: 0.984375 -[Wed Oct 10 16:56:31 2018] epoch_id: 16, batch_id: 3000, cost: 0.007463, acc: 1.000000 - -[Wed Oct 10 16:56:32 2018] epoch_id: 16, train_avg_cost: 0.031971, train_avg_acc: 0.988555 -[Wed Oct 10 16:56:33 2018] epoch_id: 16, dev_cost: 0.955907, accuracy: 0.8389 -[Wed Oct 10 16:56:34 2018] epoch_id: 16, test_cost: 0.953062, accuracy: 0.8389 - -[Wed Oct 10 16:56:42 2018] epoch_id: 17, batch_id: 0, cost: 0.031323, acc: 0.992188 -[Wed Oct 10 16:56:44 2018] epoch_id: 17, batch_id: 100, cost: 0.010965, acc: 1.000000 -[Wed Oct 10 16:56:46 2018] epoch_id: 17, batch_id: 200, cost: 0.056771, acc: 0.976562 -[Wed Oct 10 16:56:49 2018] epoch_id: 17, batch_id: 300, cost: 0.026509, acc: 0.992188 -[Wed Oct 10 16:56:51 2018] epoch_id: 17, batch_id: 400, cost: 0.039409, acc: 0.992188 -[Wed Oct 10 16:56:53 2018] epoch_id: 17, batch_id: 500, cost: 0.063554, acc: 0.976562 -[Wed Oct 10 16:56:55 2018] epoch_id: 17, batch_id: 600, cost: 0.035896, acc: 0.976562 -[Wed Oct 10 16:56:58 2018] epoch_id: 17, batch_id: 700, cost: 0.022053, acc: 0.992188 -[Wed Oct 10 16:57:00 2018] epoch_id: 17, batch_id: 800, cost: 0.024150, acc: 0.976562 -[Wed Oct 10 16:57:03 2018] epoch_id: 17, batch_id: 900, cost: 0.009064, acc: 0.992188 -[Wed Oct 10 16:57:05 2018] epoch_id: 17, batch_id: 1000, cost: 0.037311, acc: 0.976562 -[Wed Oct 10 16:57:08 2018] epoch_id: 17, batch_id: 1100, cost: 0.036577, acc: 0.976562 -[Wed Oct 10 16:57:10 2018] epoch_id: 17, batch_id: 1200, cost: 0.020783, acc: 0.992188 -[Wed Oct 10 16:57:12 2018] epoch_id: 17, batch_id: 1300, cost: 0.017610, acc: 0.992188 -[Wed Oct 10 16:57:14 2018] epoch_id: 17, batch_id: 1400, cost: 0.027604, acc: 0.976562 -[Wed Oct 10 16:57:17 2018] epoch_id: 17, batch_id: 1500, cost: 0.040730, acc: 0.992188 -[Wed Oct 10 16:57:19 2018] epoch_id: 17, batch_id: 1600, cost: 0.077946, acc: 0.984375 -[Wed Oct 10 16:57:21 2018] epoch_id: 17, batch_id: 1700, cost: 0.021349, acc: 0.984375 -[Wed Oct 10 16:57:24 2018] epoch_id: 17, batch_id: 1800, cost: 0.016132, acc: 0.992188 -[Wed Oct 10 16:57:26 2018] epoch_id: 17, batch_id: 1900, cost: 0.018797, acc: 0.984375 -[Wed Oct 10 16:57:28 2018] epoch_id: 17, batch_id: 2000, cost: 0.009052, acc: 1.000000 -[Wed Oct 10 16:57:30 2018] epoch_id: 17, batch_id: 2100, cost: 0.028399, acc: 0.992188 -[Wed Oct 10 16:57:33 2018] epoch_id: 17, batch_id: 2200, cost: 0.009593, acc: 1.000000 -[Wed Oct 10 16:57:35 2018] epoch_id: 17, batch_id: 2300, cost: 0.018474, acc: 0.992188 -[Wed Oct 10 16:57:37 2018] epoch_id: 17, batch_id: 2400, cost: 0.007873, acc: 1.000000 -[Wed Oct 10 16:57:40 2018] epoch_id: 17, batch_id: 2500, cost: 0.054923, acc: 0.976562 -[Wed Oct 10 16:57:42 2018] epoch_id: 17, batch_id: 2600, cost: 0.019036, acc: 
0.992188 -[Wed Oct 10 16:57:44 2018] epoch_id: 17, batch_id: 2700, cost: 0.017081, acc: 1.000000 -[Wed Oct 10 16:57:46 2018] epoch_id: 17, batch_id: 2800, cost: 0.045522, acc: 0.976562 -[Wed Oct 10 16:57:49 2018] epoch_id: 17, batch_id: 2900, cost: 0.034922, acc: 0.984375 -[Wed Oct 10 16:57:51 2018] epoch_id: 17, batch_id: 3000, cost: 0.039566, acc: 0.984375 - -[Wed Oct 10 16:57:51 2018] epoch_id: 17, train_avg_cost: 0.030061, train_avg_acc: 0.989478 -[Wed Oct 10 16:57:52 2018] epoch_id: 17, dev_cost: 1.184997, accuracy: 0.8406 -[Wed Oct 10 16:57:53 2018] epoch_id: 17, test_cost: 1.175792, accuracy: 0.8372 - -[Wed Oct 10 16:58:02 2018] epoch_id: 18, batch_id: 0, cost: 0.015059, acc: 0.992188 -[Wed Oct 10 16:58:04 2018] epoch_id: 18, batch_id: 100, cost: 0.023421, acc: 0.992188 -[Wed Oct 10 16:58:06 2018] epoch_id: 18, batch_id: 200, cost: 0.007234, acc: 1.000000 -[Wed Oct 10 16:58:08 2018] epoch_id: 18, batch_id: 300, cost: 0.007139, acc: 1.000000 -[Wed Oct 10 16:58:10 2018] epoch_id: 18, batch_id: 400, cost: 0.007934, acc: 1.000000 -[Wed Oct 10 16:58:13 2018] epoch_id: 18, batch_id: 500, cost: 0.004312, acc: 1.000000 -[Wed Oct 10 16:58:15 2018] epoch_id: 18, batch_id: 600, cost: 0.001806, acc: 1.000000 -[Wed Oct 10 16:58:17 2018] epoch_id: 18, batch_id: 700, cost: 0.004790, acc: 1.000000 -[Wed Oct 10 16:58:20 2018] epoch_id: 18, batch_id: 800, cost: 0.048477, acc: 0.992188 -[Wed Oct 10 16:58:22 2018] epoch_id: 18, batch_id: 900, cost: 0.066390, acc: 0.992188 -[Wed Oct 10 16:58:24 2018] epoch_id: 18, batch_id: 1000, cost: 0.014440, acc: 0.992188 -[Wed Oct 10 16:58:26 2018] epoch_id: 18, batch_id: 1100, cost: 0.020435, acc: 0.992188 -[Wed Oct 10 16:58:29 2018] epoch_id: 18, batch_id: 1200, cost: 0.007474, acc: 0.992188 -[Wed Oct 10 16:58:31 2018] epoch_id: 18, batch_id: 1300, cost: 0.036209, acc: 0.984375 -[Wed Oct 10 16:58:33 2018] epoch_id: 18, batch_id: 1400, cost: 0.026540, acc: 0.984375 -[Wed Oct 10 16:58:35 2018] epoch_id: 18, batch_id: 1500, cost: 0.019448, acc: 0.992188 -[Wed Oct 10 16:58:38 2018] epoch_id: 18, batch_id: 1600, cost: 0.052421, acc: 0.968750 -[Wed Oct 10 16:58:40 2018] epoch_id: 18, batch_id: 1700, cost: 0.022365, acc: 0.992188 -[Wed Oct 10 16:58:42 2018] epoch_id: 18, batch_id: 1800, cost: 0.135754, acc: 0.984375 -[Wed Oct 10 16:58:45 2018] epoch_id: 18, batch_id: 1900, cost: 0.037197, acc: 0.992188 -[Wed Oct 10 16:58:48 2018] epoch_id: 18, batch_id: 2000, cost: 0.010672, acc: 0.992188 -[Wed Oct 10 16:58:50 2018] epoch_id: 18, batch_id: 2100, cost: 0.012909, acc: 1.000000 -[Wed Oct 10 16:58:52 2018] epoch_id: 18, batch_id: 2200, cost: 0.061615, acc: 0.976562 -[Wed Oct 10 16:58:55 2018] epoch_id: 18, batch_id: 2300, cost: 0.081252, acc: 0.960938 -[Wed Oct 10 16:58:57 2018] epoch_id: 18, batch_id: 2400, cost: 0.009792, acc: 1.000000 -[Wed Oct 10 16:58:59 2018] epoch_id: 18, batch_id: 2500, cost: 0.039835, acc: 0.984375 -[Wed Oct 10 16:59:02 2018] epoch_id: 18, batch_id: 2600, cost: 0.002643, acc: 1.000000 -[Wed Oct 10 16:59:04 2018] epoch_id: 18, batch_id: 2700, cost: 0.017633, acc: 0.992188 -[Wed Oct 10 16:59:06 2018] epoch_id: 18, batch_id: 2800, cost: 0.050407, acc: 0.976562 -[Wed Oct 10 16:59:08 2018] epoch_id: 18, batch_id: 2900, cost: 0.066672, acc: 0.960938 -[Wed Oct 10 16:59:11 2018] epoch_id: 18, batch_id: 3000, cost: 0.023438, acc: 0.984375 - -[Wed Oct 10 16:59:11 2018] epoch_id: 18, train_avg_cost: 0.028777, train_avg_acc: 0.989884 -[Wed Oct 10 16:59:12 2018] epoch_id: 18, dev_cost: 1.191979, accuracy: 0.8346 -[Wed Oct 10 16:59:13 2018] epoch_id: 18, 
test_cost: 1.159855, accuracy: 0.8344 - -[Wed Oct 10 16:59:22 2018] epoch_id: 19, batch_id: 0, cost: 0.023233, acc: 0.992188 -[Wed Oct 10 16:59:24 2018] epoch_id: 19, batch_id: 100, cost: 0.006624, acc: 1.000000 -[Wed Oct 10 16:59:26 2018] epoch_id: 19, batch_id: 200, cost: 0.018784, acc: 0.992188 -[Wed Oct 10 16:59:28 2018] epoch_id: 19, batch_id: 300, cost: 0.012745, acc: 0.992188 -[Wed Oct 10 16:59:31 2018] epoch_id: 19, batch_id: 400, cost: 0.010857, acc: 1.000000 -[Wed Oct 10 16:59:33 2018] epoch_id: 19, batch_id: 500, cost: 0.006066, acc: 1.000000 -[Wed Oct 10 16:59:35 2018] epoch_id: 19, batch_id: 600, cost: 0.014349, acc: 0.992188 -[Wed Oct 10 16:59:38 2018] epoch_id: 19, batch_id: 700, cost: 0.016725, acc: 0.992188 -[Wed Oct 10 16:59:40 2018] epoch_id: 19, batch_id: 800, cost: 0.069121, acc: 0.984375 -[Wed Oct 10 16:59:42 2018] epoch_id: 19, batch_id: 900, cost: 0.018849, acc: 0.984375 -[Wed Oct 10 16:59:44 2018] epoch_id: 19, batch_id: 1000, cost: 0.031679, acc: 0.984375 -[Wed Oct 10 16:59:47 2018] epoch_id: 19, batch_id: 1100, cost: 0.010815, acc: 0.992188 -[Wed Oct 10 16:59:49 2018] epoch_id: 19, batch_id: 1200, cost: 0.015778, acc: 0.992188 -[Wed Oct 10 16:59:51 2018] epoch_id: 19, batch_id: 1300, cost: 0.055160, acc: 0.984375 -[Wed Oct 10 16:59:53 2018] epoch_id: 19, batch_id: 1400, cost: 0.009311, acc: 0.992188 -[Wed Oct 10 16:59:55 2018] epoch_id: 19, batch_id: 1500, cost: 0.014874, acc: 0.992188 -[Wed Oct 10 16:59:58 2018] epoch_id: 19, batch_id: 1600, cost: 0.038188, acc: 0.992188 -[Wed Oct 10 17:00:00 2018] epoch_id: 19, batch_id: 1700, cost: 0.001565, acc: 1.000000 -[Wed Oct 10 17:00:02 2018] epoch_id: 19, batch_id: 1800, cost: 0.013963, acc: 0.992188 -[Wed Oct 10 17:00:04 2018] epoch_id: 19, batch_id: 1900, cost: 0.028362, acc: 0.992188 -[Wed Oct 10 17:00:06 2018] epoch_id: 19, batch_id: 2000, cost: 0.006552, acc: 1.000000 -[Wed Oct 10 17:00:09 2018] epoch_id: 19, batch_id: 2100, cost: 0.045230, acc: 0.992188 -[Wed Oct 10 17:00:11 2018] epoch_id: 19, batch_id: 2200, cost: 0.029525, acc: 0.984375 -[Wed Oct 10 17:00:13 2018] epoch_id: 19, batch_id: 2300, cost: 0.009774, acc: 0.992188 -[Wed Oct 10 17:00:15 2018] epoch_id: 19, batch_id: 2400, cost: 0.003385, acc: 1.000000 -[Wed Oct 10 17:00:18 2018] epoch_id: 19, batch_id: 2500, cost: 0.030629, acc: 0.984375 -[Wed Oct 10 17:00:20 2018] epoch_id: 19, batch_id: 2600, cost: 0.039615, acc: 0.992188 -[Wed Oct 10 17:00:22 2018] epoch_id: 19, batch_id: 2700, cost: 0.016678, acc: 0.992188 -[Wed Oct 10 17:00:24 2018] epoch_id: 19, batch_id: 2800, cost: 0.004723, acc: 1.000000 -[Wed Oct 10 17:00:26 2018] epoch_id: 19, batch_id: 2900, cost: 0.018062, acc: 0.992188 -[Wed Oct 10 17:00:29 2018] epoch_id: 19, batch_id: 3000, cost: 0.032904, acc: 0.984375 - -[Wed Oct 10 17:00:29 2018] epoch_id: 19, train_avg_cost: 0.026175, train_avg_acc: 0.991055 -[Wed Oct 10 17:00:30 2018] epoch_id: 19, dev_cost: 1.013367, accuracy: 0.8388 -[Wed Oct 10 17:00:31 2018] epoch_id: 19, test_cost: 1.016906, accuracy: 0.8335 - -[Wed Oct 10 17:00:40 2018] epoch_id: 20, batch_id: 0, cost: 0.019038, acc: 0.992188 -[Wed Oct 10 17:00:42 2018] epoch_id: 20, batch_id: 100, cost: 0.001216, acc: 1.000000 -[Wed Oct 10 17:00:44 2018] epoch_id: 20, batch_id: 200, cost: 0.006635, acc: 1.000000 -[Wed Oct 10 17:00:47 2018] epoch_id: 20, batch_id: 300, cost: 0.051503, acc: 0.984375 -[Wed Oct 10 17:00:49 2018] epoch_id: 20, batch_id: 400, cost: 0.044815, acc: 0.992188 -[Wed Oct 10 17:00:51 2018] epoch_id: 20, batch_id: 500, cost: 0.041529, acc: 0.992188 -[Wed Oct 10 17:00:53 
2018] epoch_id: 20, batch_id: 600, cost: 0.010035, acc: 1.000000 -[Wed Oct 10 17:00:56 2018] epoch_id: 20, batch_id: 700, cost: 0.019799, acc: 0.992188 -[Wed Oct 10 17:00:58 2018] epoch_id: 20, batch_id: 800, cost: 0.062296, acc: 0.984375 -[Wed Oct 10 17:01:00 2018] epoch_id: 20, batch_id: 900, cost: 0.015680, acc: 0.992188 -[Wed Oct 10 17:01:03 2018] epoch_id: 20, batch_id: 1000, cost: 0.051963, acc: 0.984375 -[Wed Oct 10 17:01:05 2018] epoch_id: 20, batch_id: 1100, cost: 0.023968, acc: 0.984375 -[Wed Oct 10 17:01:07 2018] epoch_id: 20, batch_id: 1200, cost: 0.079527, acc: 0.984375 -[Wed Oct 10 17:01:09 2018] epoch_id: 20, batch_id: 1300, cost: 0.039612, acc: 0.992188 -[Wed Oct 10 17:01:12 2018] epoch_id: 20, batch_id: 1400, cost: 0.010211, acc: 1.000000 -[Wed Oct 10 17:01:14 2018] epoch_id: 20, batch_id: 1500, cost: 0.012661, acc: 0.992188 -[Wed Oct 10 17:01:16 2018] epoch_id: 20, batch_id: 1600, cost: 0.051475, acc: 0.984375 -[Wed Oct 10 17:01:18 2018] epoch_id: 20, batch_id: 1700, cost: 0.013513, acc: 1.000000 -[Wed Oct 10 17:01:21 2018] epoch_id: 20, batch_id: 1800, cost: 0.006646, acc: 1.000000 -[Wed Oct 10 17:01:23 2018] epoch_id: 20, batch_id: 1900, cost: 0.013369, acc: 0.992188 -[Wed Oct 10 17:01:25 2018] epoch_id: 20, batch_id: 2000, cost: 0.030614, acc: 0.984375 -[Wed Oct 10 17:01:27 2018] epoch_id: 20, batch_id: 2100, cost: 0.003242, acc: 1.000000 -[Wed Oct 10 17:01:30 2018] epoch_id: 20, batch_id: 2200, cost: 0.051409, acc: 0.984375 -[Wed Oct 10 17:01:32 2018] epoch_id: 20, batch_id: 2300, cost: 0.005996, acc: 1.000000 -[Wed Oct 10 17:01:34 2018] epoch_id: 20, batch_id: 2400, cost: 0.049493, acc: 0.976562 -[Wed Oct 10 17:01:36 2018] epoch_id: 20, batch_id: 2500, cost: 0.013635, acc: 0.992188 -[Wed Oct 10 17:01:38 2018] epoch_id: 20, batch_id: 2600, cost: 0.019265, acc: 1.000000 -[Wed Oct 10 17:01:41 2018] epoch_id: 20, batch_id: 2700, cost: 0.040467, acc: 0.976562 -[Wed Oct 10 17:01:44 2018] epoch_id: 20, batch_id: 2800, cost: 0.029407, acc: 0.992188 -[Wed Oct 10 17:01:46 2018] epoch_id: 20, batch_id: 2900, cost: 0.036886, acc: 0.976562 -[Wed Oct 10 17:01:49 2018] epoch_id: 20, batch_id: 3000, cost: 0.018317, acc: 0.992188 - -[Wed Oct 10 17:01:49 2018] epoch_id: 20, train_avg_cost: 0.025258, train_avg_acc: 0.991367 -[Wed Oct 10 17:01:50 2018] epoch_id: 20, dev_cost: 1.125290, accuracy: 0.8358 -[Wed Oct 10 17:01:51 2018] epoch_id: 20, test_cost: 1.148761, accuracy: 0.832 - -[Wed Oct 10 17:01:59 2018] epoch_id: 21, batch_id: 0, cost: 0.020581, acc: 0.992188 -[Wed Oct 10 17:02:02 2018] epoch_id: 21, batch_id: 100, cost: 0.021132, acc: 0.992188 -[Wed Oct 10 17:02:04 2018] epoch_id: 21, batch_id: 200, cost: 0.040257, acc: 0.976562 -[Wed Oct 10 17:02:06 2018] epoch_id: 21, batch_id: 300, cost: 0.013450, acc: 1.000000 -[Wed Oct 10 17:02:08 2018] epoch_id: 21, batch_id: 400, cost: 0.027469, acc: 0.992188 -[Wed Oct 10 17:02:11 2018] epoch_id: 21, batch_id: 500, cost: 0.007088, acc: 0.992188 -[Wed Oct 10 17:02:13 2018] epoch_id: 21, batch_id: 600, cost: 0.028169, acc: 0.992188 -[Wed Oct 10 17:02:15 2018] epoch_id: 21, batch_id: 700, cost: 0.067799, acc: 0.984375 -[Wed Oct 10 17:02:17 2018] epoch_id: 21, batch_id: 800, cost: 0.003184, acc: 1.000000 -[Wed Oct 10 17:02:20 2018] epoch_id: 21, batch_id: 900, cost: 0.011056, acc: 0.992188 -[Wed Oct 10 17:02:22 2018] epoch_id: 21, batch_id: 1000, cost: 0.012187, acc: 1.000000 -[Wed Oct 10 17:02:24 2018] epoch_id: 21, batch_id: 1100, cost: 0.009409, acc: 0.992188 -[Wed Oct 10 17:02:26 2018] epoch_id: 21, batch_id: 1200, cost: 0.000739, acc: 
1.000000 -[Wed Oct 10 17:02:29 2018] epoch_id: 21, batch_id: 1300, cost: 0.002971, acc: 1.000000 -[Wed Oct 10 17:02:31 2018] epoch_id: 21, batch_id: 1400, cost: 0.031287, acc: 0.984375 -[Wed Oct 10 17:02:33 2018] epoch_id: 21, batch_id: 1500, cost: 0.023455, acc: 0.992188 -[Wed Oct 10 17:02:36 2018] epoch_id: 21, batch_id: 1600, cost: 0.007438, acc: 1.000000 -[Wed Oct 10 17:02:38 2018] epoch_id: 21, batch_id: 1700, cost: 0.035499, acc: 0.968750 -[Wed Oct 10 17:02:40 2018] epoch_id: 21, batch_id: 1800, cost: 0.012515, acc: 1.000000 -[Wed Oct 10 17:02:42 2018] epoch_id: 21, batch_id: 1900, cost: 0.008550, acc: 1.000000 -[Wed Oct 10 17:02:45 2018] epoch_id: 21, batch_id: 2000, cost: 0.051551, acc: 0.992188 -[Wed Oct 10 17:02:47 2018] epoch_id: 21, batch_id: 2100, cost: 0.004980, acc: 1.000000 -[Wed Oct 10 17:02:49 2018] epoch_id: 21, batch_id: 2200, cost: 0.006854, acc: 1.000000 -[Wed Oct 10 17:02:51 2018] epoch_id: 21, batch_id: 2300, cost: 0.071025, acc: 0.968750 -[Wed Oct 10 17:02:55 2018] epoch_id: 21, batch_id: 2400, cost: 0.013599, acc: 1.000000 -[Wed Oct 10 17:02:57 2018] epoch_id: 21, batch_id: 2500, cost: 0.025085, acc: 0.992188 -[Wed Oct 10 17:02:59 2018] epoch_id: 21, batch_id: 2600, cost: 0.018276, acc: 0.984375 -[Wed Oct 10 17:03:01 2018] epoch_id: 21, batch_id: 2700, cost: 0.040565, acc: 0.984375 -[Wed Oct 10 17:03:04 2018] epoch_id: 21, batch_id: 2800, cost: 0.099454, acc: 0.968750 -[Wed Oct 10 17:03:06 2018] epoch_id: 21, batch_id: 2900, cost: 0.017812, acc: 0.992188 -[Wed Oct 10 17:03:08 2018] epoch_id: 21, batch_id: 3000, cost: 0.019825, acc: 0.992188 - -[Wed Oct 10 17:03:09 2018] epoch_id: 21, train_avg_cost: 0.024180, train_avg_acc: 0.991505 -[Wed Oct 10 17:03:10 2018] epoch_id: 21, dev_cost: 1.413867, accuracy: 0.836 -[Wed Oct 10 17:03:11 2018] epoch_id: 21, test_cost: 1.380237, accuracy: 0.8353 - -[Wed Oct 10 17:03:19 2018] epoch_id: 22, batch_id: 0, cost: 0.001493, acc: 1.000000 -[Wed Oct 10 17:03:21 2018] epoch_id: 22, batch_id: 100, cost: 0.017211, acc: 0.984375 -[Wed Oct 10 17:03:23 2018] epoch_id: 22, batch_id: 200, cost: 0.015626, acc: 0.992188 -[Wed Oct 10 17:03:25 2018] epoch_id: 22, batch_id: 300, cost: 0.002411, acc: 1.000000 -[Wed Oct 10 17:03:28 2018] epoch_id: 22, batch_id: 400, cost: 0.098118, acc: 0.984375 -[Wed Oct 10 17:03:30 2018] epoch_id: 22, batch_id: 500, cost: 0.031192, acc: 0.992188 -[Wed Oct 10 17:03:32 2018] epoch_id: 22, batch_id: 600, cost: 0.002122, acc: 1.000000 -[Wed Oct 10 17:03:34 2018] epoch_id: 22, batch_id: 700, cost: 0.006148, acc: 1.000000 -[Wed Oct 10 17:03:38 2018] epoch_id: 22, batch_id: 800, cost: 0.007830, acc: 1.000000 -[Wed Oct 10 17:03:40 2018] epoch_id: 22, batch_id: 900, cost: 0.009371, acc: 1.000000 -[Wed Oct 10 17:03:43 2018] epoch_id: 22, batch_id: 1000, cost: 0.024280, acc: 0.984375 -[Wed Oct 10 17:03:45 2018] epoch_id: 22, batch_id: 1100, cost: 0.067847, acc: 0.984375 -[Wed Oct 10 17:03:47 2018] epoch_id: 22, batch_id: 1200, cost: 0.024875, acc: 0.984375 -[Wed Oct 10 17:03:50 2018] epoch_id: 22, batch_id: 1300, cost: 0.004252, acc: 1.000000 -[Wed Oct 10 17:03:52 2018] epoch_id: 22, batch_id: 1400, cost: 0.014934, acc: 0.992188 -[Wed Oct 10 17:03:54 2018] epoch_id: 22, batch_id: 1500, cost: 0.008299, acc: 1.000000 -[Wed Oct 10 17:03:56 2018] epoch_id: 22, batch_id: 1600, cost: 0.007932, acc: 1.000000 -[Wed Oct 10 17:03:59 2018] epoch_id: 22, batch_id: 1700, cost: 0.007008, acc: 1.000000 -[Wed Oct 10 17:04:01 2018] epoch_id: 22, batch_id: 1800, cost: 0.028636, acc: 0.984375 -[Wed Oct 10 17:04:03 2018] epoch_id: 22, 
batch_id: 1900, cost: 0.012712, acc: 0.992188 -[Wed Oct 10 17:04:05 2018] epoch_id: 22, batch_id: 2000, cost: 0.027561, acc: 0.992188 -[Wed Oct 10 17:04:08 2018] epoch_id: 22, batch_id: 2100, cost: 0.017589, acc: 0.992188 -[Wed Oct 10 17:04:10 2018] epoch_id: 22, batch_id: 2200, cost: 0.016391, acc: 0.992188 -[Wed Oct 10 17:04:12 2018] epoch_id: 22, batch_id: 2300, cost: 0.042172, acc: 0.984375 -[Wed Oct 10 17:04:14 2018] epoch_id: 22, batch_id: 2400, cost: 0.024060, acc: 0.984375 -[Wed Oct 10 17:04:17 2018] epoch_id: 22, batch_id: 2500, cost: 0.014206, acc: 1.000000 -[Wed Oct 10 17:04:19 2018] epoch_id: 22, batch_id: 2600, cost: 0.028562, acc: 0.992188 -[Wed Oct 10 17:04:21 2018] epoch_id: 22, batch_id: 2700, cost: 0.013936, acc: 0.992188 -[Wed Oct 10 17:04:23 2018] epoch_id: 22, batch_id: 2800, cost: 0.023205, acc: 0.984375 -[Wed Oct 10 17:04:26 2018] epoch_id: 22, batch_id: 2900, cost: 0.031024, acc: 0.984375 -[Wed Oct 10 17:04:28 2018] epoch_id: 22, batch_id: 3000, cost: 0.004115, acc: 1.000000 - -[Wed Oct 10 17:04:29 2018] epoch_id: 22, train_avg_cost: 0.022458, train_avg_acc: 0.992184 -[Wed Oct 10 17:04:30 2018] epoch_id: 22, dev_cost: 1.388674, accuracy: 0.8329 -[Wed Oct 10 17:04:31 2018] epoch_id: 22, test_cost: 1.366122, accuracy: 0.8359 - -[Wed Oct 10 17:04:39 2018] epoch_id: 23, batch_id: 0, cost: 0.012273, acc: 0.992188 -[Wed Oct 10 17:04:41 2018] epoch_id: 23, batch_id: 100, cost: 0.010904, acc: 0.992188 -[Wed Oct 10 17:04:44 2018] epoch_id: 23, batch_id: 200, cost: 0.001967, acc: 1.000000 -[Wed Oct 10 17:04:46 2018] epoch_id: 23, batch_id: 300, cost: 0.006554, acc: 1.000000 -[Wed Oct 10 17:04:48 2018] epoch_id: 23, batch_id: 400, cost: 0.005179, acc: 1.000000 -[Wed Oct 10 17:04:50 2018] epoch_id: 23, batch_id: 500, cost: 0.014761, acc: 0.992188 -[Wed Oct 10 17:04:53 2018] epoch_id: 23, batch_id: 600, cost: 0.015971, acc: 0.992188 -[Wed Oct 10 17:04:55 2018] epoch_id: 23, batch_id: 700, cost: 0.058416, acc: 0.984375 -[Wed Oct 10 17:04:57 2018] epoch_id: 23, batch_id: 800, cost: 0.005064, acc: 1.000000 -[Wed Oct 10 17:04:59 2018] epoch_id: 23, batch_id: 900, cost: 0.003761, acc: 1.000000 -[Wed Oct 10 17:05:02 2018] epoch_id: 23, batch_id: 1000, cost: 0.002844, acc: 1.000000 -[Wed Oct 10 17:05:04 2018] epoch_id: 23, batch_id: 1100, cost: 0.010259, acc: 1.000000 -[Wed Oct 10 17:05:06 2018] epoch_id: 23, batch_id: 1200, cost: 0.005445, acc: 1.000000 -[Wed Oct 10 17:05:09 2018] epoch_id: 23, batch_id: 1300, cost: 0.018197, acc: 0.992188 -[Wed Oct 10 17:05:11 2018] epoch_id: 23, batch_id: 1400, cost: 0.016600, acc: 0.992188 -[Wed Oct 10 17:05:13 2018] epoch_id: 23, batch_id: 1500, cost: 0.047691, acc: 0.992188 -[Wed Oct 10 17:05:15 2018] epoch_id: 23, batch_id: 1600, cost: 0.084442, acc: 0.984375 -[Wed Oct 10 17:05:18 2018] epoch_id: 23, batch_id: 1700, cost: 0.044283, acc: 0.992188 -[Wed Oct 10 17:05:21 2018] epoch_id: 23, batch_id: 1800, cost: 0.120200, acc: 0.984375 -[Wed Oct 10 17:05:23 2018] epoch_id: 23, batch_id: 1900, cost: 0.013874, acc: 0.992188 -[Wed Oct 10 17:05:26 2018] epoch_id: 23, batch_id: 2000, cost: 0.027709, acc: 0.984375 -[Wed Oct 10 17:05:28 2018] epoch_id: 23, batch_id: 2100, cost: 0.017088, acc: 0.992188 -[Wed Oct 10 17:05:30 2018] epoch_id: 23, batch_id: 2200, cost: 0.049081, acc: 0.976562 -[Wed Oct 10 17:05:32 2018] epoch_id: 23, batch_id: 2300, cost: 0.013016, acc: 0.992188 -[Wed Oct 10 17:05:35 2018] epoch_id: 23, batch_id: 2400, cost: 0.015467, acc: 0.992188 -[Wed Oct 10 17:05:37 2018] epoch_id: 23, batch_id: 2500, cost: 0.002745, acc: 1.000000 -[Wed Oct 
10 17:05:39 2018] epoch_id: 23, batch_id: 2600, cost: 0.002618, acc: 1.000000 -[Wed Oct 10 17:05:42 2018] epoch_id: 23, batch_id: 2700, cost: 0.010789, acc: 1.000000 -[Wed Oct 10 17:05:44 2018] epoch_id: 23, batch_id: 2800, cost: 0.026513, acc: 0.984375 -[Wed Oct 10 17:05:46 2018] epoch_id: 23, batch_id: 2900, cost: 0.056513, acc: 0.984375 -[Wed Oct 10 17:05:49 2018] epoch_id: 23, batch_id: 3000, cost: 0.007607, acc: 1.000000 - -[Wed Oct 10 17:05:49 2018] epoch_id: 23, train_avg_cost: 0.021786, train_avg_acc: 0.992707 -[Wed Oct 10 17:05:50 2018] epoch_id: 23, dev_cost: 1.181561, accuracy: 0.8368 -[Wed Oct 10 17:05:51 2018] epoch_id: 23, test_cost: 1.209735, accuracy: 0.8339 - -[Wed Oct 10 17:06:00 2018] epoch_id: 24, batch_id: 0, cost: 0.005431, acc: 1.000000 -[Wed Oct 10 17:06:02 2018] epoch_id: 24, batch_id: 100, cost: 0.017588, acc: 0.984375 -[Wed Oct 10 17:06:04 2018] epoch_id: 24, batch_id: 200, cost: 0.078571, acc: 0.976562 -[Wed Oct 10 17:06:06 2018] epoch_id: 24, batch_id: 300, cost: 0.003192, acc: 1.000000 -[Wed Oct 10 17:06:09 2018] epoch_id: 24, batch_id: 400, cost: 0.008610, acc: 1.000000 -[Wed Oct 10 17:06:11 2018] epoch_id: 24, batch_id: 500, cost: 0.010603, acc: 0.992188 -[Wed Oct 10 17:06:13 2018] epoch_id: 24, batch_id: 600, cost: 0.068159, acc: 0.984375 -[Wed Oct 10 17:06:15 2018] epoch_id: 24, batch_id: 700, cost: 0.031611, acc: 0.992188 -[Wed Oct 10 17:06:18 2018] epoch_id: 24, batch_id: 800, cost: 0.005276, acc: 1.000000 -[Wed Oct 10 17:06:20 2018] epoch_id: 24, batch_id: 900, cost: 0.019978, acc: 0.992188 -[Wed Oct 10 17:06:22 2018] epoch_id: 24, batch_id: 1000, cost: 0.061957, acc: 0.992188 -[Wed Oct 10 17:06:25 2018] epoch_id: 24, batch_id: 1100, cost: 0.015165, acc: 0.992188 -[Wed Oct 10 17:06:27 2018] epoch_id: 24, batch_id: 1200, cost: 0.052448, acc: 0.976562 -[Wed Oct 10 17:06:29 2018] epoch_id: 24, batch_id: 1300, cost: 0.003287, acc: 1.000000 -[Wed Oct 10 17:06:31 2018] epoch_id: 24, batch_id: 1400, cost: 0.027564, acc: 0.992188 -[Wed Oct 10 17:06:34 2018] epoch_id: 24, batch_id: 1500, cost: 0.002861, acc: 1.000000 -[Wed Oct 10 17:06:36 2018] epoch_id: 24, batch_id: 1600, cost: 0.022500, acc: 0.992188 -[Wed Oct 10 17:06:38 2018] epoch_id: 24, batch_id: 1700, cost: 0.041690, acc: 0.984375 -[Wed Oct 10 17:06:40 2018] epoch_id: 24, batch_id: 1800, cost: 0.016889, acc: 0.992188 -[Wed Oct 10 17:06:43 2018] epoch_id: 24, batch_id: 1900, cost: 0.026357, acc: 0.992188 -[Wed Oct 10 17:06:45 2018] epoch_id: 24, batch_id: 2000, cost: 0.035357, acc: 0.984375 -[Wed Oct 10 17:06:47 2018] epoch_id: 24, batch_id: 2100, cost: 0.070517, acc: 0.960938 -[Wed Oct 10 17:06:49 2018] epoch_id: 24, batch_id: 2200, cost: 0.021093, acc: 0.984375 -[Wed Oct 10 17:06:52 2018] epoch_id: 24, batch_id: 2300, cost: 0.003296, acc: 1.000000 -[Wed Oct 10 17:06:54 2018] epoch_id: 24, batch_id: 2400, cost: 0.002669, acc: 1.000000 -[Wed Oct 10 17:06:56 2018] epoch_id: 24, batch_id: 2500, cost: 0.047008, acc: 0.976562 -[Wed Oct 10 17:06:58 2018] epoch_id: 24, batch_id: 2600, cost: 0.015561, acc: 0.992188 -[Wed Oct 10 17:07:00 2018] epoch_id: 24, batch_id: 2700, cost: 0.074711, acc: 0.984375 -[Wed Oct 10 17:07:03 2018] epoch_id: 24, batch_id: 2800, cost: 0.021376, acc: 0.992188 -[Wed Oct 10 17:07:05 2018] epoch_id: 24, batch_id: 2900, cost: 0.013928, acc: 1.000000 -[Wed Oct 10 17:07:07 2018] epoch_id: 24, batch_id: 3000, cost: 0.019474, acc: 0.992188 - -[Wed Oct 10 17:07:07 2018] epoch_id: 24, train_avg_cost: 0.020611, train_avg_acc: 0.992913 -[Wed Oct 10 17:07:08 2018] epoch_id: 24, dev_cost: 
1.249092, accuracy: 0.8329 -[Wed Oct 10 17:07:09 2018] epoch_id: 24, test_cost: 1.206091, accuracy: 0.8348 - -[Wed Oct 10 17:07:18 2018] epoch_id: 25, batch_id: 0, cost: 0.009832, acc: 1.000000 -[Wed Oct 10 17:07:21 2018] epoch_id: 25, batch_id: 100, cost: 0.007028, acc: 1.000000 -[Wed Oct 10 17:07:23 2018] epoch_id: 25, batch_id: 200, cost: 0.029548, acc: 0.984375 -[Wed Oct 10 17:07:25 2018] epoch_id: 25, batch_id: 300, cost: 0.001753, acc: 1.000000 -[Wed Oct 10 17:07:28 2018] epoch_id: 25, batch_id: 400, cost: 0.001457, acc: 1.000000 -[Wed Oct 10 17:07:30 2018] epoch_id: 25, batch_id: 500, cost: 0.004209, acc: 1.000000 -[Wed Oct 10 17:07:32 2018] epoch_id: 25, batch_id: 600, cost: 0.002758, acc: 1.000000 -[Wed Oct 10 17:07:35 2018] epoch_id: 25, batch_id: 700, cost: 0.039204, acc: 0.984375 -[Wed Oct 10 17:07:37 2018] epoch_id: 25, batch_id: 800, cost: 0.004454, acc: 1.000000 -[Wed Oct 10 17:07:39 2018] epoch_id: 25, batch_id: 900, cost: 0.005273, acc: 1.000000 -[Wed Oct 10 17:07:41 2018] epoch_id: 25, batch_id: 1000, cost: 0.008021, acc: 0.992188 -[Wed Oct 10 17:07:44 2018] epoch_id: 25, batch_id: 1100, cost: 0.037441, acc: 0.976562 -[Wed Oct 10 17:07:46 2018] epoch_id: 25, batch_id: 1200, cost: 0.011153, acc: 1.000000 -[Wed Oct 10 17:07:48 2018] epoch_id: 25, batch_id: 1300, cost: 0.064342, acc: 0.992188 -[Wed Oct 10 17:07:50 2018] epoch_id: 25, batch_id: 1400, cost: 0.036600, acc: 0.992188 -[Wed Oct 10 17:07:53 2018] epoch_id: 25, batch_id: 1500, cost: 0.046661, acc: 0.992188 -[Wed Oct 10 17:07:55 2018] epoch_id: 25, batch_id: 1600, cost: 0.015580, acc: 1.000000 -[Wed Oct 10 17:07:57 2018] epoch_id: 25, batch_id: 1700, cost: 0.008311, acc: 1.000000 -[Wed Oct 10 17:07:59 2018] epoch_id: 25, batch_id: 1800, cost: 0.004560, acc: 1.000000 -[Wed Oct 10 17:08:02 2018] epoch_id: 25, batch_id: 1900, cost: 0.012200, acc: 1.000000 -[Wed Oct 10 17:08:04 2018] epoch_id: 25, batch_id: 2000, cost: 0.006555, acc: 1.000000 -[Wed Oct 10 17:08:06 2018] epoch_id: 25, batch_id: 2100, cost: 0.028259, acc: 0.992188 -[Wed Oct 10 17:08:08 2018] epoch_id: 25, batch_id: 2200, cost: 0.003801, acc: 1.000000 -[Wed Oct 10 17:08:11 2018] epoch_id: 25, batch_id: 2300, cost: 0.004532, acc: 1.000000 -[Wed Oct 10 17:08:13 2018] epoch_id: 25, batch_id: 2400, cost: 0.008551, acc: 1.000000 -[Wed Oct 10 17:08:15 2018] epoch_id: 25, batch_id: 2500, cost: 0.013781, acc: 0.992188 -[Wed Oct 10 17:08:17 2018] epoch_id: 25, batch_id: 2600, cost: 0.024098, acc: 0.992188 -[Wed Oct 10 17:08:21 2018] epoch_id: 25, batch_id: 2700, cost: 0.009117, acc: 0.992188 -[Wed Oct 10 17:08:23 2018] epoch_id: 25, batch_id: 2800, cost: 0.032231, acc: 0.984375 -[Wed Oct 10 17:08:25 2018] epoch_id: 25, batch_id: 2900, cost: 0.004502, acc: 1.000000 -[Wed Oct 10 17:08:28 2018] epoch_id: 25, batch_id: 3000, cost: 0.006727, acc: 1.000000 - -[Wed Oct 10 17:08:28 2018] epoch_id: 25, train_avg_cost: 0.020529, train_avg_acc: 0.993019 -[Wed Oct 10 17:08:29 2018] epoch_id: 25, dev_cost: 1.238637, accuracy: 0.8323 -[Wed Oct 10 17:08:30 2018] epoch_id: 25, test_cost: 1.213099, accuracy: 0.8345 - -[Wed Oct 10 17:08:38 2018] epoch_id: 26, batch_id: 0, cost: 0.040923, acc: 0.992188 -[Wed Oct 10 17:08:40 2018] epoch_id: 26, batch_id: 100, cost: 0.003892, acc: 1.000000 -[Wed Oct 10 17:08:43 2018] epoch_id: 26, batch_id: 200, cost: 0.005719, acc: 1.000000 -[Wed Oct 10 17:08:45 2018] epoch_id: 26, batch_id: 300, cost: 0.011791, acc: 1.000000 -[Wed Oct 10 17:08:47 2018] epoch_id: 26, batch_id: 400, cost: 0.015297, acc: 0.992188 -[Wed Oct 10 17:08:49 2018] epoch_id: 26, 
batch_id: 500, cost: 0.067796, acc: 0.984375 -[Wed Oct 10 17:08:52 2018] epoch_id: 26, batch_id: 600, cost: 0.041215, acc: 0.992188 -[Wed Oct 10 17:08:54 2018] epoch_id: 26, batch_id: 700, cost: 0.017786, acc: 0.984375 -[Wed Oct 10 17:08:56 2018] epoch_id: 26, batch_id: 800, cost: 0.033173, acc: 0.992188 -[Wed Oct 10 17:08:59 2018] epoch_id: 26, batch_id: 900, cost: 0.007282, acc: 0.992188 -[Wed Oct 10 17:09:01 2018] epoch_id: 26, batch_id: 1000, cost: 0.028577, acc: 0.992188 -[Wed Oct 10 17:09:03 2018] epoch_id: 26, batch_id: 1100, cost: 0.017994, acc: 0.992188 -[Wed Oct 10 17:09:05 2018] epoch_id: 26, batch_id: 1200, cost: 0.005319, acc: 1.000000 -[Wed Oct 10 17:09:08 2018] epoch_id: 26, batch_id: 1300, cost: 0.030209, acc: 0.992188 -[Wed Oct 10 17:09:10 2018] epoch_id: 26, batch_id: 1400, cost: 0.012992, acc: 0.992188 -[Wed Oct 10 17:09:12 2018] epoch_id: 26, batch_id: 1500, cost: 0.014228, acc: 0.992188 -[Wed Oct 10 17:09:15 2018] epoch_id: 26, batch_id: 1600, cost: 0.008148, acc: 1.000000 -[Wed Oct 10 17:09:17 2018] epoch_id: 26, batch_id: 1700, cost: 0.003299, acc: 1.000000 -[Wed Oct 10 17:09:19 2018] epoch_id: 26, batch_id: 1800, cost: 0.026134, acc: 0.992188 -[Wed Oct 10 17:09:22 2018] epoch_id: 26, batch_id: 1900, cost: 0.016610, acc: 1.000000 -[Wed Oct 10 17:09:24 2018] epoch_id: 26, batch_id: 2000, cost: 0.019105, acc: 0.992188 -[Wed Oct 10 17:09:26 2018] epoch_id: 26, batch_id: 2100, cost: 0.004593, acc: 1.000000 -[Wed Oct 10 17:09:28 2018] epoch_id: 26, batch_id: 2200, cost: 0.036595, acc: 0.992188 -[Wed Oct 10 17:09:32 2018] epoch_id: 26, batch_id: 2300, cost: 0.003857, acc: 1.000000 -[Wed Oct 10 17:09:34 2018] epoch_id: 26, batch_id: 2400, cost: 0.002700, acc: 1.000000 -[Wed Oct 10 17:09:36 2018] epoch_id: 26, batch_id: 2500, cost: 0.002269, acc: 1.000000 -[Wed Oct 10 17:09:38 2018] epoch_id: 26, batch_id: 2600, cost: 0.022186, acc: 0.992188 -[Wed Oct 10 17:09:41 2018] epoch_id: 26, batch_id: 2700, cost: 0.035991, acc: 0.976562 -[Wed Oct 10 17:09:43 2018] epoch_id: 26, batch_id: 2800, cost: 0.005430, acc: 1.000000 -[Wed Oct 10 17:09:45 2018] epoch_id: 26, batch_id: 2900, cost: 0.017578, acc: 0.992188 -[Wed Oct 10 17:09:47 2018] epoch_id: 26, batch_id: 3000, cost: 0.030596, acc: 0.984375 - -[Wed Oct 10 17:09:48 2018] epoch_id: 26, train_avg_cost: 0.019528, train_avg_acc: 0.993425 -[Wed Oct 10 17:09:49 2018] epoch_id: 26, dev_cost: 1.452644, accuracy: 0.8334 -[Wed Oct 10 17:09:50 2018] epoch_id: 26, test_cost: 1.449995, accuracy: 0.8329 - -[Wed Oct 10 17:09:58 2018] epoch_id: 27, batch_id: 0, cost: 0.006640, acc: 1.000000 -[Wed Oct 10 17:10:00 2018] epoch_id: 27, batch_id: 100, cost: 0.001101, acc: 1.000000 -[Wed Oct 10 17:10:02 2018] epoch_id: 27, batch_id: 200, cost: 0.019329, acc: 0.992188 -[Wed Oct 10 17:10:05 2018] epoch_id: 27, batch_id: 300, cost: 0.002996, acc: 1.000000 -[Wed Oct 10 17:10:07 2018] epoch_id: 27, batch_id: 400, cost: 0.002077, acc: 1.000000 -[Wed Oct 10 17:10:09 2018] epoch_id: 27, batch_id: 500, cost: 0.007058, acc: 1.000000 -[Wed Oct 10 17:10:11 2018] epoch_id: 27, batch_id: 600, cost: 0.002119, acc: 1.000000 -[Wed Oct 10 17:10:14 2018] epoch_id: 27, batch_id: 700, cost: 0.039876, acc: 0.984375 -[Wed Oct 10 17:10:16 2018] epoch_id: 27, batch_id: 800, cost: 0.010680, acc: 1.000000 -[Wed Oct 10 17:10:19 2018] epoch_id: 27, batch_id: 900, cost: 0.004508, acc: 1.000000 -[Wed Oct 10 17:10:21 2018] epoch_id: 27, batch_id: 1000, cost: 0.029683, acc: 0.984375 -[Wed Oct 10 17:10:24 2018] epoch_id: 27, batch_id: 1100, cost: 0.011985, acc: 1.000000 -[Wed Oct 10 
17:10:26 2018] epoch_id: 27, batch_id: 1200, cost: 0.004091, acc: 1.000000 -[Wed Oct 10 17:10:28 2018] epoch_id: 27, batch_id: 1300, cost: 0.028585, acc: 0.984375 -[Wed Oct 10 17:10:30 2018] epoch_id: 27, batch_id: 1400, cost: 0.001462, acc: 1.000000 -[Wed Oct 10 17:10:33 2018] epoch_id: 27, batch_id: 1500, cost: 0.033079, acc: 0.992188 -[Wed Oct 10 17:10:35 2018] epoch_id: 27, batch_id: 1600, cost: 0.017679, acc: 0.992188 -[Wed Oct 10 17:10:37 2018] epoch_id: 27, batch_id: 1700, cost: 0.000921, acc: 1.000000 -[Wed Oct 10 17:10:39 2018] epoch_id: 27, batch_id: 1800, cost: 0.029850, acc: 0.984375 -[Wed Oct 10 17:10:42 2018] epoch_id: 27, batch_id: 1900, cost: 0.005679, acc: 1.000000 -[Wed Oct 10 17:10:44 2018] epoch_id: 27, batch_id: 2000, cost: 0.007635, acc: 0.992188 -[Wed Oct 10 17:10:46 2018] epoch_id: 27, batch_id: 2100, cost: 0.056935, acc: 0.984375 -[Wed Oct 10 17:10:48 2018] epoch_id: 27, batch_id: 2200, cost: 0.014361, acc: 1.000000 -[Wed Oct 10 17:10:51 2018] epoch_id: 27, batch_id: 2300, cost: 0.040282, acc: 0.984375 -[Wed Oct 10 17:10:53 2018] epoch_id: 27, batch_id: 2400, cost: 0.004073, acc: 1.000000 -[Wed Oct 10 17:10:55 2018] epoch_id: 27, batch_id: 2500, cost: 0.013922, acc: 0.984375 -[Wed Oct 10 17:10:57 2018] epoch_id: 27, batch_id: 2600, cost: 0.018309, acc: 0.992188 -[Wed Oct 10 17:10:59 2018] epoch_id: 27, batch_id: 2700, cost: 0.011584, acc: 0.992188 -[Wed Oct 10 17:11:02 2018] epoch_id: 27, batch_id: 2800, cost: 0.018637, acc: 0.992188 -[Wed Oct 10 17:11:04 2018] epoch_id: 27, batch_id: 2900, cost: 0.013617, acc: 0.992188 -[Wed Oct 10 17:11:06 2018] epoch_id: 27, batch_id: 3000, cost: 0.079333, acc: 0.976562 - -[Wed Oct 10 17:11:07 2018] epoch_id: 27, train_avg_cost: 0.018039, train_avg_acc: 0.993701 -[Wed Oct 10 17:11:08 2018] epoch_id: 27, dev_cost: 1.463991, accuracy: 0.8333 -[Wed Oct 10 17:11:09 2018] epoch_id: 27, test_cost: 1.450415, accuracy: 0.8334 - -[Wed Oct 10 17:11:17 2018] epoch_id: 28, batch_id: 0, cost: 0.023539, acc: 0.984375 -[Wed Oct 10 17:11:20 2018] epoch_id: 28, batch_id: 100, cost: 0.005577, acc: 1.000000 -[Wed Oct 10 17:11:22 2018] epoch_id: 28, batch_id: 200, cost: 0.001478, acc: 1.000000 -[Wed Oct 10 17:11:24 2018] epoch_id: 28, batch_id: 300, cost: 0.005870, acc: 1.000000 -[Wed Oct 10 17:11:26 2018] epoch_id: 28, batch_id: 400, cost: 0.021292, acc: 0.992188 -[Wed Oct 10 17:11:29 2018] epoch_id: 28, batch_id: 500, cost: 0.032081, acc: 0.984375 -[Wed Oct 10 17:11:31 2018] epoch_id: 28, batch_id: 600, cost: 0.004568, acc: 1.000000 -[Wed Oct 10 17:11:33 2018] epoch_id: 28, batch_id: 700, cost: 0.006552, acc: 1.000000 -[Wed Oct 10 17:11:35 2018] epoch_id: 28, batch_id: 800, cost: 0.012579, acc: 0.992188 -[Wed Oct 10 17:11:38 2018] epoch_id: 28, batch_id: 900, cost: 0.004214, acc: 1.000000 -[Wed Oct 10 17:11:40 2018] epoch_id: 28, batch_id: 1000, cost: 0.023843, acc: 0.984375 -[Wed Oct 10 17:11:42 2018] epoch_id: 28, batch_id: 1100, cost: 0.017869, acc: 0.992188 -[Wed Oct 10 17:11:44 2018] epoch_id: 28, batch_id: 1200, cost: 0.045617, acc: 0.984375 -[Wed Oct 10 17:11:46 2018] epoch_id: 28, batch_id: 1300, cost: 0.012739, acc: 0.992188 -[Wed Oct 10 17:11:49 2018] epoch_id: 28, batch_id: 1400, cost: 0.020053, acc: 0.992188 -[Wed Oct 10 17:11:51 2018] epoch_id: 28, batch_id: 1500, cost: 0.006956, acc: 1.000000 -[Wed Oct 10 17:11:53 2018] epoch_id: 28, batch_id: 1600, cost: 0.022830, acc: 0.984375 -[Wed Oct 10 17:11:55 2018] epoch_id: 28, batch_id: 1700, cost: 0.008924, acc: 1.000000 -[Wed Oct 10 17:11:58 2018] epoch_id: 28, batch_id: 1800, cost: 
0.013902, acc: 0.992188 -[Wed Oct 10 17:12:01 2018] epoch_id: 28, batch_id: 1900, cost: 0.026418, acc: 0.984375 -[Wed Oct 10 17:12:03 2018] epoch_id: 28, batch_id: 2000, cost: 0.006809, acc: 1.000000 -[Wed Oct 10 17:12:05 2018] epoch_id: 28, batch_id: 2100, cost: 0.041039, acc: 0.984375 -[Wed Oct 10 17:12:08 2018] epoch_id: 28, batch_id: 2200, cost: 0.023235, acc: 0.992188 -[Wed Oct 10 17:12:10 2018] epoch_id: 28, batch_id: 2300, cost: 0.057685, acc: 0.976562 -[Wed Oct 10 17:12:12 2018] epoch_id: 28, batch_id: 2400, cost: 0.012688, acc: 1.000000 -[Wed Oct 10 17:12:14 2018] epoch_id: 28, batch_id: 2500, cost: 0.010697, acc: 0.992188 -[Wed Oct 10 17:12:16 2018] epoch_id: 28, batch_id: 2600, cost: 0.025213, acc: 0.992188 -[Wed Oct 10 17:12:19 2018] epoch_id: 28, batch_id: 2700, cost: 0.011269, acc: 0.992188 -[Wed Oct 10 17:12:21 2018] epoch_id: 28, batch_id: 2800, cost: 0.001141, acc: 1.000000 -[Wed Oct 10 17:12:23 2018] epoch_id: 28, batch_id: 2900, cost: 0.049410, acc: 0.984375 -[Wed Oct 10 17:12:25 2018] epoch_id: 28, batch_id: 3000, cost: 0.019739, acc: 0.992188 - -[Wed Oct 10 17:12:26 2018] epoch_id: 28, train_avg_cost: 0.018105, train_avg_acc: 0.993756 -[Wed Oct 10 17:12:27 2018] epoch_id: 28, dev_cost: 1.200318, accuracy: 0.8345 -[Wed Oct 10 17:12:28 2018] epoch_id: 28, test_cost: 1.228304, accuracy: 0.8308 - -[Wed Oct 10 17:12:36 2018] epoch_id: 29, batch_id: 0, cost: 0.004694, acc: 1.000000 -[Wed Oct 10 17:12:39 2018] epoch_id: 29, batch_id: 100, cost: 0.008528, acc: 0.992188 -[Wed Oct 10 17:12:41 2018] epoch_id: 29, batch_id: 200, cost: 0.006778, acc: 0.992188 -[Wed Oct 10 17:12:43 2018] epoch_id: 29, batch_id: 300, cost: 0.026610, acc: 0.992188 -[Wed Oct 10 17:12:45 2018] epoch_id: 29, batch_id: 400, cost: 0.008479, acc: 1.000000 -[Wed Oct 10 17:12:47 2018] epoch_id: 29, batch_id: 500, cost: 0.021705, acc: 0.984375 -[Wed Oct 10 17:12:50 2018] epoch_id: 29, batch_id: 600, cost: 0.010583, acc: 0.992188 -[Wed Oct 10 17:12:52 2018] epoch_id: 29, batch_id: 700, cost: 0.056105, acc: 0.992188 -[Wed Oct 10 17:12:54 2018] epoch_id: 29, batch_id: 800, cost: 0.000675, acc: 1.000000 -[Wed Oct 10 17:12:56 2018] epoch_id: 29, batch_id: 900, cost: 0.011277, acc: 1.000000 -[Wed Oct 10 17:12:58 2018] epoch_id: 29, batch_id: 1000, cost: 0.006004, acc: 1.000000 -[Wed Oct 10 17:13:01 2018] epoch_id: 29, batch_id: 1100, cost: 0.000914, acc: 1.000000 -[Wed Oct 10 17:13:03 2018] epoch_id: 29, batch_id: 1200, cost: 0.001097, acc: 1.000000 -[Wed Oct 10 17:13:05 2018] epoch_id: 29, batch_id: 1300, cost: 0.002556, acc: 1.000000 -[Wed Oct 10 17:13:07 2018] epoch_id: 29, batch_id: 1400, cost: 0.005061, acc: 1.000000 -[Wed Oct 10 17:13:10 2018] epoch_id: 29, batch_id: 1500, cost: 0.002417, acc: 1.000000 -[Wed Oct 10 17:13:12 2018] epoch_id: 29, batch_id: 1600, cost: 0.001037, acc: 1.000000 -[Wed Oct 10 17:13:14 2018] epoch_id: 29, batch_id: 1700, cost: 0.003415, acc: 1.000000 -[Wed Oct 10 17:13:16 2018] epoch_id: 29, batch_id: 1800, cost: 0.033230, acc: 0.984375 -[Wed Oct 10 17:13:19 2018] epoch_id: 29, batch_id: 1900, cost: 0.002914, acc: 1.000000 -[Wed Oct 10 17:13:21 2018] epoch_id: 29, batch_id: 2000, cost: 0.036463, acc: 0.984375 -[Wed Oct 10 17:13:23 2018] epoch_id: 29, batch_id: 2100, cost: 0.067978, acc: 0.976562 -[Wed Oct 10 17:13:25 2018] epoch_id: 29, batch_id: 2200, cost: 0.028088, acc: 0.992188 -[Wed Oct 10 17:13:28 2018] epoch_id: 29, batch_id: 2300, cost: 0.013688, acc: 0.992188 -[Wed Oct 10 17:13:30 2018] epoch_id: 29, batch_id: 2400, cost: 0.000238, acc: 1.000000 -[Wed Oct 10 17:13:32 2018] 
epoch_id: 29, batch_id: 2500, cost: 0.006287, acc: 1.000000 -[Wed Oct 10 17:13:35 2018] epoch_id: 29, batch_id: 2600, cost: 0.058838, acc: 0.992188 -[Wed Oct 10 17:13:37 2018] epoch_id: 29, batch_id: 2700, cost: 0.013440, acc: 0.992188 -[Wed Oct 10 17:13:39 2018] epoch_id: 29, batch_id: 2800, cost: 0.002577, acc: 1.000000 -[Wed Oct 10 17:13:41 2018] epoch_id: 29, batch_id: 2900, cost: 0.020076, acc: 0.992188 -[Wed Oct 10 17:13:43 2018] epoch_id: 29, batch_id: 3000, cost: 0.025126, acc: 0.992188 - -[Wed Oct 10 17:13:44 2018] epoch_id: 29, train_avg_cost: 0.017397, train_avg_acc: 0.994107 -[Wed Oct 10 17:13:45 2018] epoch_id: 29, dev_cost: 1.314838, accuracy: 0.8304 -[Wed Oct 10 17:13:46 2018] epoch_id: 29, test_cost: 1.349980, accuracy: 0.8298 - -[Wed Oct 10 17:13:55 2018] epoch_id: 30, batch_id: 0, cost: 0.063661, acc: 0.984375 -[Wed Oct 10 17:13:57 2018] epoch_id: 30, batch_id: 100, cost: 0.005445, acc: 1.000000 -[Wed Oct 10 17:13:59 2018] epoch_id: 30, batch_id: 200, cost: 0.025451, acc: 0.984375 -[Wed Oct 10 17:14:01 2018] epoch_id: 30, batch_id: 300, cost: 0.019455, acc: 0.992188 -[Wed Oct 10 17:14:04 2018] epoch_id: 30, batch_id: 400, cost: 0.000182, acc: 1.000000 -[Wed Oct 10 17:14:06 2018] epoch_id: 30, batch_id: 500, cost: 0.036089, acc: 0.984375 -[Wed Oct 10 17:14:08 2018] epoch_id: 30, batch_id: 600, cost: 0.003895, acc: 1.000000 -[Wed Oct 10 17:14:10 2018] epoch_id: 30, batch_id: 700, cost: 0.012125, acc: 0.992188 -[Wed Oct 10 17:14:13 2018] epoch_id: 30, batch_id: 800, cost: 0.007463, acc: 1.000000 -[Wed Oct 10 17:14:15 2018] epoch_id: 30, batch_id: 900, cost: 0.043093, acc: 0.992188 -[Wed Oct 10 17:14:17 2018] epoch_id: 30, batch_id: 1000, cost: 0.023025, acc: 0.992188 -[Wed Oct 10 17:14:20 2018] epoch_id: 30, batch_id: 1100, cost: 0.008640, acc: 0.992188 -[Wed Oct 10 17:14:22 2018] epoch_id: 30, batch_id: 1200, cost: 0.023361, acc: 0.984375 -[Wed Oct 10 17:14:24 2018] epoch_id: 30, batch_id: 1300, cost: 0.003226, acc: 1.000000 -[Wed Oct 10 17:14:27 2018] epoch_id: 30, batch_id: 1400, cost: 0.010225, acc: 0.992188 -[Wed Oct 10 17:14:29 2018] epoch_id: 30, batch_id: 1500, cost: 0.009733, acc: 1.000000 -[Wed Oct 10 17:14:31 2018] epoch_id: 30, batch_id: 1600, cost: 0.014048, acc: 0.992188 -[Wed Oct 10 17:14:34 2018] epoch_id: 30, batch_id: 1700, cost: 0.008200, acc: 1.000000 -[Wed Oct 10 17:14:36 2018] epoch_id: 30, batch_id: 1800, cost: 0.035217, acc: 0.992188 -[Wed Oct 10 17:14:38 2018] epoch_id: 30, batch_id: 1900, cost: 0.002707, acc: 1.000000 -[Wed Oct 10 17:14:40 2018] epoch_id: 30, batch_id: 2000, cost: 0.028292, acc: 0.984375 -[Wed Oct 10 17:14:43 2018] epoch_id: 30, batch_id: 2100, cost: 0.003164, acc: 1.000000 -[Wed Oct 10 17:14:45 2018] epoch_id: 30, batch_id: 2200, cost: 0.014421, acc: 0.992188 -[Wed Oct 10 17:14:47 2018] epoch_id: 30, batch_id: 2300, cost: 0.001986, acc: 1.000000 -[Wed Oct 10 17:14:49 2018] epoch_id: 30, batch_id: 2400, cost: 0.038462, acc: 0.992188 -[Wed Oct 10 17:14:52 2018] epoch_id: 30, batch_id: 2500, cost: 0.003580, acc: 1.000000 -[Wed Oct 10 17:14:54 2018] epoch_id: 30, batch_id: 2600, cost: 0.061259, acc: 0.984375 -[Wed Oct 10 17:14:56 2018] epoch_id: 30, batch_id: 2700, cost: 0.042758, acc: 0.992188 -[Wed Oct 10 17:14:59 2018] epoch_id: 30, batch_id: 2800, cost: 0.012991, acc: 0.992188 -[Wed Oct 10 17:15:02 2018] epoch_id: 30, batch_id: 2900, cost: 0.021263, acc: 0.992188 -[Wed Oct 10 17:15:04 2018] epoch_id: 30, batch_id: 3000, cost: 0.046058, acc: 0.992188 - -[Wed Oct 10 17:15:05 2018] epoch_id: 30, train_avg_cost: 0.016908, 
train_avg_acc: 0.994391 -[Wed Oct 10 17:15:06 2018] epoch_id: 30, dev_cost: 1.214737, accuracy: 0.8343 -[Wed Oct 10 17:15:07 2018] epoch_id: 30, test_cost: 1.247275, accuracy: 0.828 - -[Wed Oct 10 17:15:15 2018] epoch_id: 31, batch_id: 0, cost: 0.019613, acc: 0.992188 -[Wed Oct 10 17:15:17 2018] epoch_id: 31, batch_id: 100, cost: 0.048000, acc: 0.984375 -[Wed Oct 10 17:15:19 2018] epoch_id: 31, batch_id: 200, cost: 0.038604, acc: 0.992188 -[Wed Oct 10 17:15:21 2018] epoch_id: 31, batch_id: 300, cost: 0.003548, acc: 1.000000 -[Wed Oct 10 17:15:24 2018] epoch_id: 31, batch_id: 400, cost: 0.001539, acc: 1.000000 -[Wed Oct 10 17:15:26 2018] epoch_id: 31, batch_id: 500, cost: 0.034219, acc: 0.992188 -[Wed Oct 10 17:15:28 2018] epoch_id: 31, batch_id: 600, cost: 0.005696, acc: 1.000000 -[Wed Oct 10 17:15:31 2018] epoch_id: 31, batch_id: 700, cost: 0.012590, acc: 0.992188 -[Wed Oct 10 17:15:33 2018] epoch_id: 31, batch_id: 800, cost: 0.010021, acc: 0.992188 -[Wed Oct 10 17:15:35 2018] epoch_id: 31, batch_id: 900, cost: 0.004838, acc: 1.000000 -[Wed Oct 10 17:15:38 2018] epoch_id: 31, batch_id: 1000, cost: 0.006327, acc: 1.000000 -[Wed Oct 10 17:15:40 2018] epoch_id: 31, batch_id: 1100, cost: 0.019881, acc: 0.992188 -[Wed Oct 10 17:15:42 2018] epoch_id: 31, batch_id: 1200, cost: 0.006641, acc: 1.000000 -[Wed Oct 10 17:15:44 2018] epoch_id: 31, batch_id: 1300, cost: 0.014323, acc: 0.992188 -[Wed Oct 10 17:15:47 2018] epoch_id: 31, batch_id: 1400, cost: 0.008565, acc: 1.000000 -[Wed Oct 10 17:15:49 2018] epoch_id: 31, batch_id: 1500, cost: 0.003106, acc: 1.000000 -[Wed Oct 10 17:15:51 2018] epoch_id: 31, batch_id: 1600, cost: 0.023656, acc: 0.992188 -[Wed Oct 10 17:15:53 2018] epoch_id: 31, batch_id: 1700, cost: 0.014398, acc: 1.000000 -[Wed Oct 10 17:15:56 2018] epoch_id: 31, batch_id: 1800, cost: 0.005019, acc: 1.000000 -[Wed Oct 10 17:15:58 2018] epoch_id: 31, batch_id: 1900, cost: 0.042051, acc: 0.984375 -[Wed Oct 10 17:16:00 2018] epoch_id: 31, batch_id: 2000, cost: 0.005070, acc: 1.000000 -[Wed Oct 10 17:16:03 2018] epoch_id: 31, batch_id: 2100, cost: 0.071147, acc: 0.984375 -[Wed Oct 10 17:16:05 2018] epoch_id: 31, batch_id: 2200, cost: 0.004077, acc: 1.000000 -[Wed Oct 10 17:16:07 2018] epoch_id: 31, batch_id: 2300, cost: 0.000753, acc: 1.000000 -[Wed Oct 10 17:16:11 2018] epoch_id: 31, batch_id: 2400, cost: 0.007293, acc: 1.000000 -[Wed Oct 10 17:16:13 2018] epoch_id: 31, batch_id: 2500, cost: 0.020403, acc: 0.992188 -[Wed Oct 10 17:16:15 2018] epoch_id: 31, batch_id: 2600, cost: 0.002491, acc: 1.000000 -[Wed Oct 10 17:16:17 2018] epoch_id: 31, batch_id: 2700, cost: 0.001376, acc: 1.000000 -[Wed Oct 10 17:16:20 2018] epoch_id: 31, batch_id: 2800, cost: 0.006589, acc: 1.000000 -[Wed Oct 10 17:16:22 2018] epoch_id: 31, batch_id: 2900, cost: 0.009986, acc: 1.000000 -[Wed Oct 10 17:16:24 2018] epoch_id: 31, batch_id: 3000, cost: 0.004628, acc: 1.000000 - -[Wed Oct 10 17:16:25 2018] epoch_id: 31, train_avg_cost: 0.016863, train_avg_acc: 0.994502 -[Wed Oct 10 17:16:26 2018] epoch_id: 31, dev_cost: 1.237226, accuracy: 0.8348 -[Wed Oct 10 17:16:27 2018] epoch_id: 31, test_cost: 1.256692, accuracy: 0.8327 - -[Wed Oct 10 17:16:35 2018] epoch_id: 32, batch_id: 0, cost: 0.001936, acc: 1.000000 -[Wed Oct 10 17:16:37 2018] epoch_id: 32, batch_id: 100, cost: 0.002628, acc: 1.000000 -[Wed Oct 10 17:16:40 2018] epoch_id: 32, batch_id: 200, cost: 0.006948, acc: 1.000000 -[Wed Oct 10 17:16:42 2018] epoch_id: 32, batch_id: 300, cost: 0.001289, acc: 1.000000 -[Wed Oct 10 17:16:44 2018] epoch_id: 32, batch_id: 
400, cost: 0.016850, acc: 1.000000
[... per-batch cost/acc log lines for epochs 32-49 (batch_id 0-3000, logged every 100 batches) elided; the per-epoch summaries are retained below ...]
-
-[Wed Oct 10 17:17:45 2018] epoch_id: 32, train_avg_cost: 0.016115, train_avg_acc: 0.994599
-[Wed Oct 10 17:17:46 2018] epoch_id: 32, dev_cost: 1.182178, accuracy: 0.8359
-[Wed Oct 10 17:17:47 2018] epoch_id: 32, test_cost: 1.183695, accuracy: 0.8297
-
-[Wed Oct 10 17:19:05 2018] epoch_id: 33, train_avg_cost: 0.014972, train_avg_acc: 0.995145
-[Wed Oct 10 17:19:06 2018] epoch_id: 33, dev_cost: 1.819085, accuracy: 0.8352
-[Wed Oct 10 17:19:07 2018] epoch_id: 33, test_cost: 1.859041, accuracy: 0.8314
-
-[Wed Oct 10 17:20:24 2018] epoch_id: 34, train_avg_cost: 0.014813, train_avg_acc: 0.995064
-[Wed Oct 10 17:20:25 2018] epoch_id: 34, dev_cost: 1.697732, accuracy: 0.8346
-[Wed Oct 10 17:20:26 2018] epoch_id: 34, test_cost: 1.721137, accuracy: 0.8341
-
-[Wed Oct 10 17:21:44 2018] epoch_id: 35, train_avg_cost: 0.014921, train_avg_acc: 0.995075
-[Wed Oct 10 17:21:45 2018] epoch_id: 35, dev_cost: 1.203598, accuracy: 0.8348
-[Wed Oct 10 17:21:45 2018] epoch_id: 35, test_cost: 1.205202, accuracy: 0.8347
-
-[Wed Oct 10 17:23:03 2018] epoch_id: 36, train_avg_cost: 0.014009, train_avg_acc: 0.995522
-[Wed Oct 10 17:23:04 2018] epoch_id: 36, dev_cost: 1.647745, accuracy: 0.8324
-[Wed Oct 10 17:23:05 2018] epoch_id: 36, test_cost: 1.662931, accuracy: 0.8368
-
-[Wed Oct 10 17:24:22 2018] epoch_id: 37, train_avg_cost: 0.014056, train_avg_acc: 0.995431
-[Wed Oct 10 17:24:23 2018] epoch_id: 37, dev_cost: 1.500988, accuracy: 0.8334
-[Wed Oct 10 17:24:24 2018] epoch_id: 37, test_cost: 1.491400, accuracy: 0.8327
-
-[Wed Oct 10 17:25:43 2018] epoch_id: 38, train_avg_cost: 0.013573, train_avg_acc: 0.995548
-[Wed Oct 10 17:25:44 2018] epoch_id: 38, dev_cost: 1.577028, accuracy: 0.8317
-[Wed Oct 10 17:25:45 2018] epoch_id: 38, test_cost: 1.546861, accuracy: 0.8363
-
-[Wed Oct 10 17:27:02 2018] epoch_id: 39, train_avg_cost: 0.012756, train_avg_acc: 0.995835
-[Wed Oct 10 17:27:03 2018] epoch_id: 39, dev_cost: 1.650582, accuracy: 0.8342
-[Wed Oct 10 17:27:04 2018] epoch_id: 39, test_cost: 1.662477, accuracy: 0.8325
-
-[Wed Oct 10 17:28:22 2018] epoch_id: 40, train_avg_cost: 0.013034, train_avg_acc: 0.995832
-[Wed Oct 10 17:28:23 2018] epoch_id: 40, dev_cost: 1.424795, accuracy: 0.8311
-[Wed Oct 10 17:28:24 2018] epoch_id: 40, test_cost: 1.404285, accuracy: 0.8345
-
-[Wed Oct 10 17:29:41 2018] epoch_id: 41, train_avg_cost: 0.012473, train_avg_acc: 0.995959
-[Wed Oct 10 17:29:42 2018] epoch_id: 41, dev_cost: 1.301212, accuracy: 0.8313
-[Wed Oct 10 17:29:43 2018] epoch_id: 41, test_cost: 1.292132, accuracy: 0.8314
-
-[Wed Oct 10 17:31:01 2018] epoch_id: 42, train_avg_cost: 0.013007, train_avg_acc: 0.995946
-[Wed Oct 10 17:31:02 2018] epoch_id: 42, dev_cost: 1.701511, accuracy: 0.8335
-[Wed Oct 10 17:31:03 2018] epoch_id: 42, test_cost: 1.704458, accuracy: 0.8312
-
-[Wed Oct 10 17:32:22 2018] epoch_id: 43, train_avg_cost: 0.011822, train_avg_acc: 0.996238
-[Wed Oct 10 17:32:23 2018] epoch_id: 43, dev_cost: 1.703876, accuracy: 0.8322
-[Wed Oct 10 17:32:24 2018] epoch_id: 43, test_cost: 1.724094, accuracy: 0.8315
-
-[Wed Oct 10 17:33:41 2018] epoch_id: 44, train_avg_cost: 0.012328, train_avg_acc: 0.996076
-[Wed Oct 10 17:33:42 2018] epoch_id: 44, dev_cost: 1.575702, accuracy: 0.8331
-[Wed Oct 10 17:33:43 2018] epoch_id: 44, test_cost: 1.573283, accuracy: 0.8313
-
-[Wed Oct 10 17:35:01 2018] epoch_id: 45, train_avg_cost: 0.012084, train_avg_acc: 0.996298
-[Wed Oct 10 17:35:02 2018] epoch_id: 45, dev_cost: 1.603634, accuracy: 0.8321
-[Wed Oct 10 17:35:03 2018] epoch_id: 45, test_cost: 1.609678, accuracy: 0.8291
-
-[Wed Oct 10 17:36:21 2018] epoch_id: 46, train_avg_cost: 0.011216, train_avg_acc: 0.996266
-[Wed Oct 10 17:36:22 2018] epoch_id: 46, dev_cost: 1.772260, accuracy: 0.8309
-[Wed Oct 10 17:36:23 2018] epoch_id: 46, test_cost: 1.783967, accuracy: 0.8284
-
-[Wed Oct 10 17:37:41 2018] epoch_id: 47, train_avg_cost: 0.011536, train_avg_acc: 0.996225
-[Wed Oct 10 17:37:42 2018] epoch_id: 47, dev_cost: 2.235156, accuracy: 0.8326
-[Wed Oct 10 17:37:43 2018] epoch_id: 47, test_cost: 2.289617, accuracy: 0.8318
-
-[Wed Oct 10 17:39:01 2018] epoch_id: 48, train_avg_cost: 0.010959, train_avg_acc: 0.996475
-[Wed Oct 10 17:39:02 2018] epoch_id: 48, dev_cost: 1.764490, accuracy: 0.8343
-[Wed Oct 10 17:39:03 2018] epoch_id: 48, test_cost: 1.826369, accuracy: 0.8296
-
-[Wed Oct 10 17:40:20 2018] epoch_id: 49, train_avg_cost: 0.011233, train_avg_acc: 0.996326
-[Wed Oct 10 17:40:21 2018] epoch_id: 49, dev_cost: 1.652680, accuracy: 0.8353
-[Wed Oct 10 17:40:22 2018] epoch_id: 49, test_cost: 1.685406, accuracy: 0.8324
-
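
The deleted log ends with train accuracy near 0.996 while dev/test accuracy has plateaued around 0.83, i.e. the model had long since overfit. A minimal sketch of how per-epoch summary lines in this format could be parsed to pick an early-stopping epoch; the log path and the best-by-dev-accuracy criterion are illustrative assumptions, not part of this patch:

import re

# Parse per-epoch summary lines of the form
#   [Wed Oct 10 17:17:46 2018] epoch_id: 32, dev_cost: 1.182178, accuracy: 0.8359
# and report the epoch with the highest dev accuracy.
DEV_LINE = re.compile(r"epoch_id: (\d+), dev_cost: ([\d.]+), accuracy: ([\d.]+)")

def best_dev_epoch(log_path="cdssm_base.log"):  # illustrative path
    best = None  # (accuracy, epoch, dev_cost)
    with open(log_path) as f:
        for line in f:
            m = DEV_LINE.search(line)
            if m:
                epoch, cost, acc = int(m.group(1)), float(m.group(2)), float(m.group(3))
                if best is None or acc > best[0]:
                    best = (acc, epoch, cost)
    return best

if __name__ == "__main__":
    acc, epoch, cost = best_dev_epoch()
    print("best dev accuracy %.4f at epoch %d (dev_cost %.6f)" % (acc, epoch, cost))

On the log above, that criterion would stop at epoch 32 (dev accuracy 0.8359) rather than training through epoch 49.
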
diff --git a/PaddleRec/text_matching_on_quora/configs/__init__.py b/PaddleRec/text_matching_on_quora/configs/__init__.py
deleted file mode 100755
index 5711d76f..00000000
--- a/PaddleRec/text_matching_on_quora/configs/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .cdssm import cdssm_base
-from .dec_att import decatt_glove
-from .sse import sse_base
-from .infer_sent import infer_sent_v1
-from .infer_sent import infer_sent_v2
diff --git a/PaddleRec/text_matching_on_quora/configs/basic_config.py b/PaddleRec/text_matching_on_quora/configs/basic_config.py
deleted file mode 100755
index 70c2ee06..00000000
--- a/PaddleRec/text_matching_on_quora/configs/basic_config.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from __future__ import print_function
-
-
-class config(object):
-    def __init__(self):
-        self.batch_size = 128
-        self.epoch_num = 50
-
-        self.optimizer_type = 'adam'  # sgd, adagrad
-
-        # pretrained word embedding
-        self.use_pretrained_word_embedding = True
-        # when employing pretrained word embedding,
-        # out of vocabulary words' embedding is initialized with uniform or normal numbers
-        self.OOV_fill = 'uniform'
-        self.embedding_norm = False
-
-        # or else, use padding and masks for sequence data
-        self.use_lod_tensor = True
-
-        # lr = lr * lr_decay after each epoch
-        self.lr_decay = 1
-        self.learning_rate = 0.001
-
-        self.save_dirname = 'model_dir'
-
-        self.train_samples_num = 384348
-        self.duplicate_data = False
-
-        self.metric_type = ['accuracy']
-
-    def list_config(self):
-        print("config", self.__dict__)
-
-    def has_member(self, var_name):
-        return var_name in self.__dict__
-
-
-if __name__ == "__main__":
-    basic = config()
-    basic.list_config()
-    basic.ahh = 2
-    basic.list_config()
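
All of the config modules deleted below followed the same pattern: instantiate basic_config.config() and override attributes, adding network-specific fields as plain attributes. A minimal sketch of that pattern, assuming the configs package layout above; my_variant and its field values are hypothetical, not anything from the repo:

from configs import basic_config

# Hypothetical variant illustrating the override pattern used by the
# deleted cdssm.py / dec_att.py / infer_sent.py / sse.py modules.
def my_variant():
    config = basic_config.config()
    config.learning_rate = 0.0005      # override the 0.001 default
    config.emb_dim = 300               # net-specific field: just a new attribute
    return config

if __name__ == "__main__":
    c = my_variant()
    c.list_config()                    # dumps the full __dict__
    print(c.has_member('emb_dim'))     # True: net fields exist once a variant sets them
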
diff --git a/PaddleRec/text_matching_on_quora/configs/cdssm.py b/PaddleRec/text_matching_on_quora/configs/cdssm.py
deleted file mode 100755
index b773d4ca..00000000
--- a/PaddleRec/text_matching_on_quora/configs/cdssm.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import basic_config
-
-
-def cdssm_base():
-    """
-    set configs
-    """
-    config = basic_config.config()
-    config.learning_rate = 0.001
-    config.save_dirname = "model_dir"
-    config.use_pretrained_word_embedding = True
-    config.dict_dim = 40000  # approx_vocab_size
-
-    # net config
-    config.emb_dim = 300
-    config.kernel_size = 5
-    config.kernel_count = 300
-    config.fc_dim = 128
-    config.mlp_hid_dim = [128, 128]
-    config.droprate_conv = 0.1
-    config.droprate_fc = 0.1
-    config.class_dim = 2
-
-    return config
diff --git a/PaddleRec/text_matching_on_quora/configs/dec_att.py b/PaddleRec/text_matching_on_quora/configs/dec_att.py
deleted file mode 100755
index dbb9977e..00000000
--- a/PaddleRec/text_matching_on_quora/configs/dec_att.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import basic_config
-
-
-def decatt_glove():
-    """
-    use config 'decAtt_glove' in the paper 'Neural Paraphrase Identification of Questions with Noisy Pretraining'
-    """
-    config = basic_config.config()
-    config.learning_rate = 0.05
-    config.save_dirname = "model_dir"
-    config.use_pretrained_word_embedding = True
-    config.dict_dim = 40000  # approx_vocab_size
-    config.metric_type = ['accuracy', 'accuracy_with_threshold']
-    config.optimizer_type = 'sgd'
-    config.lr_decay = 1
-    config.use_lod_tensor = False
-    config.embedding_norm = False
-    config.OOV_fill = 'uniform'
-    config.duplicate_data = False
-
-    # net config
-    config.emb_dim = 300
-    config.proj_emb_dim = 200  #TODO: has project?
-    config.num_units = [400, 200]
-    config.word_embedding_trainable = True
-    config.droprate = 0.1
-    config.share_wight_btw_seq = True
-    config.class_dim = 2
-
-    return config
-
-
-def decatt_word():
-    """
-    use config 'decAtt_glove' in the paper 'Neural Paraphrase Identification of Questions with Noisy Pretraining'
-    """
-    config = basic_config.config()
-    config.learning_rate = 0.05
-    config.save_dirname = "model_dir"
-    config.use_pretrained_word_embedding = False
-    config.dict_dim = 40000  # approx_vocab_size
-    config.metric_type = ['accuracy', 'accuracy_with_threshold']
-    config.optimizer_type = 'sgd'
-    config.lr_decay = 1
-    config.use_lod_tensor = False
-    config.embedding_norm = False
-    config.OOV_fill = 'uniform'
-    config.duplicate_data = False
-
-    # net config
-    config.emb_dim = 300
-    config.proj_emb_dim = 200  #TODO: has project?
-    config.num_units = [400, 200]
-    config.word_embedding_trainable = True
-    config.droprate = 0.1
-    config.share_wight_btw_seq = True
-    config.class_dim = 2
-
-    return config
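
Note that decatt_word() was a verbatim copy of decatt_glove() except for use_pretrained_word_embedding (its docstring even still said 'decAtt_glove'). Had this code been kept rather than deleted, the duplication could have been factored out; a sketch, where _decatt_common is a hypothetical helper and the remaining shared fields are elided:

from configs import basic_config

def _decatt_common(use_pretrained_word_embedding):
    # Shared body of decatt_glove()/decatt_word(); only the embedding
    # source differs between the two deleted functions.
    config = basic_config.config()
    config.learning_rate = 0.05
    config.optimizer_type = 'sgd'
    config.use_lod_tensor = False
    config.use_pretrained_word_embedding = use_pretrained_word_embedding
    # ... remaining shared fields as in the deleted file ...
    return config

def decatt_glove():
    return _decatt_common(True)

def decatt_word():
    return _decatt_common(False)
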
diff --git a/PaddleRec/text_matching_on_quora/configs/infer_sent.py b/PaddleRec/text_matching_on_quora/configs/infer_sent.py
deleted file mode 100755
index 896672c4..00000000
--- a/PaddleRec/text_matching_on_quora/configs/infer_sent.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import basic_config
-
-
-def infer_sent_v1():
-    """
-    set configs
-    """
-    config = basic_config.config()
-    config.learning_rate = 0.1
-    config.lr_decay = 0.99
-    config.optimizer_type = 'sgd'
-    config.save_dirname = "model_dir"
-    config.use_pretrained_word_embedding = True
-    config.dict_dim = 40000  # approx_vocab_size
-    config.class_dim = 2
-
-    # net config
-    config.emb_dim = 300
-    config.droprate_lstm = 0.0
-    config.droprate_fc = 0.0
-    config.word_embedding_trainable = False
-    config.rnn_hid_dim = 2048
-    config.mlp_non_linear = False
-
-    return config
-
-
-def infer_sent_v2():
-    """
-    use our own config
-    """
-    config = basic_config.config()
-    config.learning_rate = 0.0002
-    config.lr_decay = 0.99
-    config.optimizer_type = 'adam'
-    config.save_dirname = "model_dir"
-    config.use_pretrained_word_embedding = True
-    config.dict_dim = 40000  # approx_vocab_size
-    config.class_dim = 2
-
-    # net config
-    config.emb_dim = 300
-    config.droprate_lstm = 0.0
-    config.droprate_fc = 0.2
-    config.word_embedding_trainable = False
-    config.rnn_hid_dim = 2048
-    config.mlp_non_linear = True
-
-    return config
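
basic_config.py documented the decay rule as "lr = lr * lr_decay after each epoch", so infer_sent_v1's settings (learning_rate 0.1, lr_decay 0.99) yield a geometric schedule. A quick illustration of the effective rate; the sampled epochs and the use of v1's values are just an example:

# lr = lr * lr_decay after each epoch  =>  lr(epoch) = base_lr * lr_decay ** epoch
def lr_at_epoch(base_lr, lr_decay, epoch):
    return base_lr * lr_decay ** epoch

for epoch in (0, 10, 49):
    print(epoch, round(lr_at_epoch(0.1, 0.99, epoch), 6))
# 0 -> 0.1, 10 -> ~0.090438, 49 -> ~0.061112
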
diff --git a/PaddleRec/text_matching_on_quora/configs/sse.py b/PaddleRec/text_matching_on_quora/configs/sse.py
deleted file mode 100755
index 4966465f..00000000
--- a/PaddleRec/text_matching_on_quora/configs/sse.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import basic_config
-
-
-def sse_base():
-    """
-    use config in the paper 'Shortcut-Stacked Sentence Encoders for Multi-Domain Inference'
-    """
-    config = basic_config.config()
-    config.learning_rate = 0.0002
-    config.lr_decay = 0.7
-    config.save_dirname = "model_dir"
-    config.use_pretrained_word_embedding = True
-    config.dict_dim = 40000  # approx_vocab_size
-    config.metric_type = ['accuracy']
-    config.optimizer_type = 'adam'
-    config.use_lod_tensor = True
-    config.embedding_norm = False
-    config.OOV_fill = 'uniform'
-    config.duplicate_data = False
-
-    # net config
-    config.emb_dim = 300
-    config.rnn_hid_dim = [512, 1024, 2048]
-    config.fc_dim = [1600, 1600]
-    config.droprate_lstm = 0.0
-    config.droprate_fc = 0.1
-    config.class_dim = 2
-
-    return config
diff --git a/PaddleRec/text_matching_on_quora/data/prepare_quora_data.sh b/PaddleRec/text_matching_on_quora/data/prepare_quora_data.sh
deleted file mode 100755
index 111c2b88..00000000
--- a/PaddleRec/text_matching_on_quora/data/prepare_quora_data.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Please download the Quora dataset firstly from https://drive.google.com/file/d/0B0PlTAo--BnaQWlsZl9FZ3l1c28/view?usp=sharing
-# to the ROOT_DIR: $HOME/.cache/paddle/dataset
-
-DATA_DIR=$HOME/.cache/paddle/dataset
-wget --directory-prefix=$DATA_DIR http://nlp.stanford.edu/data/glove.840B.300d.zip
-
-unzip $DATA_DIR/glove.840B.300d.zip
-
-# The finally dataset dir should be like
-
-# $HOME/.cache/paddle/dataset
-#     |- Quora_question_pair_partition
-#         |- train.tsv
-#         |- test.tsv
-#         |- dev.tsv
-#         |- readme.txt
-#         |- wordvec.txt
-#     |- glove.840B.300d.txt
diff --git a/PaddleRec/text_matching_on_quora/imgs/README.md b/PaddleRec/text_matching_on_quora/imgs/README.md
deleted file mode 100644
index 60f55a85..00000000
--- a/PaddleRec/text_matching_on_quora/imgs/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Image files for this model: text_matching_on_quora
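
prepare_quora_data.sh above documented the dataset tree the model expected. For anyone who still has the data locally, a small sketch that verifies that layout before training; ROOT mirrors the script's ROOT_DIR and the file list mirrors its final-tree comment (readme.txt is skipped as it is not needed at train time):

import os

ROOT = os.path.expanduser("~/.cache/paddle/dataset")  # ROOT_DIR from the script
EXPECTED = [
    "Quora_question_pair_partition/train.tsv",
    "Quora_question_pair_partition/test.tsv",
    "Quora_question_pair_partition/dev.tsv",
    "Quora_question_pair_partition/wordvec.txt",
    "glove.840B.300d.txt",
]

missing = [p for p in EXPECTED if not os.path.exists(os.path.join(ROOT, p))]
print("dataset ready" if not missing else "missing: " + ", ".join(missing))
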
[136935 bytes of base85 binary-patch payload omitted: the deleted imgs/models_test_acc.png, per its filename a plot of the models' test accuracy]
zLFRZiP8JA9#ARNaRQB8oX|0UtmK>FFhx7;jOs}ja!XjAVHBl2#|H}ERmXz+yMah?y zQ0c4P^6U83`KAr{fXL%Ao}Lv0(L`=o;W(a`kS3@G_q2s^fJW%J7i8px306Qv(Ewmx zZ^B9$#a)w&E}FDJDzAD*5rm^;GB<1^n4McFbLHjbrRha#Yim1Xjk^6Adlvp&4|I5Ds?E}~a)@GY>hdc`PWlV&IjeC- zdJ8bx*6ua}e^66QxCxG85jC$xc6LBzGzZaFNmoUpLVVg!F;M`paW=LRnx3C_0-cx_ zWn6%xYqjSYqsG#oNHVheZ-LMS7j@n_LL<+HB@a_Zz`C#g9cTH`o9u(#W#k8d=hLTM z4bVB8F)HIpE$Vz?+GB6?n6NC62#4jt$$kQe==AF4IB>7b9L8?kwvruL9=Pm~8^2#yM zlP>U>x@3wZ?rWmw7owfb(DiREbA%rF*RbZwIZ|Y$Yg;KP}B;RBsB62RvL z?0LK?oGglcR~1L!&5LYqP7q`Lpzty2=~2K{(D32znzdmNG{)LEFxophX5C$bw&+ys z;HitZp?MnJOt?olR!io_zdJz$2LK&rZ2x-b*RSosO{NTdA4Jxem>2>x4BSlHm#1N4rP zbIwOLF}5?mcQ^l$5=3jmR7p6B-(TEUj$0@|e=!KG%&mR%K>vk{rQ@x8;H2V2K$ieT zfh;U60Jory)#ySlXZ85Ea&njs4h}J~v4P3SB*2gipsCfqIA&n{$Ga5uUM-gx@&iRn z)i7B#FL`bJqqrb&^3V+U__sHP zKz4=r2)0WsXFUc?aA;`gYQJ_dCNVKo?C}oI-rk;pfdTR}`GS;!A`0k4+ZJ2Dbw36< zQ5EAZ5tss&&avN@oVDwyYFAsSzp9KSh629R2Hu7e;RhPk3a$P>3-Jt%rnoQSJH!Vh z;2|^|iTw@;4#=zid#f8*Dao?D5OsQBaSDY(gBOsfr$9D4k_ zfkieKShc2-vbO3BxE3reUMWzrp{T1MWT$VGoM)aahOD#L|Asu}>;F97FZpMXCf7p> zG&D4-IGOPdF%mpt;_26oG}-B<*y4^pr%;%dMotX#u5UUq(w5N<6jsEqQnMK8EpBZx z)-&T?NOz(;)5kjAe8mbuB6@s38|a7X3n$}#Z{kn9q_EzKU05#ar%J=D{*^_@jqm?H zfW%Is732u0ey)Ic%zo=H^^dqX9$)x8nSW~z?x)DOnP%3E(5R=eTCzg?P>%Z0r>X|w zyUd64omNW`ji-me(Zk&nLPcLdDX_PA5vE#|U!Yt?7LZ+yM9&Z)G!=tejC<6@WuXNj z>&7KQ5b0^(|BakF9g!Tw=keYZxOrl8B^6Cah)G2FZ{Wnl|A3@;J+|)c7T_-Kdip1y z{En{PM+y={4H`r)(ypKUB8#j}do;^6-s;Tem4o&ma8brKUI>Hy>1vZ0AB)vEk^*mg65!x?bI@DUdKjU8?QI>*c|E ze46!aBar@%fPErmK|z7OiOCnBif9J}9WW17)DyG##{hwWFn2U0bg19^op!3y47$C3k{^#5vw0p7oQJ&R2tVQ>z4Y~# zV1R5%`XSjA|L;Sn3%{}bS$#0yhzubTbjtIHG(QT~bNquG-O~}y-hu*Zbl@25^oIh~ zT-p(i1CofIG6ZQVcm~;9HHQ~1jl{DRLhFaCeO0krH0Srs7Q(&=**F0Gn_7`8HF_s3 zj{UZmOTj}vB}^hV1?*dCFZ?QM9=P-{5F&i*?s2NSJs^tzU+{?UHG!zAs=`5~@odGlz}4VQCS zZ|QgmvH5l5TlCjAp9|`%2bHH=n#L@Ug^fIpq@lXqQe8SkbNVG!KU_|})j(mTdM5pm zwsfWF{};fXo?&Kc3IZ{%xT)w1hPJM)85hs~h>wSZfS@r05X9$L#yUVG!g0$_6wrP7 z-gO?KIHZ}#c=p(MO?p|;Ou00Zdrwh%sI9-F_O}lqLK8XlPF)D^qbl0V#||Cz-zR$B zV*_x{j}Yu$xxV^q^M8vp2tr2$!N9;kfMjQ9=S>zB75#1YbcMk5eD8Cd!HS56hgez5 zQH6hX5=<6>q>w_sCu&6%kRX~BmzJABnBri?@N?BM+pHM_v#JKCem%UenNi$KG3|Dg z4rbD_{4#%oLC<-1&zYU!Pnw{8$^k--qqwyFa{K&U1#TCDe8V=T*eY! zZ8^EJ)_X(u_|){}wV6W+Ho$rIY~+_GEy7QP~8WA*osH^~y$hpdq&dbw0E+PYN=F)wNkldOxId#h3|m5Uh( zacVE5o-Tra+K1Z=U=xxFIJ{i&zSIGh&M3f7b?4k>zQN#dp}DNQ9NJ&Oz``PFVnVAP z|3#gp`*P7Y8h{T{yUpxEk0}fo2fGpbqsnLO$*sJ-*5DfvMA-)k&-*ng*nvAZ&5@3S zlKR&`YgdNuRz5hfv`-zM15S3Aa}Xv(^S>bApyWp!+}3#5Qh3^cibqGq$cPFIZ99s! 
zYN-ibHT9 zH|%gQINzpromRrG<~=?%4!)B|HN17R-4Ji3i)Fj*Vi0jaEGJ9es;I>2QmigCdp@~zKY6`Q z&CEzJJa1oEpm-C4)rCyy1-uuVOl@1u$LN6RF#N;ALyYuG4sPzi++2DJ-#h!gg=SJ9 z3}^S;->LF|@Nyw0nrL*IBS0cs9y2!1^e#DI3q2g3BPM^xU_m@{)zr)wmH)kI94yFA zaalHa4t{1l(+|K*az^ zk7!=8?h>8GGOud~>6|&3qP;z5W>(fSa`MPXQa;1WgLxu8o79Q@MW0)1ud@*;eNx^O zPbzM1Jh0gL-Hxbut)~3EZ!dlU-%#rX<`J;Fi;(p_zCC{%$CFxpWb-aHpRN?Tg6%jN zL3>;J>vcag0f*|E5=M`EL~gJ}4!FJlW?J&TH7*FCim~HO#&clZFWK0z95)7DYiJO& zu(BSHNKq~W=@GyE3PL=q77|2SR<<+yee6eF-G$#h{J*Irv zp`l@!?V_k`5*H%mqq1^PzEUm?E$!P~QBWVWRj`0xz!%f&Ntgnvmag^wgmTMCwx^kd z*a1E$f?S9IA0J=y^0lVCBZ{;UEzEvXb}T zs3=1-LdFOokvYl~5)Bj$B2s3B%#t}{NlHqYGejYoWz2Z~_om+O`>y}F-uL&rUYxzx zUVE))xbORU)(tVAGk5u})zP8l@>LD~()Bj^S5ddZgSel3GiVTOXllAdx~SVb-p|jk z|3&JfM{8Kti7h3_m2?^1>2=Ds2P)jSaRUv6xwEpe-hKFRU1hsfZF@sg6A3G0$jDPZ zJ1fFZTUl!sv?I%2xqf|9dFohvgEvpT!=n-SVmI+3y3)yLa!lwYNWf z;K0?XKXn&vZDlb_wv)pqR9@cfZfV%y<+30Wp>;cNdf|h4$(>wUYfBMylOnQ1?TedO z|E`}E2iKWi+*<5mp<|&d)A#{KoI-GNa{j7MHF#Qfca3W6^**#?PQDUaTr5k-VwRqk zAuWhcU(|cg=5|ky1!1;3(O)cC;ZxX5k|onnf8VYe{vLN}WF_t9)dQ}Uq9419T5i(S zo%j0s_skBg+hF61Ydv^Rgo}-G+WYwWE+IyLeOW|zpq-YCZr3{5=6A{7v1B~X#LWB~ z>E@5err}>CW6vCoPU+R%EG&GYpa>H3EIg&c@mRugoHpW=btCXpZ+Xc+MI{c~t~E{{ zwQsu5r&8qqycvb|T=dr+PsMHOrp@A7$8E5fn3B~?)Cn|bNCQK*zF=RT)_ zH+Gd$HyawX_m4N6Mc>sDoDV^6RtJe;U7G^Ww6kSdbIf$l9s6}0a_w7W@7KM1_s(p1 zs;{qacmD@=b7e{$2az9~vXgl8;noND2Xo8gl9P%mhCJHhy_){@jh}yhitJepef?JE zk5yGKzP9($dB6fBUlEj!@a5X)XjmS=M=4$8z5g z`#^Oi)~$b>Bwf6P%{y31UyySVwzhc)w=lL2IRUW5_WB!PlHuAni;AY#PK1So+Y3^)qYGYjzQvw_~wS2Fr&R_serzO~I<6;>wua&X-ZNjxcm!xc_IhlaTStGvogZ z?v4ALQA8y7qsO&N>~(fIWS`IR%FP`YYxsHi$dNPe?r+))$FYC^{^ywJj*gDV*x1F` zcYk1Yc>Ls^yH1>E*|2v7(jbf9Zb3moXRD&+dxl%EJlNq-&W~|ZYR!JfO2Y;>+84zh zO$j*kihr+V@F7{T5AH&TFaO!6n>qchk@|bhXOYPFLsujD{?D5*!Q!`GCPyJkL}2l6 za63IUGSS?aWG;Wo#v~@eUFUeY=lX<{l((x+?7v!7rG}QwKVEw>+d4RC8Vq)Kdvge! 
zG<UePC2jY{D-?A0Am7oJRYz#ATX?Cn0s8zzr{?w?SE}I-V)I1#@ zKFjr3|EE@v2E{+Kw6A%+Tjp1#yWK^FTPVy+4)y&Fx-fr2#t2f>pDABGv*x*A)32Q@ zCCkEVxDsuN z7QIkw8@NCsH2=cUO-IRi47T{=;$eOPfeRA@rvm~4JkG44r>8Ie_%ZqP1jmLAcjDr< zHy60B1~E%_b%53gD0i$&3iql#`=+ccQ3koU&2Yn&*{S%O*Qr#h*g8UkKTsR3m{~VU zvk;F&O<4=42l7t#-xgCwN>}`m(Aa)TuX-slKDMihFnqmI_DkfOFkbp=+t!|!`ESO| zS3Y1S!X+INO3SzGqCHX_?m zX4!S0i@Evl&sthqI($>E#+yvT>LZHL1$qeebl1wB*4>|aB0+gnHpJ7xSR3?UxLoyA zN&iZ-j8z^s<^i!#R|H8E=5z<}wHH(BqrsEIo%<>l+^J2es)7q`vVdX1UYr93Zvw=qqJhi#8`o_+2# z_-)&D&*(8RULu6KLSk)_)}UQF4TTHetxAPf?qq z`NQP`@Poy{;+Bp57tomd78F~H2ogtta5njDU8+Iqj&`^1m>?c@CW-p`u{~h!Pe_dg z)b*F$S-Z&lFdgydXNZk$``cw^$N99mX+neY8Ye99 zh>K^wFJon4sbSLg|7re^*ds%{kvnc7{){%|yQm`Re;d*ag`lN`&*9C4ETDiqX=QEg zHE5oH9U;)uq$HL^t<*ZnZ5W*U+$1luZYL3fh_zAA4eE%Cc5Je?CQ!^Y&ZcjRusJbXs4-DcaBJY$twGt6c+!XNo@juKIHKr+6%l zj=W5p>i!*+KR<2@mm8%8Tq-)V1_GroIJn5B<;rybjyo*RwHnwBOGGxX5b-ylARxfN7+ygePpyzld%xz?SqF7)ke#&8vFI-5RNX1)5)?rb>Is{i(@y# zA7izbpPuRMEYRXx;?bZ?D=3WUSpVyi@ccF60x*O}AtAj3D?Hc)teA$*hx8O#yt-nc z@p0^sg+gbQeD|A^BkQW&Dde9I?bS455^3AvsYJ<=qNQ4w+)~TT(z>AC;=H+`RoQC5|6j`yxegjwm> z=zd-7LFIa&+%J~8b9VLowR}4+Fcu@li-)Cz*{(t+<CncW;*JRvuHJX0_qVor?hIR(sNPDmhb9)fg z_RS$v5s;MB!5J{MPmbK?=Q-GxFe*4LN=!7c5a#-#htE(7Io6*1-Ph>8MDAOl;L-Vk zscgm?AhQF!Zx~MPAFhj+t)rHRWZ=N8)khaMq$d+BLyVte53N?Tv=-1EYxOLtdK~f5 zQjs=4NyTTK_2@*3?oPa2ZzK3y3Zd0Q>g;%v%?@wI?RWZDtgxB1td1Iq%}n{2sAViy znw~tzoWdtQ@H1R{{_e85`m?jKsr_&rp4i6AyE=W9?T#k;Ha|P9fbhVx1>-N| zq@Gx6+G`(avay>p_;A2K+xNej_=%Z#_fAe86+oB02j91@U$?Hu|B=V*24%`Or_CSj zTT)b`6Bwp7jpdH)n+OT8EzW;CYA}EA{lR5hiKkDVP~~CNcKnuVy*zT48hmpOm;P8&Pst*Z$Ie6AzY5q1^>(S6-zJWa zSyRIKQ8kfI|INK}`vpc>`E}Blb8yPs7{Yo1At9xk%Qx0-be}oXcy^5!m2dA}Z6wWg zNB{@#si=BNBoP$_I+RP6CYH2JY`spN_+4AK&#!6O$Cvb&Z3~xhUy?=3jw-myRFu-m zl^>t#)S77l807NZbG1Fl8-I6O<|ZcDk|F;NA^G8n-)i;e#G6-WZu->c=YQ`}z1hOL zYzuDlEzmu$gv{>L%ykE+7c5Zy zQeSJzV*Y)3n|K&L(Rwu>vq4@bAKz7Bxrf#6gy8&(F7AevTC;IuDgYx(WQLMJBhsc% zgh|HW2fa}@xnE>0UBvOqajD!KeOs^P&4dHS_xBxqVY;R&%ju(Ky+e>iOI!G*g*8)+ zeKYjbSGwvN19OIHC3kJAK~cl(SZ{qj637f3u_2_B5l6R~ez*ZGwT9U|0u!bNJ6O9% z&yVPI?fhrFa&BM;6ipB8gL7HNC7JPNf@S?~MXxizke**SW9;!58@RnlsJK8tolaz2 zYQ*8v7QQLgi8u3RpU`{V&gKEn7DOc-)0>GmZ{EoHj}Nr;`adF$^`Qx(=B3Eg#yvi^ zCuFmBly!yCv{cG&QU@*1?MCk|eC{vVMhNTW@W>}%&v_5Vt3LJW){or0H<+$@%gfly z`2z2Y9QXY^p8HiR)r~G@d`t7{<@4XO^;Yor-ky8^{tEdjMD_}oPh7nCg8Ci}{vL&S zj700Dr4mVE{Y783qPaFcsz0Bv%ari*-?V+Wq(}Gs!1cxc(a0PE{)8)q?3P#a_VOY= zT@>8;K7Xi{saWBAP^X#&ge82;@mn@yB|4p?Ev`D_Ma%8cVKYxw&Zg87U4|*~j|GoSey!^fAD%N1G!zApEA|9&8 zhGTA7j|BnsS?_3f^$Tsmb)sHOyc#SeBKB{$9camLi4J*^cH!bPdyAXS$srg7HSn zm-gNMNVigJYBW9~n!}MS*-fU{-`&8}>OkJ{ODbxdv1HvQ_fW7}=jpAbdyc3Vccxd( z30GKhZS>EV8kSk`sFC#hQy8@U0pN<^h%6z4LcP;3?fn9R`UL)Lk%wX|9W-1NBzfs5K4L=TXivpK0TgJ&RZXv3@l`-tiLY|vVP>(iwMa2EZT~%Dkv(} zVENn6%{a@e-CZYMlX80Bk)tR5Me9{!^c3@+gq1z$aBtzLc8Ro!4B0%D+9yzTZ#j18 zHz5Ws|2pIO3%0RwhvBf;WF7IbftIG0GlYNi*oCfu2bLI8Ngnl#t2vV3`k$rb@S^8wJ1mjwRaYAJfC|U5$gNqM za>*jdepg+7-Q%8xg_c0)d!ABwdGf`TKi`+kLW?5lK9`RkCdrFrH>7C`2Jf-aXE=H* z;T+IEg1~^HjFr!NyV3j{aYOx+kF*@pFe+E0BvzWjvg2stnUDAhDm z^-nW0GGcvNe2%>8@9!Vr?~j_~p46jv*C8;zXjc=*l*A~TD&RPnn@ne0PsjI6cK^3I zwVSH{jcKp%zC}aem&nkB+PRf(6&u?qh)wNQ86@`p4<0x`JBtiLSzlk@=gyrPEQY5z zUMxqzN`vxjM{IBXpAY`j+e*`^9kHT>-HYh{-I}S%w9O3laeo#f2@hRtZCePA{G(4M z>vUgVF0Ep091Ta@QrMeXqz}aR^J;_3Q6)E*CmIx8!Gcyha`QI-8_8KM|Iz~tkww1l zN4y?%wJCHDuZ4*`CLqnaupD;7%{RYvNW_(Je&~C){*5@d^XR<|@i<@vo;h)-bvEy%?!%UWoQsu<&phyr@_s- zKpobEe?!qF?HL@sfApI-hpU4lA|j~fAln=#Htcg$^bhd!Q>CnY!ml&Zd#c(YUFS+{ zFH-~&>Z0E`5$`N?E~BI0Yxm!+auRc6RULbm+8tzYB`pOhrd@cwA@o8sx_l|2}|Urn(|f0OExm`CD%FDFXTIM>@bnw|4*#- ziUNN8Dg(-i3Ej%c^%57!cNzm@pWsfLn}X3XPSX}MThZ@pTbPs1C+V=WC8I7WE4M1; 
zU?oxU{$W<7Xlmxm<4p0I7j`bpzCsCn6?g(aYO;QTj%lU7b>0Tib9WlEZG;i(ntj>4 zY{Om^XdzOOiVMjDt?--ca-S;`+S%TCu4ztJX0ytf177CI2L%ZCSp@}Y6aC(bp@rR& z!0&sU*lk{Q*nQ^r&GEt3)aTDddS3$RAT#o18>RK6rKRzHz5If1bhj22kxT=^RVUB8 zmJN*tAFrFbWXHu+Y#0&iVdA1=vlN|qdf-_6m76}U((?n4)41HsC3{NGsl|b{e#@3E zV<2(N>yl1QA)*Ql3rj{(Xvnt3i$2-!?DuixKE5k??7Zsn>8c8$R~&V88;VXyX1&j^ zGgmC#zkdF58VcCjg34xQad`5<0TC63B;S3{n(0~!DTVqyRY@<6oI+N4;H&xl@b+@n z@moy|=AFGtptBych|1#`v+6llyYzKTLt`h8Dtnn1;8a_4@2bKRA1!x2-MUexUgqzO zbg0DDUFDzN*G}cw#b)XY4R7l|XG~tZiY~wRP<)$R!Q~j%r+PHK`EjYl`w{fzW)t4G zUp{(hqSHIuN^j`m$dFBR86R)zdR(uw?BPMnyz$)mKaQ;0LRlK%XFY9mTQVwPwVzhE zsTSo|4`1Zd@L`%44$D-G4nlVSUrR#!^`P)`fo2 z`2f%37_4HztN7d9Q~iA=)s4w}*ux3??zJAK%jJ&kc~l=ax3D&8IQ??_qNefIcRU7} z)@#fQQai_REqK&W)rK>5j7>h;$a3s{G`( zc>ZfS+y3P2^Z1cJT6LVY-t=Q|4B~r<%fbfd&cy<5O#+Y&&;1MNp5u{QaR-t7>el#| z#`b(trTg4{NcKmesKFA=pdKgN0nz!(6^lnz@e&4?e;>)CzkOy+u#~H#w@ir$3j;m< zudk*N4vv@N4*IL5n$=$z8G5WQ)c4?XmQe8({VT)c!;a@gvbtzT#{5EOS>~^0t2A_r z6VOMJ4Cn15oVQCePB!=!6&0oA4M>ufK{w0li_)H!jsXLv{SDYB<>D#&OQu{Bk5~6N zPFt<=P?n(yiyo1D`6{2Yr9QWE{*pxf9R&jt>act{K0NGSYV-xMV=Ydf+jGP-;n%T{ z@z#(KEiI2!x7yQ3@4*3dGZt^@jR_UdN-ek?eXbyE`tQg-4&7oZzP3)>A{s6nv2$fh z)L#3(hMh{GdsUG-sMy**PtZ(^MURu7S}g-j9i3qH@C&2ARA(z`Grp4czanqlS~@v3 zm4M0aY0OL{5vr|JDm!hHe^pi0=%71I7j%zCrbR9Lh7C!aLONtAv?M&ex>3PNi z7Mq`gwW`yp#_o#^@1MADcRdYIRm~BmrB467ho@CqM5Cn3Xk6Z*?KKd6I=~wUV9~xr zPrq1P-3%RAg7-P4kgfrCFlch^Mz>Kv(8W(j?d?L$)ztj6tiC=$oNtD0GX)029ufQM zDG8*PY7qO5LiiO@=RV!Phr}z$3!x=w(!Rm`^olh7VcM9Lm-bUkg z?pR4)6fu4snh7N#@!87DYl)K(8{sP-8x_?3nZ6Wnli0{K{=ssxAnN?19m&6Yug z{ZIO~KM^qjmqykx%FZvJ`H@$Ym%rGS0<3%+PVTOrhAN%Rtuqi3g~_<H~e{FecIV$vp9J83l5GI#}V@}&z@Q0;88Ml>X+^!7nYu0D7CwlHkWNKc`<0m zX)>E4!9R0zbC;iwO*5baQY&*w&W%5Fx@tr)69$+A6i{%Ecd4}@Dy#uS;Ae>5nk5Px@u+4haJ z=Y`*Z%C=0OI(5ngN1kt$-_vY+eVcNqS@ttXSV&7vmG7-jtp_H2oU1ppytPcX<*7x2 z)9psjfi&Q0qZjn}w70=lsLn1JOrP&Sv22QFPa;kNkmOkFo_I33D!cWkrkuFhXITU; zs?eyLtnoTm8E&ln;0STE+=yJR~Hy-c-} zUU1`j>7>_yF-G!ulfz@<<)N!oK0u66iGRje|1=Q|$ep@^T0X>rfCF&9?vKSi7IXsG* zSZG7bKZFnqHVSL?II)dNmGf_*wxGKwxd&iJN!kW5@zzewH+Q-AVaE1ac{_N4l+igm z{F`q^yA;-@p+$Y2uhjS>YBxAdmx&f9u7St9Onh4Au3eezTz&I*0|213D`h6$nf!LA zn4kAgx_@*zE`8P(uyz!R5!ylp1$--OTgx!^>k0p1e_P~Iy96KA~b@p35W^UWu2}7NWqm;4bVL4EMbVO96U1 z*QIxp-~o^id4B-mrcRZ3TU+Jv-wrPODHN^5ybVe%nSqZV^Sv+h%%zfXIJScs2Ag0| zREV>$x0IHZ^}@|af-V2%+{J~p*^QMiC-MdWjz3TiRRI$YxPLzq-Jb+@?NUR;g(P6vI~jlta5ibYw|jP131!jV_v|gYrf? 
zqo%p_Ck+x>5-vwD&W|a91^Zi}s>P`_8Ckg{*fi3Gj=?G+B|h|W@@GgtHbR~jwr>P1 z-1p~F4QoLN@yR3nxDNpubISxWdmV#T+mdB5E=RBArBktx3f~oQN!r`~sR-PvE&NXl za4k*JB{j0I&x#9RwPM;y2W1;dmJ)GvB2e(*u-mrz;h-I73d3FlB-3!taYhqk-POn3 zrbaKA3u50^11Jv&4o(1}oq(OmI+1Kp^lhq9^~H z0BH05wWohHqN_1D9F0n|s%Y=AG2Gp!Sy$wX_K>uE4-St9wYXItDRLA`iS3`Y%$$PH z3%Pe0%f%Zd_gGaDf?5o1QR4o7u49e7Di%gE-RlpfgANNNX^$Y=1L%%b8da`vsv?v)k_Fic)RX1CCK88KAT>4LB%WU>HmqaWh zsyvDA7rISvcsmR%#O&(}6aQ%JzC?6Gi=CLAnGrkRyjR}Yx)fA)aEmD->0+)7wKqwVhBH2E++X3hMFHTbxj=q~?w!>AO#{}c)W;^RBT zw{3fC|EVS4d1UmP>mc9${mqig8aPN-4b1u!)HOZT&wRI?2!}4vg``cuMtdXt+am9C z?_M4D?(x~ll`B`e%uRNU`MS>>3KBLr3Gv8SjrxghM1fj>jG$;p#3A5nB#(2^ZiN5s z+#qf4guec`#5bI?eG&(*t0HI#cO8(xI+L4dw)3T1v?vq8DWxz;&97g-GJ$0WBw9XE zs<9IZqMC$`huw>qL#-k8Po^LCr_Vo_Y+$D!V=*Qv9(V?<@vhA0k_rRM)`-ZSke-Z& z;ARxNBHqTJo~i=L{WCNX{e_LD1)Lj?W|hqg7mNTw4U(6~;lHzsW|h3Xy@Ms}VqvYt ztQ*DPc|V?yNKH+RRSvTVTWlpDCI=P4OnnT){+ z-KV(?;H}B{6uf$Y#>80P*YwXV~Q%(@E{cpC(%RQc@Yi^shOz3i`mLgmO+$ehTTc zLPje*Ei@%??mgcM@&pbo0_}~)uWjEm)y~(|S_&J)o2Onpx{#I6!(zz4^$^kGAosfR z!Y7JIElDpyHSc)mm%_SSGt`>F5Ik8?lOoN!S(9n0z_I`JaBXPfUX!RLY+`EKi+rLM z?nh1rbeulN;bn`BM@%g24<^n`J zh~va#^0gs>qWtr(4p5pgLnL)_a^UN(wK4u$RAouZq`F3*w^1vB?@$ zTux4LaR;pgh~sXH4Vx`S#B2XL^w9mgDUXD~uOcx}M>^Zg#iUZmSN#52jHpaV{a)Gv zIvMZ)uh4y}n^#^9&WK};_KOaMqes24=(mzf?e+(wMdjl39Zn(NH$wXGR7PUce}i1$ zyoQH|tH4JCVfet-Vh6oKuN!RKj(aOKPvv62fy%y;*wNp6BW}U2Y5w#(B+F6~_j%gG z|HXPbwJN^iQ{7b?dn+t67Q`Y1@6bDXL-Wp`KNbA*3kq5`Fs3+|gwtKNe;N?zIM(=M zt9a2zZ@&d-E+IjINqDUP(p&wbt&78=+j^9JnC9=NVG=mUQjF@HeR?u> zKkpnoc+e+*kEp0RNkSv3bV7&)2%?;06WvJ@$wl+j`|W|=H~$8EvKwV?!D4B;qAZ8t z>iNb@@2=VTdhI5PCx112+YN*0YFe4bNQPry%-y3lEPhEJiid7hxQn zZ7PqwJQ_~YJof}x1X{foUzmvt{BHAfL~JjVUGI(3%s|E<(!3Hifk6RGibHB^0oRk( z|Jp;eA(NPX7|GB8K&14?{}~=Q5V6y~>%_=DWIXH8y=J#){^1FQcOP0PL_O0n#^TKaB>!^A zvf`fvgit<8x5AdKiLuzfW3b=g&$6;?g~ve^ma1-{8#v}i6xRbor)-48Zj><~DCU-X zsTBzi?-GK!vA(9`M?p|+eW?K)Zou-qeZkC1TrVzg2V@qN4@OWU{@koHUy`MJt z`b_uh!T;HkJAm-ovgI)qKVkDA;n>XRm;HJx{{PzwUQS1r_r;AWJm)|^f89^V-cm5? 
zO+23Alt+GoTusLMx1apA7&(i9cFJ!6zxHd$SMB`xym31v3_Z;JR}*%TLyGPSc+T+W z59i33LkpFrviisZ$C)5W{xaawQn~U|BMwqt3;UI*2jsigyJfA1=T<_!G9>l7WzP1^ z^H(GA_toUz>pYp1yhYD5|3h0Y@+u8k#A=tthQUe(E2QV{nwm@ z;)0oV?IBFSluaP;m?mj>N6Q%qY;n$ChvYhh|LA zq4tQ7eJ|YLsrq;4-2-1qg@(*!UmTIKkLLTAp=<-JPvPbJvSLLZV=>FZ#voDvjbkE3 z39f9SNrTu0NhbNpSwoISK@7KCx>Trm{%6Id&cq|OY65K#&u_}bJO{xq$hh*okBwM-}^Le#|{=Xk=CFkW@MxR)9 z<`K?G*Xbr3#Y}6;PnLzhUl{qjcRRI+#ee952tV4>X)I4J(|`N>_WW(K81{bV{e0yC z`tx{i;+elyaVI%wl^67F&eNI2a{pe#{hOF0`tGmYv6f}4Jgh&_nC}GSyXmc2SbGE> zp0AJ1Qc0yY~uMhs~jHWNTTD92kqd4&qFNbAGCT|*miQcu3|^34xJw*io9oW^;)kp zK{M&vg2UtEj)+@Qwn|7yAPe>n2oFzz%}#`ZRfFz{#{BH|t@Y4Siw+fls*nJkaw#8D z1IyvcrY2n^z^Za`OKT;F&}|qCA0HnoE3}lA?|~Tyvz-7q)amBp<*x@7$N?}kfL$p=K)we^-?)Z64e%4CYKb)19)ZUMqooFfCoGQsT7?z#)&&sTYSK5H`7&o$#*wy;_!^wN5%HDTRLqVEIO;CAP_@ zaxcaV6w$5#Ro1Nse4hb4fs99x=+=S+s6n#D6`O0fs;upkjN$;8q!L`6GODfl)j!t}13!yBj zOPK<6k$I+MF$9#y0eC&6S~omDfH?gG5KRU6m>B<1iX6EZi8w1J$J5}xid(*4x`QIZ z=ZCq>h~}G2vb-)+-OmEU!*}4oAvZt`JykKc#|jYg2mmc1L%JV7evIPTdrMav2-bE@ zgYy8UbYuVupf4{Sda3eJQBmu58Q*FNM=ESF(46ZZ4SttIM36H*FKkWL-9vIcds^%t z+~bx?AVFS~GN_0;9QejxUxUv5$u(vU*A_N>PLT+)3fD$PvFV^iexuplDqhr>!Ht-Y3g>yR+w@a#ldNaiKSp?EH0A z4#*Lb?}0IFm4{L30hALp?K1l7j9URT>CfJhm0XsahY_$>3uQO|ur+w~iU-vBv8V{A zKSO88EAOV5)z4Xm%sjx}Ag}1bbO@8e%DsQPJed$)zRV<}lLqmt>&>gHfyfOF-myjSstt~>T>ymjVe`)g9kj(AZOL=E`!R8&_040dSD zx$u-MUT(3kR+7BfaSP~C6sm7OK#-=925k4my9iO!PXXQa+G|ZZ9O3QvLI*4Om)DuAQl;rzc$>RtNrsE1~rqcs+Hb4_9-m?X)SaO$Yg@?@pnLG(wN3Bu8$-jG)G&*4d(C=(| z{|jh}_kaXsRXa_}*|$)sZ`Qfb`2r}}XJ68LKW?)J_w^P?kfoa{ zy*F#lNlSyK^C?^x>)4cB;`!3uc06e(ej@UC4|4Mpxn)pO1bZ*y>#QE!AZ z*R0|rk*Lp=-C!TmT2=7|?|>AM%tyl9l!J1k+p~87X6((Ek&<$cVS)G3^_&Q2}5@vWGAY>Xp*dR#E+L?pM92R;<5TxSPWx9{(;RefW3Nw?LHvzD#9+)Rp9xJtqgp|NGlI9hgdJ6o8__>Nl2P53Z z`%WSLP=Ik#B~e>+DnJJBP743X2m{3s=E#j>DSngsVDo#^2lGdiNmGp9tI5ielIRQ+ zLAj09Y8{!&C8f)t*}en{Xo*3VO9GVNzJ1eyX9b3qfM7&wsCdy7#wCt4%=6~W-k2bL zwXRQLqzH!u7N9Rv0prXK644q2kw&YVd~{4q_dwoY9b`+R5c83;v*C$}dTav{qa)Rs zDB;k0di3rn9;vA%0#}0K*$iBRkGHpKfy)>`dhqYDCLbP}qxm#xS{5(oPC~`UcAnzP zGgUxD#&GtXfNZ=_mz8{m6~>0(mnVx_M(c~_ zG)Zy}u?k$x80p+F$Lr911ow{yE%(g*rpJdI35v5)91DjB(R69;sS@nsJ>{($q@kv~h zke-`u|GBJp&N_wAo6(w}C$sox0`lU@fT1tUT|62%3AgI-bwbMSr($o!7B7~yM#jd* z?89MQYfx32q3@2DRX0u2JH{p^Ut2~}-Df{&berf>_S$^QKPWK6q;UMWC?Xg|%Vq0# zi&58N4G0a_JAHr6ke4~rBPkE9gZLf%@N2&_DClHYi@GFplxnU8^5Qrh?lyj^w-A-z z9BYr8v)}nrUth0{x`>u=R4J|{o%zWx5Phq;p36_O{eIBWDNsmQSQr}eRu?JDIzf5o7HvFaVacnz?$b@JqgZ7YXj;cA zBMR4`_APe+W_5?yCsa;jT3z!<3+LvqfjzjnH3Bp%lm+LqrCmnN;49^!E|7E@2*vrE zar4!At~0;Ol3TxHcSD_d(K6a96*AQUjl)=$-!DnL>Hy4_ZfkgSYB=VyVM*jOo5 zRn;R`#;vnkVo9Oz;pJR%t5r`=w%Wll+}`lXd|GO~R}_g9`;(-@EQ-<`FBt&O zC&Omw;!lun77wLwRUvEgYQ+lPr3jP9dYrApu(AX%+Fu@R(M<#%b!@7)a&FFses>t?4!=gU;Pf$WO8HPcf-G7366Iy>{YkSk5~ z8qj)%3NW>noB2T;!d*M$q=L(@qk9Np$Wz4&?4~tws-A^=AZoCDRlZz?J%U{Z6HIoQ z>TfM-oq7<_FoRmf{_!sNN&&5u4Ave&Zvfq;k{e9Gs~s5-den@Xvg6(nl$`a9vb^SB zU&K(mVIROB@31$x5))2s0io7;<^0K$56L<{c{RLPrr8jmYY4&2S`3D4O07uz3Jcr6 z;zCK%AL!sXJVGf*xrl%|K(fiHw-ciUehqyusf*Ty>%HCF1WNZ{t3ZRbI0g-RQP(E_ zx0mBUb9`uM=w_KcGHE%`&4@!o3C4^>8tpOnY&1y(QmV$DhF7dUhU^g@7JM~BI@BG% z(~p-|R-QmWtsXUqa9k&mb)vti%eE5cNDTUSQmYJGM}{v55YEmzLpHSa{h4dRG*@X5 zhfqPZoLsS)4kGs7PeML$*y(Nl@AX!PRnA8vGY8fZTCs}KuF z*8l}VA3LuMASJT@eo+5fH%MH87Znl)bKy_e0w>`F6(@@IU9xgXq#~AtJ>*2@aSvH@ z4#htmS}uWTD)nT{0n-UGkL9mOl23`_PRA=>%{Ode{JCp|A!`U zi{ju`R_F54_U-snxwRDyrAXY7bSJE>!tcJQd?cj`;QPsvJ@_dxbn^i7HE_!MEom@QHW?NA{`DAS7Hhvzkug+5QXqFN~WIubGSV(J59GNG_ zscpVKlQCkGoxAT2)2g){T>~qaG&M~0;`f)lD2Igu_q!@5X+cia^_4nT@XTP0rW?cwt)KEb>pSp(Z zIRTJY1L7w|SAF+c2g~XH>Ju_T1_lPz(|Eh-+Zc{PZO*{V$v#;Dhthj%7+yLK+~HOq 
zhhh&(UD4xtrPRevg~`ds?hihF{8*cFp&fVSkavFC^IbcQ!-Iu_cr~snKVNXiS|8Qg znz##Nv||wnK;YW9e{n;#dtH60=2wo1!T=|YDr{Ww%YSU=siF)74WzmPW%$+EtydCB z(F(bFTM97iH7Fk!LmX{lY;1<4U<&K-sVDPlew6c(QfiX@Nnk1~?|{MsyawM3CK1}c zENjzGHvM;@@CQ}G#k$jPgBEL}1p;1@vwi=7S3p4EI1X2z(cli5~Nqnv5jugLwhpje)f^dPXQG4$yt} z{!18>pdNCoVC0{O0-pfTAZcGHgGg!F3uS$N-wCW^VYt;O zD@#DPL;quOMNKhC5gAhYlh%!u}=tE)2?xMPzvW?Ab#J}c1YdcL)=K-_ts>8A}?a1AZYmjJXY z?=~zZ`XLelo~VUajZA{1=Mcjdpd~6<>!_83LLR44dP=ERv=?tOFISC`wiKPd$h17MCl3r>& zx^Qs2&AA{q;$UZwMgh`ka;bnY?HC+PML!&ad+jGqvSUgl$11qvCe00 z$QJyg5rB~MMFP*T(@9*GS=^V}C}em`raLbk`x6j{^MMtF8?Hr%x>QW*aeqLZXsgfd z+PAZpmLfenst&_+9MLW*DP!m6PJ!>5Me+sECX5UMJUj>p8tc9qY--zu>WPaltSP(0 z{zApY=D@Bq&=U$Vq#82c$0RCQAzB&m>l^T)UI;Nh!}?|5TzKR9WlFZA3{mxS#U)Uz~;^Bm36OLwy0qA@3k3 zij0I%WJoHNI8YvmXc~;TE_@y-_=tBMEU33PKoJYcwv!dk=qOc#o`7UECM+;gKgG)Z zBX@qZw==s9pyO5)`PRuGfYeoDas0x@0Vi=1NOWz?@zqi@LnS?R$syf^xE*Qz$=TX% z^DT+2@WKNr$jhrDvVrt0p0a51FI3=Y!ipU)O!r}5wdyHsa(8CEel-~tf*vRFdVG(Z z;(?s@dj|(+k+3D1{u=z6evAYv;Q8%82xXf=e1M|a2bl@gepuL+NcBhK189-959$V)_fsbnsLA6Ty1gRrkefPAxXJ zJ9bE3Po$jTh&-jbhRPn;%%S3IlFYnu9lg#Ns>lr1Tk5Bt37>Wzn0~Bkxg({wu<;=O1Ey4K#^!4em4!ZB zX`l(W4fz+H7+OlVczt1-2&7mmYXxDWw)62_1+e-2=Gv92nmapML$x}m_h_<)c&yNT z&D_l#)3#AXneq9Jt;NvDk{^wNH3T(*zINC^HimxhiA?&2Tpz84UmZRieTO3ZMBy_# zq49~hoOJm~ITG6#<9tt-N z@dNTVs!Bfw2bVuqJlA8gLY3p}V%$A$fMRBn(m))JnOSW}+$RT%PXZSP5g2)rPO$C^ z5&e+M@XV=Yi3^XodGplz`CkT8;+iSinUq`V;zpjp= zl>GQHiV$)djFKT99q|E#9Zlx%#dd*Y^yWWcyLl&!6-{Q6pg`uA6>Fq}9x{u+iy z|4r}TzZN3mGM4Zh_n4pBTesegF1#u>!}ZJ|Mec^HR{Xs}0%iUeVW&MtFlm2ZUThgT z>A(8uDx?3ptr}JBky&x)X-&8p{O!{${h< zTk=YAiwRE_JZt2;4bLye#cIw!ZoxOidOU32_{SEfWvUC`9KDZ%*30Lxkz(~(zX;r% zUqFrY)WN7>+4a1|cidb4&SBit&dmoj7d-@nC-h~dF^-B;XJ{e}MUA;$S#@b4uUM?R_A{EkQ6 z<>TAev3=e;i~rX|E_}^md}HKO##{gP!dql-t1gd!H8rbE;7CeNRt1PM+V^!8SuHZX z4tq+w@G?3Y{kn?M5>m1I+5ydwd{M%CF;x{!d&uTMJUqmX9Xp<9WZXEs`2b)N5#T3% zEqSYVJyz_`hcP>YbbM%HV)ah|HoMWzOyt6k!+QCbRNF5P54JfF=DvFiLn}#~h@xZ9)}mn?$&b`#nTr zPY@U3fZS^_FN(tocwnZqd5OTVQL}+PyrW+?vo5Pnb?S)ll)Ya9M zqq1dS=c1YaVaS_aO&rtE6jUrAo&*J2EHRY5uv~UGWfQqD5}4q>4Oj)vd1Jcg2O3x7D552gZa{eO8hCEp=+{NX$R&P*m}x_S?JMN2D_5^x zf)wfdC3KA&Ld%slxU5^gzGX6Vzn6!Np!wJ$0`W348j!`DMRO@QyTck90XnvOwrp7h zBu+c~JQqTZtH0aJw0=5z-|@lTT@=5Ypfu3mAQy5RKZIFN(2DCIVTSf~OBopt0EfQs z=XVvH=HX+<+R;B$C)F=7kU>U9X20t=3tstK-iXWBz*Iy5q`EZvYb7%6tAMG^H4p+p&Es~*@om(lPPEnaI}?m{6!zAHqZhtSm=a*8<n+OKz0p3lq@sca+!oo5yy^l{#5?Q6nGgab0`EY}wE&S1FjTUKTw8DN zZG(Ey;cvZ>N*AQn^6~;ulK@4{gCZ$*f(#32!=&{G<@b z&+eh!A%DR$P^pK2;e0{gZT8ix*YB^)Fs~khaU+V6((VMi^IaQxj|lh|vN?eqPCLfp zU&+RH4MUP$BWmbBA)nw&rVyw^ zyag_ie0r2Jk&o~hahA5W$mqe?G8xaqi`7~deQs=i+2^fMzTj)7bYGq2`Jtqq`ef>| zON_#JF)Zd=z;+Y@jQ(!v=|0iSoknSEpjo(ym#d$DDavsMKnmIVltQ4&&*GI#E-WDr z79=dJ8Z)tN+ctFfen5EesmI1K(AXvRCAu&F{Lp!usf~@V`)selQYNO?7~{~dFI(=` z$Mb=yQPtKSZ`lr6Lc42Ed;4`r#py{R(d-_K)6%a2P%x@8=TkgjJ+FXJ&&kVs8Xf)r zs(bUO9^1D4`>bThJcN{lBFQXKQ3#cpqN34NR0s(r$~;C&W{HYMB{D=IQ$-;v^PGeR zii*_lbCm0To_GD$de{5>-uIukb>EgN_5GgT^E{6I*!OMQ_HAzj!9QC;0%%Bk^r$_A z)mStZx1~^Q9I4qQY3>N8HuI<4TF&* zJ+phio-hzaepL6Zj$zxVEsA{QtUnYKyyfX$T=G^n`9v9YFBlblmlkM<9+S$A9xH?!WSB+|Y-nrozp6;Ys;wico zkIvJlDdO7?ibXj|l%HpMo_;4BI;G4*9tLY`R&KgaAu36LVU6e7*tAUEkLJ`7ghhN@ zTvH!Vh+F|U8?(NcCwUYUT&2&o^UwtWIcGpD@CMYfnUFuW_ELtvTKj`tNaPx8XmcpF$~yb>2Ahh1}Hki;k`?eSOva!Hex#eLt)_%G2($JdOi06yRQXQKENv ztd|k3r8&)glx0x**};H~F&;;lDt_chi-|iU)91g2XYwzUaT@)c^IN7$|4Hx4cek1^ ze04=hYSX5T+Sj9+^LHBR8=t&Ms@5VrsgY%zOw+r*BJHzv%bVi#`&~-^=;Ob}&8c?x z`k-+f`n8of&p?ano@S=-*EIwZgz;@&4V&5|-W&F;V&xMG|Zs!+d4J8d(ykx@M z>Q{?70^`aq5gXcM&Y*1DhE&Z(<52S>QGe^Qmm%n<4+*#i8iCDlsQDUxVSjjETN!-@|h=3o|v0Xa5@ovli(uf zLVDOm`cU_^J~y>E(r5wIPDqy=)EaB1w_5f 
zha=khPDZ8Hm=*^nTZ_U1QD$lBgahNO*a9RsG1fli90N6)@jXO9)5GY)_s%*=2b*9&i0wG(_6e z?u`L^W`2FmFuedTuF>hTHRN;W_T7G!x<;A)kS@C+N?IFPSrxJ z-yzz$=bMC@C6D4i#8)CI}v@Hg~=V11Lj^j zvD(+P`U!dEd&B{cN947kL43^Z7o2EASM-A#)g%jZC{uxsPJ@w`BskRZ3m3*dJAget z2yf#7wpS$S_>Qx^uzkZ*Wtd;$;%nc(+jZ*Hsj9l#Wu!Y9k>m&ILbmcA8ItuKjMg1) zZC3>KeXO@RQg4t5aK)>o!E*IAW2BZ3Y$LjoLDCz*MO9ms5M(=Eno}?b}>cwMvXF&CRuNF-&9MG`CsEE!~0iNK_b%ow&wheMr~0 zw(XV90wBt+9DPRSmLOe4&Xs=eo-vbZ;MVpIZdoudVn!cY-ACxqW;K$`fK?y^G4Qg`M&0*<#8(fE|Q=SqUZ0M$o7C*z~0l zx0e2@a0x$oQW)=Y_>JXg6*+3e{q^bL{}}gLLVl(sDM?R(5htgpZ17p}X~@$#gWQ|# z*0{5OZ&%AlHoXqd{#`oZ*Y1-xnj#2T97Lr1l)}P^oc*M2rklvS6Di=DzV2<_4Z&d| zuC{Lr>1RxG6Fvby;hbDjHop^>lXGzwXVzT^tQjTM(d7bna&~fx@VtWY#8+M>?LKCv zT?)IQwzBo$gvo?jiS>*fgz^7V(R`H+I@#3dCP{%cM}gTAUU%V3Lfk3N;E;;a&j62xwON=i!JJsomM^Iw-1NY~+3%z45u2?QE2+{9P%ozynyR-O>XtXD>QX{N z8|qdz0e$axVsxdGk+7`V*~rc3e>nM!_+Lk4Nz(;m848uFjlo3x|;J)9-$ zIhJyR4{nx1XX>1$U9+>^`~NwWWi zk{(p4|E0Uq&L&dCMIA}Js-|{s2kCKujLF~c{^JOUsALr1&VHCwj}nn4cJULR zXhDhWy%t7dTbzgn?3deP(uk4H=70{x#D#;YX9f~2Wb-rIz}zp5|@+V>;MIY%vAmH6DITo3p>|5 zJik7m$H9|t4=S|%+xzioDENVZ z2DHwNWvh4JW5UtxoMzGXkN;|MXLRHO{;mYumKWS#HZyvqm>FC$;D>YNqPVzfWcV6p$Oq zNW0_VRT>q8mok`=!k=5w1WXWE*Mux^{CSVUDOd3L*}8M*O@i;&*50fC)dK7}c5F95 z;v3|T2Xx#za!Ffod~uyK6+MZk>eMMy#tS%y< z`|NZ{8H}tPx45p~)a(Ba(tK4p1GE-@f3#6bw8X44QBUK3_pMQNNrd%lBrkuQ8bx5Z zM~`+z-}ivN10TrOf_+gqDx-96w0QAifx&VFy=A_Gm@>f!dg%R%R|$S~86=bo52fTz zID2*juOZeocMAx?xVQ$KYpZb&(!DlDPe-lZte)~|Fi8~OvYazVWVqTZeYj)Y+iiP5 zc4U?{(fsAbCZTOPITg^<(vl)0W&n^Oqv8?18bnxu=?L5p(o@B7t;&iOD-@GcSx)E< zW|v$9}eeiJ~S57eg49*)fvy~T1r&K z0`3GS66thj!?jnk-A4LUwSknrMxxy}sh8-%>7w6-ApagSoAm7L+n_WeGv;&5Ew14z zH*VTQx<9)33IDznFH(U^f=L-s4P_^ZIUvXjTq_SSBbP)~WE6-l1`dgNWe@>UsHUeU zQ2Qs4=-H(YI5;}3Pez3id0L~Ll02(fsFOQ%=x{A9O_pszCcb0IS4Jak04aro-bCW4 zZIu|Fb~UT*j$5m~#9dgvt53gvipf2|rX7$Ab-6QgRhhtE$2mB32P)n{84yZ9F`Fk^ z7m zx(L{15ZW5S&Y!b&BOek|spw$v?>=@j?cj~kH8p0ERo@@B+)}fqwVqr7`SuO0ADS`C z9ZcXY7a%4`qCPpf;`F3mW0x!$NPoAy)U-v5n{Tdfc>D2VW70UJ*$%+C;`Q*{-orQo zQTw5%st1b%Wp48FW}s|0S+#>4(i*)zf~P=Zfv!Uv?CNIN zieCpF?`v~`v?17gH=_StR2^?w%}ld>#FX0J&+u%UN0J(RXn$n#pfL{WBKR{N#xK$L zisA;}Wg=28Up^F1nvG~--qK*D`zy5_E2I64gbf=uklDp8eIafycp&(Jz0eh1HyjuI zr^yeC@L94oV@9Z@f439QDKIEAe3f;@Cp`Oo*EaOpHf-98-3Zw?Z`sm>A;1FRkB~x% z5rF#~D3a7}TlkCc1KS4!@lqbBo-Ecxu~B3lgERI?@ZoRhi!YB$D*s3R(=1lt?cj)_BO?9Zlv3) zu93rJ&h($*Qj*r{ng|~i`%jIV`u4)Z`{F(~*`l&BDA?t~jcSBO!RvjO0gi9Fa6y+7 zub29g&W?OK$&VErC13Y}6Glw;B6EKOfGluq`XS<%enr-B>W5zN($ZHWB}($$d2f13 z!$XTOmA)1i|F|?VOlgo~BMJzF#@m@zGYPAbMUzkk1uVSr-OgL-OtYx2UB{Y6IDwUn24Qd+p(adS+%C&*<^b zgwU&|PBS_LYwfSGRxIe#+Octc?z6{K#B7u?UzS!^CSkPSW%(a)3y0?sP!vb!A+q(? z^UMsC6TNYsb1mfeBK~d~8fz8tZtd2BORK*1I6e20Y-r7D^O{#KpGIaHwr6Mub#F?( zi#F%#1ab?h#^z&;S#5GsQc_;r`K_*l8%zElpxq=ts-7=3S=(#6Dcn_`rC)B_V#8_R#!N#_2Srk`SKy9CCN=Uq!f;FZCaY2t&^@rYtF8g|C>Ec zN_+XzW`56RUgr;w>T0Jmg6xg)agxaITy0z|x_!TP*g$z|5M0dj(4@T+Y!&wf2XEp! 
z<{<>*XXIGu5#I z7uFa?GH>-rq*;S%oLz_Kwz?+SAB|6p8q$!ORdi}Hv5AVxvIl>I<4M~w!82M+!xXp+ zKE2*YPxVcozj(22k*5nPwb%B@)QI0V1i}O6M9-WUJ}&Y|F@;nh4X`9>Beqvkfli^R z$2|TLp!NK(1+$gGSeF;&$P1X71e+DmTP0|Jou0nF+soE_B2Uhf9yC>GUe~H=(W61L zUuD{)wmvqfh9;r3)HTV8b&}eE($YVl#gI7Ax+WZ+?QF{#M#T95ZLkT>UQFV(OmbDM zrOlzsqLcwcCTm>VSFL4p_AilJ>)31X5U-gn2CL35yz2YZ9a6sH^f`;`xw(qW9TZQn z3@D#6WF7(AGR0d*ycdM&4S+B*M7dC(-Ha9-aBF0j8(Z7<#D8`=eK;ab7G&w-#r^Bg z)G9!3wT?R`Uoq=(BcaigrUbT)g&?T~y9Q`!xv2>-;+#$m%}r9~`W>RVYQ(IiDbQu- zXOvBS$QJ3_sBb}W_76|sv7@`soja#s6W?okKbiAGFKoQzDg`JtB-8QXbWADyj#IvT zW}au89k;}~hSLss+AAm{fjM#xI}O@2eY|t^-}NnW zg6(Rnxmb#xY87uFH}%*uhcz7kZ^b-nzr>cNT~(H!deQgv1*ik-J0tYJ;Ui1j;siA z13MIbPwq3)>$`DNsf*h&gEfbJ)B9x4dDZ2^oroDxIj`2m4*`Sk>oGCZyui?SnD#d{ zapZejWF6J-Bn2?fj<9-s^u}Iv*NN6X)Ab7s>uQDobDvY4I%!ge5M6+dRg0aSH{-G| zNvCa7wu@$n7OAjhj#M<`EzdZAZxl$JR+wXFq3E^mHBq8%w{E-9DoRe1$3*vSzb8^_ zNo%8?-^$9a!R@03*hB`=eUW9b!OYeYT1J~9FS{$N)^W;6PRgf0pY85K4V6HHhg7iA za)!a_;t*TiXeGbAk6nKH**CCm5BtWDO&rkk)S_K^Gs&s2@uSgk042n8%8Y(wes>Pjd$F+F zhuT4C6o%wy zV@k?Mzr;Xio?2K|?p0sk<4X$x&^MLd{I$1kF~(9+T4lKLbvY(y1eN+^+Qw+bo_gbR za3xY1<EQtNt&o%j|V)Aaf4GqD3eBN?GFLwKp%$n9W(Vnt>TK9iO^bP zp&E^Tn?HU2yeEZ7oZck$b`B)U;tq?>@wu0l$%7>3=uNd)A*t#u*F=Qt$20GtrKLcx zRoWmv;@rzCvKDv%jZs>5hr&w#>I3L0RHn=B)4SM9=wrS;A%QsqA@z1X=xJjq%KA*B zCni)|8w&-VTegvhH6{7 zINvtx@3*;~ePsB$=fmYDwd`HLv?I0|_Ymv_11YbOsJG0*B+6c}4a)TZ{##H%dKu8_+I<75oeBQz>`_Iv*+kUyrw4eOC&kj-iM%&^enIIoPdKk5TEXbn3;tyW+ z4f;m3-YT{l;@M8r(5nT15Wu=xNX8rs-_X{!=Heh_A@T z4$_0I0%;+*Is(B1jw>ej>f`$GguFOa^e<T z4%!^rruq5#g#onhJ}EX?XWcBpJYwS^z`BO7=+PIW4)4Jc;T`v`>T2C@L)`sk#YovD zOZ-62F^AmL=}{SK&GObdPkqy~V_u)i=;cP7Vn zL-XT$&b}bVIIfvVQS$@7~Rj@7p8VOQUYg!?R9%e+wxqMF4A_dyuiIrT(6eqRpUz^@~uy{kG!5 z9D>H@eOgM^efRx0b!xPOxD|dgT>5N}xMddKM@Pn?p5qt4)$nf;`JbO_vw?iRhAW74 z2FH$+RWMU{YWrh&f9_Og(n!V7Loy3Ag8I~H zIQ|DU|M%^BshRz*llk|da)-8vE1yPEv(GfR39=KBIG8*Acll|O*5Om<&UK(X5eC)M zuvBo!hFa4)9E2Rt>VUV5@qqSdb^l(M?MTSQ8|0IdQ(Aa$yRzz=EQHk^h*gp|Uo{va z_=OF|J=P&Y64WOwY+6=U*5ip0`5o#n9pUe~gYx!Hc3FRgvh&0!iA`_psO*XDn+fP+ z)!Q9sC-$;^`6c7^>uz}T(jU}~h%_lhMW?l@ff1;HKp~Rm@m@EGiA$jWe6UWjys~PL zaGYFkk@E$<<=9uNX>a{)hva-l6?uT{@j){yk zXgm@pejmgq)%IczOO%f)&=HH_gi$4=MAK_@{v#utckNV z`^EG96d_0*$q49JsitQN^@5mzSw+A5CXkqzuHdfvu&5jV#yO6_G_+w%;GVxeH|NWC zFmA2TP;;l&MnWeozTyKoeR5(4uD^NH*#ElW2oMe*8kwcc%=JNmfzfVW z#*$czZH_{@|9!|2%ROa&qF^vlUk?=QmpCaEpdpO71Bywm9X7(J-^j^n2cpWD=dnU3N>ij74T^=LxgJTUSR1Gh#Y*Gcdq=b3Et+S}h3L}! 
z8Buu}5{x~(0KEw^ChpA^5+wn4BhjC(Z`h!3W=%AbDAA@B51~UO>aBYVzd`eE8>zPw z4~WU2^Dn(;xIr_x5`NiY)oE^c^ZW6%IlV34rnLbcP%#u;hPc!93uEvl`ryek-jWBU zxq*J7TQqib6#qtkch4+KXbVX65KE3ed7!f-);TxhubOpw$y)c;-A3I+NU?+_9kV!7 z3m2ED+q-U4icnCFAc!ty#@*LfDayjgO8+XK5T6y!l4<%YfM<^w`l^B>q!IannIk>= zVN>;qcEV>PPeRiG4}h~iNnN9V?_RX-R2M1T_~hDUdQwbx=sRxo~I`}QYb7acuxSg@_4Sp z-zVVRbkt21Z(X!#8_4bFkwc5blhx6fnX6_go%pOkd*q(u2=R2B>;yUL(Wg%{`p~Zn zBx+D+ji3xvbJ~sSWaZ?(u|B#38&b*MLewKf$7%xxw8q+}i`>@jV$`nC#A0qJ5J}lRZ)xnsNH`YOM$u7;{f_W z1~=woue2514Y;ZkqEC^@km~eO@dXT*jGDIFe#`N#js=F?mY1*`wg;8$%picAZ~Tf# zM}(vmmw|z>_X1(-*zwTS^~Xl8o>i;|-lY4cBp9B$nbFf)4>pr)c_qpqTs-a&lyb9e zP*;l-1_Bp+RiON4cL0rd;>mOzpi77xMTn)8SYWfm;Oy*twV5 z(f{3M*X4^Buid&O4<-VWX>x9Dug(7(#@Ph2d$w}#U^4~^kC%*)=MLTVuz-EW;)I`z zo5Q%*1Ce^7twZLs23~!l!(`9mh^fbVJSw!AIUwxW#kvrM=Dm9doqo$hpRG`vA<*KW(ZpYa-*ONqmgf{8uA_DQKhTu)u)% zmx~^Kvj`Fr&tJ2LT;t87JR>aDq|3Q~UuZB{sH`^R%~16ky93%-P>2G6BT_B-lGpEO zVV{!1d^*^UR?c^G>$@}8bN6@YaIfu3TQ{%mAFR=O+R2EB?sWeZS#o9&trR4Y!xGgi zQWwvitCCvvuDN8yA;^}sEB2qVRV0&I#`IE|W%g35k61d}(0pgV|Ndr&!PeQ+>%qV) zH8nnQcgROuCOdmEY-IyLf*_;F1;&C;31u1G{h^$5ln$G3PfCxzIKk%F4o9QdbLSq1 zUh3;sRrWT`BiC6C1BA3kSATwsH90RDOK6#B47SVhtVS$?1sa&Wew)@-_-6}0*T$j# zj?EI1Tq;Khe*VOvj2G8sO0J$d%%yT$Zy)@v`r1{1G23n;`8eyA_9LQ znMiy7{PO3Hlf!%0uX@kcMs8mJX|}U%P6<5@5unUo`E7_}?B6LWo#u2&^SIUjy1?`) zFJ&aRyx6v~_9q>kEM_gTFgG`kC?}?M0;rdj=(za5z-M~*@2965iG~K61-9OD6wn{i z(fAyl_C-RMGBPC~)o9x}<075ojs9)IvQ_4Qm6tN^h*6gfz5sad_aG)LXmi|@?IQG< zfojj-ws!(=#ocM&zWt^`nuyzn9Om@qaY+I&aYV16x4|6@i7u;4{E`*f`KF}w4wVk^ zvfs6@BJ1n{kIhkmY<%OVOyessj^01NZX?=*Ufz7wN8rPB@5qSu zkYO8nmalxuCtF%sb?@D~i8%BCjiqaOd9&iK=)k1&ohRoPDbwvLvH(W{>Ziw1?ct2} z043J_DfLBhf1Q)jp680Z*&<^NFn2{rjr!w z)Noj}P1(idQU$OcABGWJTv@6rCXjl@8uy6GnQ}E59^D>@Z%o3}U~oCmfCsemMC6a_ z(Uw6pTd7Lk(L!|%JFkH>%*K_QJ2epo>y9{xaSmv)z}qM)YN&X4dfp;Q**R=K7Y(&d zCbIEfHfEMhIOZREXTx6yFfycuLdy{`pk<)}XvfxV+XiJG+E#HL_4df96;k64W`2Jg zBfaA&I{qJ`RcI$>|L#IR)NXK=?(ofkS*SxcBYB2|6$$>m!@8}pycvIN)S96o13KNjd)BBr~p^5UQOe7M;i5Rk?1S|8q~Vn8vTp#JP?QnwmD#*4BPgQnL7SL7RttvjcoRY;Tvo{>O)cHz$ZFon)@y(A@HGJ1Zw} z%UdKcA?B6RxmlA!oI2TyNXS?8af+q-UkEnuE~PhUdFsFaME-!u)OwZKEO@;7N<8v2 z0ube?c)gT)krA;6W7aF?PT~xvX*Z_Ki_Bul(xp-B72D?4FQTY7n_QbEocp>hy+y*Z zf!7_9mUmdmYd2L^==>gAJE0Hes85TvgED) ztApADjd=K}^)t9!ZJAyT7jWXx?)pm|@E@HaZ`Hs4t!Oq5l5tcH-RHk;*1vbeyx}VH zfdf&vw?q$Xmfc=exVKGc%3<5(^gv<@dIx+RmhwDqhS`$W&C}; z0dnGi(F~9G86XT8pf5%jBNiuJ2^7+6jr+?owxd9T9+h)0bZ+EPxwgnvBcBgsG zkNsQz@iK`~yG0C%`x}f9#-g)uaCdJ_P7gG411T{Pek_F(BhPcQIXjhw7WLxNd6w4JX#mow ztUl$%DT~}n`2QWzrGJW$K`Fk$-@he>?=7cJo$7;IjGzoHUY>q~4)4IIHF`9n$S5l- zi$hLEMhrpSosA^!dji?L5aqyKD84!i7@3C1RM%N97JO==S2SS6_n+ciT`Z7JrCv9@Kt4@*_S{#H;d0f*K+W) zGy8x1S#6>8-93Ba?>~okZYSw0iU~IvQPt$%R9>f#mR1@kAjeX3RL=qp0XvyMJ}_Z>MyzjjG(e zHIq_z(z}S7mc}JEYBLIS9go>Mv=QAKpLA}wxc1otLm;7!^jo(1STrdKWONvbp(Op~ z9Yo~@prEZiwD3CpX`QD`$(&p(+K$P9$7+kyp=&*;;YB1JP23lHD?vW;OD0Z>XR6)D zev3s;Is(2h+vOfv+#7OF>ekUc_NZ~WWNj8*g7AMXzyFa>|KRdUcle#yITu>-+7^1X ziK)KvTFz47@rzB=_N1CC`x!N|)Iao3AF**Oz6Y2(X;z)BO7cn%w+NcI42{d%ORj2@ z_K@bbshMeT|IVEWZef59I-Z3tdjAvUu&mH{B6M3Gd!EQzbS(Zc>U!gqx`r=Z1pr_GC^?q5D(d^!qE-)EM z&@d`7osZ}{Cx3h;Dgwz}DUBRV242jhlxjUq>;zEbh)N$3ajmO(X<#M{W<| z)^#5IPtzkuj|z7HL5PHegrv=b=$HJqw6=%|v8}CNfRd%WDDLq~7EOWyChBmEV^}yu zjCGWK8{${vmT+vkTY{GLt9%_> zKRvO7GHj)=j-?YQP#p_IJK`<2(;JZP|9IhX--V2USS6|PFPKQCM@ z;(mE!8!JY99em`{cXn-Q;TmElX8Gq#?DqE)Lw8smD?7YF?W|Goa%<1kuI@;Ge_&r# z@U;8zIiq{ZMI`>Nl}R5X`(T%_6a=fjbn)UROoc2s|Fb-BVT+}AKa|-*6KQCB!2T`q z@{Ws%aa*UGMJQpLO$_|Nv_)jLiqXZC(4PnRR_}o0KSk+J2h#M)#}~H9_LO88$45_FLtVbT(v*$Q+t%);2N_S9k2djIQym;#TeQIWk zt)0W(h|1PR9rL7fK{S^kA2AKY4&Y$l>ivsw82RX}2-J~Bd0zZ#H-CPiV>~c%g>~YO 
z*0xjd@1ZShF8~K~5@sxX6m(amogYiuD5~J|=V)kbQ!CRmup6DCZQ;}Jy+3l;p3a>a zXomyU#SmOhqr`dcrCBgo3(H^dJcAD$&@{!y-R-mo*lu9rzSW5un9+%Rq4cu5)7w9C zMzRkUi@vY&CwU!<(djaGbf3D-)wMRHZRb`S3bQ)ZPAh(uVvI)LMo&D&h0j;Xnq{3c zR%%F38#UtcG>4&eK<2(Ts~mUh?PBB>B8`JRUkL}QA#I(rkSMvS=HuRKuwvOV;nJ&K zM1&9{P8o-swE__ zld$xz@M&V^q8P0VoLXH~1=(NaGm%wt7%B@)Ql1Ld0Moc1l61bp<(R6WQ zowK44DCHlK8Ael-f${>*Q8D;|4(wTo;sM2-*V+k!!SzBU?GJr$o)q)ugp>2-pLdRT-W=S> zUgL`s8%yf2jimu-mZMTMf3mO&&DJmd;9VlrAasQuj7>g_R z8aO}Q^VQR*RtR`!;C-6aDAfG?YeUJ(M0$3egZy4ntHk`_2@#IBwtugvFh`L~Or4*< z_QR3d!y|Kt8v=tpMC2pfK>N+T_~7)KZ98_{t*il-F7{68v45ZX%S&!q<6GCyOp$Im zLBn`sZJC54Jkk0Sn>2Y@!FKg1Tv^3b5X`~(TX4)Pl6x`0Ta4onv($)uo_~IMVig-h zw*UEI*cLY7DWry0DW_v%Vn$WHkz*N55d$YlIU~US#sTJtStnqwr+~nt$h|)E0QX$^ z$)f#2qV7!(rf^~e6THhB7ibFXlo;=P*wc_PGrAyzX)jCtF$zP)Hf;h* z8wc+ytCb1R-t5p>%=vv`GgHX2Of#Lm@ z>c*RVSs(Es#;T{x%=QsQOJ|IT4=hn)@t8dzJqX^jr#onsM~aT5sa5H5pGPrd!u);P zN?&WX+bNyfaOAQQK%k=KsMSYPI3q6qcuU;2SkeE&z_2>8>j0iViOH}CWQy}zZz(9~ z?O*lls_*NJz1m-oH;n%Y?1y(yQ|0|GGmx3Q zIfG@zi4YVN^ryX*{ABl320+Wo%6?t@(((O=xj7@bZoV`-h?gubUZnb(q~h9lut);8 zY2p_4r0|Xl6fTE%@j7>Yiamh8(va~p^2VG&VRT_)x02fdj`c6%m$Qxp0LmtY@Ph@h z=92OqFyTXFR#mn#t^8s;tgeay;LAw>E+r>NvlD0K_k|wsmi3-Bb6vZfvsVnHSW#3z zj&OK1^wF)`w_nYE**j#_nA4|EuPABjw}t6d`Y!02!RCaMp2z4ZZiy2qlSEl`uP7NCpUOWwrcD`fs*XlHli1Q6P4osz+I!bsl&Ac$0=}%k2Mg$Fqlc1?JKx$K>f~q=3Q)5Nurx zMnW+yPFIGqm5S`vu*@Cs#R-DMU`zyDm(uTO7$WPLpo9yY9LS-qY z7!a_#Rp|juKPF9eH=V&zZ_Y8*WO4h33)pqXvGeBXIY+PE=#9n@BzB)VcX$f>TkEZ= zc#f4NNGvnc(*wxbNwTKpyp(zKrlc_BMy0#SPDfB6914mNbq@ z74$Nj{VGjqqN&}Zeq2^b(BZ>}!(#`0q|DcNp)Bp9ohYm3c=zA08m^xoLR?b1MhT0lz*dyK^<8&%`gO8es^= z^Er+4>w)`&6xtX~N9#PDZ%#~B*4)rAtmc|rf~9u!^svnoC*kB&eKj;Tab=g6u2`|J z;r17ll-TCH9IeDuHu0)ouhFr9AJ>4Q#gY`z~HhdsSdUNgX9Gw5tZ_YJK zmRmI(Q{Dr2P%vs=52=5HHy4+!EzGXAQEA<&ou6h~IJA#Tjf{-Asp6mgwv6@hxkBIl zoSc_N&FmEh*8dLI(h%~S8Tg1+3CbapKn!5DI_=Lf?EHhJ{&B%A0@ujz={+@G^t`rq ztNZZ5@054#tlHeOXX@MZ?L$(B3?6*XY85qK(B;dk_t-R#m|b)~`C4~Ulo)UqjhFtv zf2Q}`9VryDFI$G1w0XVcpPRZWO5a*e81HjqkKzQMA;q00{hh5QsW8K?;li%1V?12L zU;hW+JaB7Zi?DEgv5@%+T`;iWnULq(H-ss^+vM-#|Nr`%nj4S#)>?_KAc7M`U6fg{ zRus2D*tfE{&5~Ce^%o11%3vtP9v&w%Euziw!RLRp!Q3dhloFr%%Zwc?GF0=^b4RdT zC-R?_y>AC8fFZKZGBY)*dNc{z9QP6XcT!h~%I$2C)n+rHMEOpYv^ZV5kq zK?u<->?g~DSry}lkOncXpU;|hMmh$x=}rn{N3Ds&Q0Cz8)ek&5SkCxGLN)3N&E!?sEa?mRnE&s zlg>L@1P&q%9&-Ws%4Mg@h{3OauC2WMT8YACT&1I*N{H9TgFf#E{cfwGvJlj^;CaVy z{y7*gEZVgF8~dy?>9}X=BBl~`q0|`m9J%a%R1DUW;#(y2V;(j1Lx$opQF&N}{MI## zNqccsLkaqI*mJei>3ciNH2u(%>IFK^oPHd<>V{%rEj+6csysl#u5L=EJ`Mh|f(cOj zmbkCGqPnJHx@YODKdGNn>07pD(2jZ(9MP|@XYY4lxH30o5;2?6%a&)x_*UKM_BGE= z%@DOlfO>A^^s#t6Jw_oQev*StO!*e`megIS&E}ys;zMif)7XhVVJ5-L8@JQUN}O;QqrA?X zTUj2pVl7lpoWxM;Z!6m*I8trgM(5~HJ-{Hr&K%>F2C`-N2oLu4cW1=T9@f?uA2Mdf zu(_4jwH|-{#Bdxq?&8`IM`y9a4<7=|+tO0^+pC!ZlDq7tu36tXXO2GNTCzHq0>^}u z6T)G;cX!WN1@7SwvuThIMmqKQcx#z)ivS@vs|zv6Z4i~_J16ovvQ#TpcmL*p;&xM` zYn!`1+a?2OE zM^=v$z>9{d$QTbFo+GxDFbJP zcsxC3$EFOW=)CUwhgpVE0G@&2Wcvv_o0;7dGtBS{a-}tv$?4h&29@<8{nolhYW8D) zP6^~NrVt6vS~kKk0fpSD^xyIwlJM>Jh)Y^ODq#LGLxy>wo}csBMowWbt_7-dlqZk# z35)%WG1YQ74O1(-b{=L^rwVs?bx~MW?~2UaRZ6Z4Iv2XtyE!VtmwL&AY;Eue_>c z!2XpzYo|ZMfAr9urxS;MF!l-4#ya{Tu`h{kX+WAoGjB5d+<$HXlaY>-HJGv_<2l3K z$oK+dr_Q{v>9=m?c7mjsj0t`KigrN&OYqho_2=xY8oxB*Xs)E{$OY$cqH8!!4$7Dc zR3(Bt88wO1>EX#uIz-_gSAH?Hiu&ig*QA=TaItNvwujWI4zxE48GMsBp-z7~cfD~S zrT0k|$B29nzPnx^SRgbaA=O#-A$?4K&(!I&K1}*(nggFMa&r3Qtf4Jl@W%=$35`5nmEYu}$XyT-%p=ybu)ap{0>r9g?Q?dvpMsD1~ZGE6N?Yd&kgJ3%o| z$Nkk@;Y>)x{(gjadB#?9;iD{Dy7VCh_djPW6>Robjdv-0`}Q~usJ66AP8Y`*_VzeR zjT=AS73+C13x$i1f{nnjY|%0V2w&R0)@;R!?z^miw9SF88PtBgO~K6iC61Co7xT_g z|EI>gZW?`=tV82lkeI?P1~bgM%TDMEllPcdVrunmB?Uj#`4loShAnNVC^W3ILgSNd 
zv4v~eM~ViY#K0C|0MChIA-jzWj}uo+ueLftR4XMjJGG7%SF6Qx#_#& z>%^OeX2PC~w)WGMeRRq7Wsn~`m;E&88j_xKi{bc$ghqJh}%!vx@gLOdeA>O9E z*Ab7=vqEDtFO@AB{q2~?TZebuZWV)RhyjlbIa6ae6O^0@hg{Z$R>U0P;VGUPC9beV z&B`(o7a@=r?5xE50*`CrUp)1os7eiLd8%TLt!W_iWKU?>cgn4ViH4Y85FXp-2C+Qg z<{#x-Wd8PJ!Ry5kI%Im{sr2ukSGRHmx7O62E zu*tU0_@mtFdfshaZju}~32p2rwLWfJf8h7BXy1;VJBRKD?Y8^yM|L}t>VDj_>*;a( zf==D9{HyAwG>MwC5O*ZI`KcFi<_T0sL@Lu9+L0=rxy36+_vfB1K>%Goys%;b4XaFb zG6|%p-e0OWflBw=ee-`eTDDC>cTLT7gI3L(pS5P@ zhrXDS7alv#jtpq7_8PhEfv``frmF%jh_NcU%fz46&Hn*(TU2|Z*^`-OX4`?GB0#iF z1BJj5Xg2A=k8SS5*Ax2=-ywIu>)HQz`9!iMk)Fi-c>nI*IUp_583d*)rYo~l49QF&^Ef&c@`bJ3-&6!9MhQ&+y5{DM-nf{G zJed8h?(}kHU1?oyX~=W7wl#^e8PPfO!aacJdq2WNH}Kk789y(-{xgLR#lUd3ZXf8W z{A$9#n7{`0uva~$`s3lBpGQsmAhKFP{UGgI7;@F8I#CQR^!Vo|#%hO7wIK2MXaFa9 zP~&B@&~suGDlm5(;F&-wJMI_v0E|>1p7}+HkhhRojQxgfAQn6O$+Gt_|8BOwiV++gg?oFo(Y2C zW)eb&FK#z)xLos-5$TG0(bM_IEWpSqcs+sUVs>+|3AD)TnYb}9I7MQZDV!+aek5mv z)}TStDFJ2xYoHemSL{2>B=b;-GQ|Qqi$d2hpX%??mS;}j$A5w-{E1|AKgS`Zy(h$4 z{p??;eRWfswsJrScKq>_&HMT+>F$wV!!aR@nW=n#>5{~4cTEU1G*!`os zt*O`pzu|Iv&I|CM6tv7aJnUiM-@V%Dmp zp9va6hCHI{&^Xl-@X_TTXXlRZV{Y8|YWvPQkdM^$@FL=zKX!IABcGhMh}rjzsU;tu z=Rx1L*m2A8tp96hsqs*UgHi^E+GFPD>_eP>YJQ@#aHXRv5w{w?`j=%h(r;D4=LkNcQeq(*YhE|g{3EUQ27?glB zlUan+XXdFsUL2KCtSeyN3*@HLBJZn`w&yZ2Yc^+h5iq4#C+=&@m0_)}Ow&Ijd&`Bj ziq2zSc&v_uo9scBG&N8Rk>{!l`gp)f(KeCm{LYi>6GPhJe)^kk z_xHEp>@AHk6c4Or{R8`&KCs1)($}vi)&3}YG(q6`h>^lXf&T5?Ey247hrqzL4uIG(vtp+tNdB|I-XmY`TDh8;5o__@jz#N8E~{%HnF===W!Y}W)n>u~?{Mof!N6dC0 z%g1v^634OmBpRuul)wJwP z4jIgA-xlFyY~Wjp8~j|=^Q$@RU<0~rR>!5`E;8JVN4JSw{u5NacnJ)3I)gG^%06}S z$uUS3y$28?^*GpKexUyIR!$s`m zOd9qai`xqiMd+Y>QzeQPt}ifnwLw-MA3I>=fu- z_5R^{)*tn;m{vCIw-vp5hYQtR@T>yMaSkb@p(T{DuV#Uq%E9i+m9(kZz%T7pA6fk& zNI2?+3?>}_IdV!3>&&gs99$n^R(yx@J>>R-g=qLXkpzC|Zf0lBKRfs5q2+mS5|Qlr zz{I?wyM2DUlU;M%KUw&mBPI}(BzG8RagRQ3DQO6=o-Pbig9IVJ*`HNT))Tll>DBQR zA<3Y%D=?gj^^kJRi#SRVF~s|2yx7lOC^6X0K}Wt+)99J^;$Yr0@ffvN`IpgO^xM5^E=gqxJsRGfy*q3-!<%wp%bP4N=di3^oUpX| zyOXI^MjY$r(Ea&SW3omaJNC6t`cuzDi>Mjna9vKOC1hmf<#XxPA;RSsaCO+-jvrIF z0?i!<#PB`)-cQ|%Osy{eekLqxntP_AHXs}iId*@_sN1)1Yr&d2|Di%S?Dps7(FFVq zBEn;nT9zL^IhSKO@18;budADZ|1YS=|DOnGh`hR=X%5#%i5AC|vRux4Ek;iB@pf0v z<}C7Z_ZtI=f6iz9i6NgTEM`3yxmoZTyA1TG0 zBQ}tnn#1aW@2@vAGE7N?1H(CyG{KwTV}{+uOt^?{&a>71 zJ$w!{90SO@n;sf6f6D2XOU??ah?$&w_9jiggE@%DQQ>OLOy@qP-ndkgWw@L@#(k_# z-4!^4YNp(?>nI;f%Jlm)dr`y@Nivgm3}%t^bZBYB-qsgdssEwoVc2swVx6E20VSe+ z>uRH?9bYdvu}|oSa&cKyH}x(~%CF#Ne2gw6X@hHz5uJfJnFF)@Ac>8p23^LvBoJ^? za2)gKshbKGpOL#6>jp7{7ckg{wJQo=W@4W}5=9y@k$Wb}dl7fJUj>K1YMbIB_kgvC z?ffYqo6p=7p^Ra?qu5ho)*+8l1ZPa#pwi^oj`}+s+-T;Nq>{Vfz&ecfAwJ#vxcAXQ z&do<0tII~}(t1l!pEx7WB(1+qvgWX1d9Hr|9i}7vTDJ5|?d1|u&R7zJCzGxEntm&<<`I7T0KI^XlkDx!@*ZJV`p0J|@iLZ3(%TdhqDL?f z-}f@GD3Ymqu8UVKyTC!)XW+mGD*ZYuE6@34BMuTX{d&Q}lY@W732U%r_ohjN0rW#q zr=$a5Q*DU~i4;FdtWz8vs?Ur4EkYFQj%Bn5B;W)6V(8#LL)7O2nJ?^Br|tp}DekIJ zdU4cbkZ$nE^4SQdiF threshold).astype(int) - return 1.0 * np.sum(y_pred_idx == label) / label.shape[0] diff --git a/PaddleRec/text_matching_on_quora/models/__init__.py b/PaddleRec/text_matching_on_quora/models/__init__.py deleted file mode 100755 index a52665d4..00000000 --- a/PaddleRec/text_matching_on_quora/models/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
diff --git a/PaddleRec/text_matching_on_quora/models/__init__.py b/PaddleRec/text_matching_on_quora/models/__init__.py
deleted file mode 100755
index a52665d4..00000000
--- a/PaddleRec/text_matching_on_quora/models/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .cdssm import cdssmNet
-from .dec_att import DecAttNet
-from .sse import SSENet
-from .infer_sent import InferSentNet
diff --git a/PaddleRec/text_matching_on_quora/models/cdssm.py b/PaddleRec/text_matching_on_quora/models/cdssm.py
deleted file mode 100755
index 334cfebb..00000000
--- a/PaddleRec/text_matching_on_quora/models/cdssm.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle.fluid as fluid
-
-
-class cdssmNet():
-    """cdssm net"""
-
-    def __init__(self, config):
-        self._config = config
-
-    def __call__(self, seq1, seq2, label):
-        return self.body(seq1, seq2, label, self._config)
-
-    def body(self, seq1, seq2, label, config):
-        """Body function"""
-
-        def conv_model(seq):
-            embed = fluid.layers.embedding(
-                input=seq,
-                size=[config.dict_dim, config.emb_dim],
-                param_attr='emb.w')
-            conv = fluid.layers.sequence_conv(
-                embed,
-                num_filters=config.kernel_count,
-                filter_size=config.kernel_size,
-                filter_stride=1,
-                padding=True,  # zero-pad sequence ends so the conv output keeps the input length
-                bias_attr=False,
-                param_attr='conv1d.w',
-                act='relu')
-            #print paddle.parameters.get('conv1d.w').shape
-
-            conv = fluid.layers.dropout(conv, dropout_prob=config.droprate_conv)
-            pool = fluid.layers.sequence_pool(conv, pool_type="max")
-            fc = fluid.layers.fc(pool,
-                                 size=config.fc_dim,
-                                 param_attr='fc1.w',
-                                 bias_attr='fc1.b',
-                                 act='relu')
-            return fc
-
-        def MLP(vec):
-            for dim in config.mlp_hid_dim:
-                vec = fluid.layers.fc(vec, size=dim, act='relu')
-                vec = fluid.layers.dropout(vec, dropout_prob=config.droprate_fc)
-            return vec
-
-        seq1_fc = conv_model(seq1)
-        seq2_fc = conv_model(seq2)
-        concated_seq = fluid.layers.concat(input=[seq1_fc, seq2_fc], axis=1)
-        mlp_res = MLP(concated_seq)
-        prediction = fluid.layers.fc(mlp_res,
-                                     size=config.class_dim,
-                                     act='softmax')
-        loss = fluid.layers.cross_entropy(input=prediction, label=label)
-        avg_cost = fluid.layers.mean(x=loss)
-        acc = fluid.layers.accuracy(input=prediction, label=label)
-        return avg_cost, acc, prediction
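The net classes above are plain callables that build a Fluid graph from two variable-length (LoD) token-id sequences and a label. A minimal wiring sketch; the Config fields below are assumptions chosen to satisfy the attributes cdssmNet reads, not values from the deleted config files:

```python
import paddle.fluid as fluid
from models import cdssmNet  # the class defined in the hunk above

class Config(object):
    # Hypothetical hyperparameters; only the attribute names matter here.
    dict_dim = 100000        # vocabulary size
    emb_dim = 300
    kernel_count = 256       # number of conv filters
    kernel_size = 3
    fc_dim = 128
    mlp_hid_dim = [128, 64]
    droprate_conv = 0.1
    droprate_fc = 0.1
    class_dim = 2

# Variable-length token-id sequences are LoD tensors (lod_level=1).
q1 = fluid.layers.data(name='q1', shape=[1], dtype='int64', lod_level=1)
q2 = fluid.layers.data(name='q2', shape=[1], dtype='int64', lod_level=1)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

net = cdssmNet(Config())
avg_cost, acc, prediction = net(q1, q2, label)  # builds the whole graph
```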
diff --git a/PaddleRec/text_matching_on_quora/models/dec_att.py b/PaddleRec/text_matching_on_quora/models/dec_att.py
deleted file mode 100755
index 4c3fecbe..00000000
--- a/PaddleRec/text_matching_on_quora/models/dec_att.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle.fluid as fluid
-
-
-class DecAttNet():
-    """decomposable attention net"""
-
-    def __init__(self, config):
-        self._config = config
-        self.initializer = fluid.initializer.Xavier(uniform=False)
-
-    def __call__(self, seq1, seq2, mask1, mask2, label):
-        return self.body(seq1, seq2, mask1, mask2, label)
-
-    def body(self, seq1, seq2, mask1, mask2, label):
-        """Body function"""
-        transformed_q1 = self.transformation(seq1)
-        transformed_q2 = self.transformation(seq2)
-        masked_q1 = self.apply_mask(transformed_q1, mask1)
-        masked_q2 = self.apply_mask(transformed_q2, mask2)
-        alpha, beta = self.attend(masked_q1, masked_q2)
-        if self._config.share_wight_btw_seq:
-            seq1_compare = self.compare(masked_q1, beta, param_prefix='compare')
-            seq2_compare = self.compare(
-                masked_q2, alpha, param_prefix='compare')
-        else:
-            seq1_compare = self.compare(
-                masked_q1, beta, param_prefix='compare_1')
-            seq2_compare = self.compare(
-                masked_q2, alpha, param_prefix='compare_2')
-        aggregate_res = self.aggregate(seq1_compare, seq2_compare)
-        prediction = fluid.layers.fc(aggregate_res,
-                                     size=self._config.class_dim,
-                                     act='softmax')
-        loss = fluid.layers.cross_entropy(input=prediction, label=label)
-        avg_cost = fluid.layers.mean(x=loss)
-        acc = fluid.layers.accuracy(input=prediction, label=label)
-        return avg_cost, acc, prediction
-
-    def apply_mask(self, seq, mask):
-        """
-        apply mask on seq
-        Input: seq in shape [batch_size, seq_len, embedding_size]
-        Input: mask in shape [batch_size, seq_len]
-        Output: masked seq in shape [batch_size, seq_len, embedding_size]
-        """
-        return fluid.layers.elementwise_mul(x=seq, y=mask, axis=0)
-
-    def feed_forward_2d(self, vec, param_prefix):
-        """
-        Input: vec in shape [batch_size, seq_len, vec_dim]
-        Output: fc2 in shape [batch_size, seq_len, num_units[1]]
-        """
-        fc1 = fluid.layers.fc(vec,
-                              size=self._config.num_units[0],
-                              num_flatten_dims=2,
-                              param_attr=fluid.ParamAttr(
-                                  name=param_prefix + '_fc1.w',
-                                  initializer=self.initializer),
-                              bias_attr=param_prefix + '_fc1.b',
-                              act='relu')
-        fc1 = fluid.layers.dropout(fc1, dropout_prob=self._config.droprate)
-        fc2 = fluid.layers.fc(fc1,
-                              size=self._config.num_units[1],
-                              num_flatten_dims=2,
-                              param_attr=fluid.ParamAttr(
-                                  name=param_prefix + '_fc2.w',
-                                  initializer=self.initializer),
-                              bias_attr=param_prefix + '_fc2.b',
-                              act='relu')
-        fc2 = fluid.layers.dropout(fc2, dropout_prob=self._config.droprate)
-        return fc2
-
-    def feed_forward(self, vec, param_prefix):
-        """
-        Input: vec in shape [batch_size, vec_dim]
-        Output: fc2 in shape [batch_size, num_units[1]]
-        """
-        fc1 = fluid.layers.fc(vec,
-                              size=self._config.num_units[0],
-                              num_flatten_dims=1,
-                              param_attr=fluid.ParamAttr(
-                                  name=param_prefix + '_fc1.w',
-                                  initializer=self.initializer),
-                              bias_attr=param_prefix + '_fc1.b',
-                              act='relu')
-        fc1 = fluid.layers.dropout(fc1, dropout_prob=self._config.droprate)
-        fc2 = fluid.layers.fc(fc1,
-                              size=self._config.num_units[1],
-                              num_flatten_dims=1,
-                              param_attr=fluid.ParamAttr(
-                                  name=param_prefix + '_fc2.w',
-                                  initializer=self.initializer),
-                              bias_attr=param_prefix + '_fc2.b',
-                              act='relu')
-        fc2 = fluid.layers.dropout(fc2, dropout_prob=self._config.droprate)
-        return fc2
-
-    def transformation(self, seq):
-        embed = fluid.layers.embedding(
-            input=seq,
-            size=[self._config.dict_dim, self._config.emb_dim],
-            param_attr=fluid.ParamAttr(
-                name='emb.w', trainable=self._config.word_embedding_trainable))
-        if self._config.proj_emb_dim is not None:
-            return fluid.layers.fc(embed,
-                                   size=self._config.proj_emb_dim,
-                                   num_flatten_dims=2,
-                                   param_attr=fluid.ParamAttr(
-                                       name='project' + '_fc1.w',
-                                       initializer=self.initializer),
-                                   bias_attr=False,
-                                   act=None)
-        return embed
-
-    def attend(self, seq1, seq2):
-        """
-        Input: seq1, shape [batch_size, seq_len1, embed_size]
-        Input: seq2, shape [batch_size, seq_len2, embed_size]
-        Output: alpha, shape [batch_size, seq_len2, embed_size]
-        Output: beta, shape [batch_size, seq_len1, embed_size]
-        """
-        if self._config.share_wight_btw_seq:
-            seq1 = self.feed_forward_2d(seq1, param_prefix="attend")
-            seq2 = self.feed_forward_2d(seq2, param_prefix="attend")
-        else:
-            seq1 = self.feed_forward_2d(seq1, param_prefix="attend_1")
-            seq2 = self.feed_forward_2d(seq2, param_prefix="attend_2")
-        attention_weight = fluid.layers.matmul(seq1, seq2, transpose_y=True)
-        normalized_attention_weight = fluid.layers.softmax(attention_weight)
-        beta = fluid.layers.matmul(normalized_attention_weight, seq2)
-        attention_weight_t = fluid.layers.transpose(
-            attention_weight, perm=[0, 2, 1])
-        normalized_attention_weight_t = fluid.layers.softmax(attention_weight_t)
-        alpha = fluid.layers.matmul(normalized_attention_weight_t, seq1)
-        return alpha, beta
-
-    def compare(self, seq, soft_alignment, param_prefix):
-        concat_seq = fluid.layers.concat(input=[seq, soft_alignment], axis=2)
-        # use the caller's prefix so compare_1/compare_2 really get separate
-        # weights when share_wight_btw_seq is False
-        return self.feed_forward_2d(concat_seq, param_prefix=param_prefix)
-
-    def aggregate(self, vec1, vec2):
-        vec1 = fluid.layers.reduce_sum(vec1, dim=1)
-        vec2 = fluid.layers.reduce_sum(vec2, dim=1)
-        concat_vec = fluid.layers.concat(input=[vec1, vec2], axis=1)
-        return self.feed_forward(concat_vec, param_prefix='aggregate')
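The attend method above is the core of the decomposable-attention model: a single alignment-score matrix e = F(a) F(b)^T, softmax-normalized along each axis, yields both soft alignments. A numpy sketch for a single (unbatched) example, assuming the feed-forward projections have already been applied:

```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

seq1 = np.random.rand(5, 8)   # [seq_len1, hidden]
seq2 = np.random.rand(7, 8)   # [seq_len2, hidden]

e = seq1 @ seq2.T                    # [seq_len1, seq_len2] alignment scores
beta = softmax(e, axis=1) @ seq2     # [seq_len1, hidden]: seq2 aligned to seq1
alpha = softmax(e.T, axis=1) @ seq1  # [seq_len2, hidden]: seq1 aligned to seq2
```

beta pairs each seq1 position with a weighted summary of seq2 and is compared against seq1 in the compare step; alpha is the mirror image for seq2.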
diff --git a/PaddleRec/text_matching_on_quora/models/infer_sent.py b/PaddleRec/text_matching_on_quora/models/infer_sent.py
deleted file mode 100644
index 67de901b..00000000
--- a/PaddleRec/text_matching_on_quora/models/infer_sent.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle.fluid as fluid
-from .my_layers import bi_lstm_layer
-from .match_layers import ElementwiseMatching
-
-
-class InferSentNet():
-    """
-    Based on the paper: Supervised Learning of Universal Sentence Representations from Natural Language Inference Data:
-    https://arxiv.org/abs/1705.02364
-    """
-
-    def __init__(self, config):
-        self._config = config
-
-    def __call__(self, seq1, seq2, label):
-        return self.body(seq1, seq2, label, self._config)
-
-    def body(self, seq1, seq2, label, config):
-        """Body function"""
-
-        seq1_rnn = self.encoder(seq1)
-        seq2_rnn = self.encoder(seq2)
-        seq_match = ElementwiseMatching(seq1_rnn, seq2_rnn)
-
-        mlp_res = self.MLP(seq_match)
-        prediction = fluid.layers.fc(mlp_res,
-                                     size=self._config.class_dim,
-                                     act='softmax')
-        loss = fluid.layers.cross_entropy(input=prediction, label=label)
-        avg_cost = fluid.layers.mean(x=loss)
-        acc = fluid.layers.accuracy(input=prediction, label=label)
-        return avg_cost, acc, prediction
-
-    def encoder(self, seq):
-        """encoder"""
-
-        embed = fluid.layers.embedding(
-            input=seq,
-            size=[self._config.dict_dim, self._config.emb_dim],
-            param_attr=fluid.ParamAttr(
-                name='emb.w', trainable=self._config.word_embedding_trainable))
-
-        bi_lstm_h = bi_lstm_layer(
-            embed, rnn_hid_dim=self._config.rnn_hid_dim, name='encoder')
-
-        bi_lstm_h = fluid.layers.dropout(
-            bi_lstm_h, dropout_prob=self._config.droprate_lstm)
-        pool = fluid.layers.sequence_pool(input=bi_lstm_h, pool_type='max')
-        return pool
-
-    def MLP(self, vec):
-        if self._config.mlp_non_linear:
-            drop1 = fluid.layers.dropout(
-                vec, dropout_prob=self._config.droprate_fc)
-            fc1 = fluid.layers.fc(drop1, size=512, act='tanh')
-            drop2 = fluid.layers.dropout(
-                fc1, dropout_prob=self._config.droprate_fc)
-            fc2 = fluid.layers.fc(drop2, size=512, act='tanh')
-            res = fluid.layers.dropout(
-                fc2, dropout_prob=self._config.droprate_fc)
-        else:
-            fc1 = fluid.layers.fc(vec, size=512, act=None)
-            res = fluid.layers.fc(fc1, size=512, act=None)
-        return res
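The encoder above collapses the variable-length bi-LSTM output into a fixed-size sentence vector with a max-pool over time (sequence_pool with pool_type='max'); numerically that is just a column-wise max, e.g.:

```python
import numpy as np

# Hypothetical bi-LSTM outputs for one sentence: [seq_len, 2 * rnn_hid_dim].
states = np.array([[0.1, -0.3, 0.5],
                   [0.4,  0.2, -0.1],
                   [0.0,  0.9,  0.3]])

sentence_vec = states.max(axis=0)  # element-wise max over time steps
print(sentence_vec)                # [0.4, 0.9, 0.5]
```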
diff --git a/PaddleRec/text_matching_on_quora/models/match_layers.py b/PaddleRec/text_matching_on_quora/models/match_layers.py
deleted file mode 100755
index 314d5b2c..00000000
--- a/PaddleRec/text_matching_on_quora/models/match_layers.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-This Module provides different kinds of Match layers
-"""
-
-import paddle.fluid as fluid
-
-
-def MultiPerspectiveMatching(vec1, vec2, perspective_num):
-    """
-    MultiPerspectiveMatching
-    """
-    sim_res = None
-    for i in range(perspective_num):
-        vec1_res = fluid.layers.elementwise_add_with_weight(
-            vec1, param_attr="elementwise_add_with_weight." + str(i))
-        vec2_res = fluid.layers.elementwise_add_with_weight(
-            vec2, param_attr="elementwise_add_with_weight." + str(i))
-        m = fluid.layers.cos_sim(vec1_res, vec2_res)
-        if sim_res is None:
-            sim_res = m
-        else:
-            sim_res = fluid.layers.concat(input=[sim_res, m], axis=1)
-    return sim_res
-
-
-def ConcateMatching(vec1, vec2):
-    """
-    ConcateMatching
-    """
-    # TODO: assert that vec1 and vec2 have compatible shapes before concat
-    return fluid.layers.concat(input=[vec1, vec2], axis=1)
-
-
-def ElementwiseMatching(vec1, vec2):
-    """
-    reference: [Supervised Learning of Universal Sentence Representations from Natural Language Inference Data](https://arxiv.org/abs/1705.02364)
-    """
-    elementwise_mul = fluid.layers.elementwise_mul(x=vec1, y=vec2)
-    elementwise_sub = fluid.layers.elementwise_sub(x=vec1, y=vec2)
-    elementwise_abs_sub = fluid.layers.abs(elementwise_sub)
-    return fluid.layers.concat(
-        input=[vec1, vec2, elementwise_mul, elementwise_abs_sub], axis=1)
diff --git a/PaddleRec/text_matching_on_quora/models/my_layers.py b/PaddleRec/text_matching_on_quora/models/my_layers.py
deleted file mode 100755
index 374d7982..00000000
--- a/PaddleRec/text_matching_on_quora/models/my_layers.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-This module defines some frequently-used DNN layers
-"""
-
-import paddle.fluid as fluid
-
-
-def bi_lstm_layer(input, rnn_hid_dim, name):
-    """
-    This is a Bi-directional LSTM (long short term memory) module
-    """
-    fc0 = fluid.layers.fc(
-        input=input,  # shared fc projection feeding both lstm directions
-        size=rnn_hid_dim * 4,
-        param_attr=name + '.fc0.w',
-        bias_attr=False,
-        act=None)
-
-    lstm_h, c = fluid.layers.dynamic_lstm(
-        input=fc0,
-        size=rnn_hid_dim * 4,
-        is_reverse=False,
-        param_attr=name + '.lstm_w',
-        bias_attr=name + '.lstm_b')
-
-    reversed_lstm_h, reversed_c = fluid.layers.dynamic_lstm(
-        input=fc0,
-        size=rnn_hid_dim * 4,
-        is_reverse=True,
-        param_attr=name + '.reversed_lstm_w',
-        bias_attr=name + '.reversed_lstm_b')
-    return fluid.layers.concat(input=[lstm_h, reversed_lstm_h], axis=1)
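ElementwiseMatching above assembles the InferSent-style matching features [u; v; u*v; |u-v|]. The same computation for plain vectors in numpy (names illustrative):

```python
import numpy as np

def elementwise_matching(u, v):
    """Concatenate both vectors with their product and absolute difference."""
    return np.concatenate([u, v, u * v, np.abs(u - v)], axis=-1)

u = np.array([1.0, 2.0])
v = np.array([3.0, 1.0])
print(elementwise_matching(u, v))  # [1. 2. 3. 1. 3. 2. 2. 1.]
```

As for bi_lstm_layer, the factor of 4 in the fc and dynamic_lstm sizes is Fluid's convention: the projections for the input, forget, cell, and output gates are packed into one matrix, so the LSTM's input width must be 4 times its hidden width.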
-# Just for test `git push` diff --git a/PaddleRec/text_matching_on_quora/models/sse.py b/PaddleRec/text_matching_on_quora/models/sse.py deleted file mode 100644 index 621f4425..00000000 --- a/PaddleRec/text_matching_on_quora/models/sse.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle.fluid as fluid -from .my_layers import bi_lstm_layer -from .match_layers import ElementwiseMatching - - -class SSENet(): - """ - SSE net: Shortcut-Stacked Sentence Encoders for Multi-Domain Inference - https://arxiv.org/abs/1708.02312 - """ - - def __init__(self, config): - self._config = config - - def __call__(self, seq1, seq2, label): - return self.body(seq1, seq2, label, self._config) - - def body(self, seq1, seq2, label, config): - """Body function""" - - def stacked_bi_rnn_model(seq): - embed = fluid.layers.embedding( - input=seq, - size=[self._config.dict_dim, self._config.emb_dim], - param_attr='emb.w') - stacked_lstm_out = [embed] - for i in range(len(self._config.rnn_hid_dim)): - if i == 0: - feature = embed - else: - feature = fluid.layers.concat( - input=stacked_lstm_out, axis=1) - bi_lstm_h = bi_lstm_layer( - feature, - rnn_hid_dim=self._config.rnn_hid_dim[i], - name="lstm_" + str(i)) - - # add dropout except for the last stacked lstm layer - if i != len(self._config.rnn_hid_dim) - 1: - bi_lstm_h = fluid.layers.dropout( - bi_lstm_h, dropout_prob=self._config.droprate_lstm) - stacked_lstm_out.append(bi_lstm_h) - pool = fluid.layers.sequence_pool(input=bi_lstm_h, pool_type='max') - return pool - - def MLP(vec): - for i in range(len(self._config.fc_dim)): - vec = fluid.layers.fc(vec, - size=self._config.fc_dim[i], - act='relu') - # add dropout after every layer of MLP - vec = fluid.layers.dropout( - vec, dropout_prob=self._config.droprate_fc) - return vec - - seq1_rnn = stacked_bi_rnn_model(seq1) - seq2_rnn = stacked_bi_rnn_model(seq2) - seq_match = ElementwiseMatching(seq1_rnn, seq2_rnn) - - mlp_res = MLP(seq_match) - prediction = fluid.layers.fc(mlp_res, - size=self._config.class_dim, - act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=loss) - acc = fluid.layers.accuracy(input=prediction, label=label) - return avg_cost, acc, prediction diff --git a/PaddleRec/text_matching_on_quora/models/test.py b/PaddleRec/text_matching_on_quora/models/test.py deleted file mode 100644 index 33ed0ecf..00000000 --- a/PaddleRec/text_matching_on_quora/models/test.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/PaddleRec/text_matching_on_quora/pretrained_word2vec.py b/PaddleRec/text_matching_on_quora/pretrained_word2vec.py deleted file mode 100755 index a6df8055..00000000 --- a/PaddleRec/text_matching_on_quora/pretrained_word2vec.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -This Module provide pretrained word-embeddings -""" - -from __future__ import print_function, unicode_literals -import numpy as np -import time, datetime -import os, sys - - -def maybe_open(filepath): - if sys.version_info <= (3, 0): # for python2 - return open(filepath, 'r') - else: - return open(filepath, 'r', encoding="utf-8") - - -def Glove840B_300D(filepath, keys=None): - """ - input: the "glove.840B.300d.txt" file path - return: a dict, key: word (unicode), value: a numpy array with shape [300] - """ - if keys is not None: - assert (isinstance(keys, set)) - print("loading word2vec from ", filepath) - print("please wait for a minute.") - start = time.time() - word2vec = {} - with maybe_open(filepath) as f: - for line in f: - if sys.version_info <= (3, 0): # for python2 - line = line.decode('utf-8') - info = line.strip("\n").split(" ") - word = info[0] - if (keys is not None) and (word not in keys): - continue - vector = info[1:] - assert (len(vector) == 300) - word2vec[word] = np.asarray(vector, dtype='float32') - - end = time.time() - print( - "Spent ", - str(datetime.timedelta(seconds=end - start)), - " on loading word2vec.") - return word2vec - - -if __name__ == '__main__': - from os.path import expanduser - home = expanduser("~") - embed_dict = Glove840B_300D( - os.path.join(home, "./.cache/paddle/dataset/glove.840B.300d.txt")) - exit(0) diff --git a/PaddleRec/text_matching_on_quora/quora_question_pairs.py b/PaddleRec/text_matching_on_quora/quora_question_pairs.py deleted file mode 100755 index e21742ae..00000000 --- a/PaddleRec/text_matching_on_quora/quora_question_pairs.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -""" -""" - -import paddle.dataset.common -import collections -import tarfile -import re -import string -import random -import os, sys -import nltk -from os.path import expanduser - -__all__ = ['word_dict', 'train', 'dev', 'test'] - -URL = "https://drive.google.com/file/d/0B0PlTAo--BnaQWlsZl9FZ3l1c28/view" - -DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset') -DATA_DIR = "Quora_question_pair_partition" - -QUORA_TRAIN_FILE_NAME = os.path.join(DATA_HOME, DATA_DIR, 'train.tsv') -QUORA_DEV_FILE_NAME = os.path.join(DATA_HOME, DATA_DIR, 'dev.tsv') -QUORA_TEST_FILE_NAME = os.path.join(DATA_HOME, DATA_DIR, 'test.tsv') - -# punctuation or nltk or space -TOKENIZE_METHOD = 'space' - -COLUMN_COUNT = 4 - - -def tokenize(s): - if sys.version_info <= (3, 0): # for python2 - s = s.decode('utf-8') - if TOKENIZE_METHOD == "nltk": - return nltk.tokenize.word_tokenize(s) - elif TOKENIZE_METHOD == "punctuation": - return s.translate({ord(char): None - for char in string.punctuation}).lower().split() - elif TOKENIZE_METHOD == "space": - return s.split() - else: - raise RuntimeError("Invalid tokenize method") - - -def maybe_open(file_name): - if not os.path.isfile(file_name): - msg = "file not exist: %s\nPlease download the dataset firstly from: %s\n\n" % (file_name, URL) + \ - ("# The finally dataset dir should be like\n\n" - "$HOME/.cache/paddle/dataset\n" - " |- Quora_question_pair_partition\n" - " |- train.tsv\n" - " |- test.tsv\n" - " |- dev.tsv\n" - " |- readme.txt\n" - " |- wordvec.txt\n") - raise RuntimeError(msg) - if sys.version_info <= (3, 0): # for python2 - return open(file_name, 'r') - else: - return open(file_name, 'r', encoding="utf-8") - - -def tokenized_question_pairs(file_name): - """ - """ - with maybe_open(file_name) as f: - questions = {} - lines = f.readlines() - for line in lines: - info = line.strip().split('\t') - if len(info) != COLUMN_COUNT: - # formatting error - continue - (label, question1, question2, id) = info - question1 = tokenize(question1) - question2 = tokenize(question2) - yield question1, question2, int(label) - - -def tokenized_questions(file_name): - """ - """ - with maybe_open(file_name) as f: - lines = f.readlines() - for line in lines: - info = line.strip().split('\t') - if len(info) != COLUMN_COUNT: - # formatting error - continue - (label, question1, question2, id) = info - yield tokenize(question1) - yield tokenize(question2) - - -def build_dict(file_name, cutoff): - """ - Build a word dictionary from the corpus. Keys of the dictionary are words, - and values are zero-based IDs of these words. 
- """ - word_freq = collections.defaultdict(int) - for doc in tokenized_questions(file_name): - for word in doc: - word_freq[word] += 1 - - word_freq = filter(lambda x: x[1] > cutoff, word_freq.items()) - - dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0])) - words, _ = list(zip(*dictionary)) - word_idx = dict(zip(words, range(len(words)))) - word_idx[''] = len(words) - word_idx[''] = len(words) + 1 - return word_idx - - -def reader_creator(file_name, word_idx): - UNK_ID = word_idx[''] - - def reader(): - for (q1, q2, label) in tokenized_question_pairs(file_name): - q1_ids = [word_idx.get(w, UNK_ID) for w in q1] - q2_ids = [word_idx.get(w, UNK_ID) for w in q2] - if q1_ids != [] and q2_ids != []: # [] is not allowed in fluid - assert (label in [0, 1]) - yield q1_ids, q2_ids, label - - return reader - - -def train(word_idx): - """ - Quora training set creator. - - It returns a reader creator, each sample in the reader is two zero-based ID - list and label in [0, 1]. - - :param word_idx: word dictionary - :type word_idx: dict - :return: Training reader creator - :rtype: callable - """ - return reader_creator(QUORA_TRAIN_FILE_NAME, word_idx) - - -def dev(word_idx): - """ - Quora develop set creator. - - It returns a reader creator, each sample in the reader is two zero-based ID - list and label in [0, 1]. - - :param word_idx: word dictionary - :type word_idx: dict - :return: develop reader creator - :rtype: callable - - """ - return reader_creator(QUORA_DEV_FILE_NAME, word_idx) - - -def test(word_idx): - """ - Quora test set creator. - - It returns a reader creator, each sample in the reader is two zero-based ID - list and label in [0, 1]. - - :param word_idx: word dictionary - :type word_idx: dict - :return: Test reader creator - :rtype: callable - """ - return reader_creator(QUORA_TEST_FILE_NAME, word_idx) - - -def word_dict(): - """ - Build a word dictionary from the corpus. - - :return: Word dictionary - :rtype: dict - """ - return build_dict(file_name=QUORA_TRAIN_FILE_NAME, cutoff=4) diff --git a/PaddleRec/text_matching_on_quora/train_and_evaluate.py b/PaddleRec/text_matching_on_quora/train_and_evaluate.py deleted file mode 100755 index 303dd415..00000000 --- a/PaddleRec/text_matching_on_quora/train_and_evaluate.py +++ /dev/null @@ -1,314 +0,0 @@ -#Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import os -import sys -import time -import argparse -import unittest -import contextlib -import numpy as np - -import paddle.fluid as fluid - -import utils, metric, configs -import models - -from pretrained_word2vec import Glove840B_300D - -parser = argparse.ArgumentParser(description=__doc__) - -parser.add_argument( - '--model_name', type=str, default='cdssmNet', help="Which model to train") -parser.add_argument( - '--config', - type=str, - default='cdssm_base', - help="The global config setting") -parser.add_argument( - '--enable_ce', - action='store_true', - help='If set, run the task with continuous evaluation logs.') -parser.add_argument('--epoch_num', type=int, help='Number of epoch') - -DATA_DIR = os.path.join(os.path.expanduser('~'), '.cache/paddle/dataset') - - -def evaluate(epoch_id, exe, inference_program, dev_reader, test_reader, - fetch_list, feeder, metric_type): - """ - evaluate on test/dev dataset - """ - - def infer(test_reader): - """ - do inference function - """ - total_cost = 0.0 - total_count = 0 - preds, labels = [], [] - for data in test_reader(): - avg_cost, avg_acc, batch_prediction = exe.run( - inference_program, - feed=feeder.feed(data), - fetch_list=fetch_list, - return_numpy=True) - total_cost += avg_cost * len(data) - total_count += len(data) - preds.append(batch_prediction) - labels.append(np.asarray([x[-1] for x in data], dtype=np.int64)) - y_pred = np.concatenate(preds) - y_label = np.concatenate(labels) - - metric_res = [] - for metric_name in metric_type: - if metric_name == 'accuracy_with_threshold': - metric_res.append((metric_name, metric.accuracy_with_threshold( - y_pred, y_label, threshold=0.3))) - elif metric_name == 'accuracy': - metric_res.append( - (metric_name, metric.accuracy(y_pred, y_label))) - else: - print("Unknown metric type: ", metric_name) - exit() - return total_cost / (total_count * 1.0), metric_res - - dev_cost, dev_metric_res = infer(dev_reader) - print("[%s] epoch_id: %d, dev_cost: %f, " % (time.asctime( - time.localtime(time.time())), epoch_id, dev_cost) + ', '.join( - [str(x[0]) + ": " + str(x[1]) for x in dev_metric_res])) - - test_cost, test_metric_res = infer(test_reader) - print("[%s] epoch_id: %d, test_cost: %f, " % (time.asctime( - time.localtime(time.time())), epoch_id, test_cost) + ', '.join( - [str(x[0]) + ": " + str(x[1]) for x in test_metric_res])) - print("") - - -def train_and_evaluate(train_reader, dev_reader, test_reader, network, - optimizer, global_config, pretrained_word_embedding, - use_cuda, parallel): - """ - train network - """ - - # define the net - if global_config.use_lod_tensor: - # automatic add batch dim - q1 = fluid.layers.data( - name="question1", shape=[1], dtype="int64", lod_level=1) - q2 = fluid.layers.data( - name="question2", shape=[1], dtype="int64", lod_level=1) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost, acc, prediction = network(q1, q2, label) - else: - # shape: [batch_size, max_seq_len_in_batch, 1] - q1 = fluid.layers.data( - name="question1", shape=[-1, -1, 1], dtype="int64") - q2 = fluid.layers.data( - name="question2", shape=[-1, -1, 1], dtype="int64") - # shape: [batch_size, max_seq_len_in_batch] - mask1 = fluid.layers.data(name="mask1", shape=[-1, -1], dtype="float32") - mask2 = fluid.layers.data(name="mask2", shape=[-1, -1], dtype="float32") - label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost, acc, prediction = network(q1, q2, mask1, mask2, label) - - if parallel: - # TODO: Paarallel 
Training - print("Parallel Training is not supported for now.") - sys.exit(1) - - #optimizer.minimize(cost) - if use_cuda: - print("Using GPU") - place = fluid.CUDAPlace(0) - else: - print("Using CPU") - place = fluid.CPUPlace() - exe = fluid.Executor(place) - - if global_config.use_lod_tensor: - feeder = fluid.DataFeeder(feed_list=[q1, q2, label], place=place) - else: - feeder = fluid.DataFeeder( - feed_list=[q1, q2, mask1, mask2, label], place=place) - - # only for ce - args = parser.parse_args() - if args.enable_ce: - SEED = 102 - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED - - # logging param info - for param in fluid.default_main_program().global_block().all_parameters(): - print("param name: %s; param shape: %s" % (param.name, param.shape)) - - # define inference_program - inference_program = fluid.default_main_program().clone(for_test=True) - - optimizer.minimize(cost) - - exe.run(fluid.default_startup_program()) - - # load emb from a numpy erray - if pretrained_word_embedding is not None: - print("loading pretrained word embedding to param") - embedding_name = "emb.w" - embedding_param = fluid.global_scope().find_var( - embedding_name).get_tensor() - embedding_param.set(pretrained_word_embedding, place) - - evaluate( - -1, - exe, - inference_program, - dev_reader, - test_reader, - fetch_list=[cost, acc, prediction], - feeder=feeder, - metric_type=global_config.metric_type) - - # start training - total_time = 0.0 - print("[%s] Start Training" % time.asctime(time.localtime(time.time()))) - for epoch_id in range(global_config.epoch_num): - - data_size, data_count, total_acc, total_cost = 0, 0, 0.0, 0.0 - batch_id = 0 - epoch_begin_time = time.time() - for data in train_reader(): - avg_cost_np, avg_acc_np = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[cost, acc]) - data_size = len(data) - total_acc += data_size * avg_acc_np[0] - total_cost += data_size * avg_cost_np[0] - data_count += data_size - if batch_id % 100 == 0: - print("[%s] epoch_id: %d, batch_id: %d, cost: %f, acc: %f" % - (time.asctime(time.localtime(time.time())), epoch_id, - batch_id, avg_cost_np, avg_acc_np)) - batch_id += 1 - avg_cost = total_cost / data_count - avg_acc = total_acc / data_count - epoch_end_time = time.time() - total_time += epoch_end_time - epoch_begin_time - - print("") - print( - "[%s] epoch_id: %d, train_avg_cost: %f, train_avg_acc: %f, epoch_time_cost: %f" - % (time.asctime(time.localtime(time.time())), epoch_id, avg_cost, - avg_acc, time.time() - epoch_begin_time)) - - # only for ce - if epoch_id == global_config.epoch_num - 1 and args.enable_ce: - #Note: The following logs are special for CE monitoring. - #Other situations do not need to care about these logs. 
- gpu_num = get_cards(args) - print("kpis\teach_pass_duration_card%s\t%s" % \ - (gpu_num, total_time / (global_config.epoch_num))) - print("kpis\ttrain_avg_cost_card%s\t%s" % (gpu_num, avg_cost)) - print("kpis\ttrain_avg_acc_card%s\t%s" % (gpu_num, avg_acc)) - - epoch_model = global_config.save_dirname + "/" + "epoch" + str(epoch_id) - fluid.io.save_inference_model( - epoch_model, ["question1", "question2", "label"], acc, exe) - - evaluate( - epoch_id, - exe, - inference_program, - dev_reader, - test_reader, - fetch_list=[cost, acc, prediction], - feeder=feeder, - metric_type=global_config.metric_type) - - -def main(): - """ - This function will parse argments, prepare data and prepare pretrained embedding - """ - args = parser.parse_args() - global_config = configs.__dict__[args.config]() - - if args.epoch_num != None: - global_config.epoch_num = args.epoch_num - - print("net_name: ", args.model_name) - net = models.__dict__[args.model_name](global_config) - - # get word_dict - word_dict = utils.getDict(data_type="quora_question_pairs") - - # get reader - train_reader, dev_reader, test_reader = utils.prepare_data( - "quora_question_pairs", - word_dict=word_dict, - batch_size=global_config.batch_size, - buf_size=800000, - duplicate_data=global_config.duplicate_data, - use_pad=(not global_config.use_lod_tensor)) - - # load pretrained_word_embedding - if global_config.use_pretrained_word_embedding: - word2vec = Glove840B_300D( - filepath=os.path.join(DATA_DIR, "glove.840B.300d.txt"), - keys=set(word_dict.keys())) - pretrained_word_embedding = utils.get_pretrained_word_embedding( - word2vec=word2vec, word2id=word_dict, config=global_config) - print("pretrained_word_embedding to be load:", - pretrained_word_embedding) - else: - pretrained_word_embedding = None - - # define optimizer - optimizer = utils.getOptimizer(global_config) - - # use cuda or not - if not global_config.has_member('use_cuda'): - if 'CUDA_VISIBLE_DEVICES' in os.environ and os.environ[ - 'CUDA_VISIBLE_DEVICES'] != '': - global_config.use_cuda = True - else: - global_config.use_cuda = False - - global_config.list_config() - - train_and_evaluate( - train_reader, - dev_reader, - test_reader, - net, - optimizer, - global_config, - pretrained_word_embedding, - use_cuda=global_config.use_cuda, - parallel=False) - - -def get_cards(args): - if args.enable_ce: - cards = os.environ.get('CUDA_VISIBLE_DEVICES') - num = len(cards.split(",")) - return num - else: - return args.num_devices - - -if __name__ == "__main__": - main() diff --git a/PaddleRec/text_matching_on_quora/utils.py b/PaddleRec/text_matching_on_quora/utils.py deleted file mode 100755 index 71df5003..00000000 --- a/PaddleRec/text_matching_on_quora/utils.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
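
Before the utils module below, one detail of the deleted training loop above worth noting: because batching used `drop_last=False`, the last batch could be smaller, so per-batch metrics were weighted by `data_size` before averaging (`total_acc += data_size * avg_acc_np[0]`). The same bookkeeping in plain Python, with hypothetical numbers:

```python
def epoch_average(batches):
    """batches: iterable of (batch_size, avg_cost, avg_acc) triples."""
    total_cost = total_acc = count = 0.0
    for size, cost, acc in batches:
        total_cost += size * cost  # weight each batch by its size
        total_acc += size * acc
        count += size
    return total_cost / count, total_acc / count

# three batches, the last one short (drop_last=False)
cost, acc = epoch_average([(128, 0.52, 0.74), (128, 0.49, 0.76), (40, 0.55, 0.70)])
print("epoch cost=%.4f acc=%.4f" % (cost, acc))
```
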
-""" -This module provides utilities for data generator and optimizer definition -""" - -import sys -import time -import numpy as np - -import paddle.fluid as fluid -import paddle -import quora_question_pairs - - -def to_lodtensor(data, place): - """ - convert to LODtensor - """ - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def getOptimizer(global_config): - """ - get Optimizer by config - """ - if global_config.optimizer_type == "adam": - optimizer = fluid.optimizer.Adam( - learning_rate=fluid.layers.exponential_decay( - learning_rate=global_config.learning_rate, - decay_steps=global_config.train_samples_num // - global_config.batch_size, - decay_rate=global_config.lr_decay)) - elif global_config.optimizer_type == "sgd": - optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.exponential_decay( - learning_rate=global_config.learning_rate, - decay_steps=global_config.train_samples_num // - global_config.batch_size, - decay_rate=global_config.lr_decay)) - - elif global_config.optimizer_type == "adagrad": - optimizer = fluid.optimizer.Adagrad( - learning_rate=fluid.layers.exponential_decay( - learning_rate=global_config.learning_rate, - decay_steps=global_config.train_samples_num // - global_config.batch_size, - decay_rate=global_config.lr_decay)) - - return optimizer - - -def get_pretrained_word_embedding(word2vec, word2id, config): - """get pretrained embedding in shape [config.dict_dim, config.emb_dim]""" - print("preparing pretrained word embedding ...") - assert (config.dict_dim >= len(word2id)) - word2id = sorted(word2id.items(), key=lambda x: x[1]) - words = [x[0] for x in word2id] - words = words + [''] * (config.dict_dim - len(words)) - pretrained_emb = [] - for _, word in enumerate(words): - if word in word2vec: - assert (len(word2vec[word] == config.emb_dim)) - if config.embedding_norm: - pretrained_emb.append(word2vec[word] / - np.linalg.norm(word2vec[word])) - else: - pretrained_emb.append(word2vec[word]) - elif config.OOV_fill == 'uniform': - pretrained_emb.append( - np.random.uniform( - -0.05, 0.05, size=[config.emb_dim]).astype(np.float32)) - elif config.OOV_fill == 'normal': - pretrained_emb.append( - np.random.normal( - loc=0.0, scale=0.1, size=[config.emb_dim]).astype( - np.float32)) - else: - print("Unkown OOV fill method: ", OOV_fill) - exit() - word_embedding = np.stack(pretrained_emb) - return word_embedding - - -def getDict(data_type="quora_question_pairs"): - """ - get word2id dict from quora dataset - """ - print("Generating word dict...") - if data_type == "quora_question_pairs": - word_dict = quora_question_pairs.word_dict() - else: - raise RuntimeError("No such dataset") - print("Vocab size: ", len(word_dict)) - return word_dict - - -def duplicate(reader): - """ - duplicate the quora qestion pairs since there are 2 questions in a sample - Input: reader, which yield (question1, question2, label) - Output: reader, which yield (question1, question2, label) and yield (question2, question1, label) - """ - - def duplicated_reader(): - for data in reader(): - (q1, q2, label) = data - yield (q1, q2, label) - yield (q2, q1, label) - - return duplicated_reader - - -def pad(reader, PAD_ID): - """ - Input: reader, yield batches of [(question1, question2, 
label), ... ] - Output: padded_reader, yield batches of [(padded_question1, padded_question2, mask1, mask2, label), ... ] - """ - - assert (isinstance(PAD_ID, int)) - - def padded_reader(): - for batch in reader(): - max_len1 = max([len(data[0]) for data in batch]) - max_len2 = max([len(data[1]) for data in batch]) - - padded_batch = [] - for data in batch: - question1, question2, label = data - seq_len1 = len(question1) - seq_len2 = len(question2) - mask1 = [1] * seq_len1 + [0] * (max_len1 - seq_len1) - mask2 = [1] * seq_len2 + [0] * (max_len2 - seq_len2) - padded_question1 = question1 + [PAD_ID] * (max_len1 - seq_len1) - padded_question2 = question2 + [PAD_ID] * (max_len2 - seq_len2) - padded_question1 = [ - [x] for x in padded_question1 - ] # last dim of questions must be 1, according to fluid's request - padded_question2 = [[x] for x in padded_question2] - assert (len(mask1) == max_len1) - assert (len(mask2) == max_len2) - assert (len(padded_question1) == max_len1) - assert (len(padded_question2) == max_len2) - padded_batch.append( - (padded_question1, padded_question2, mask1, mask2, label)) - yield padded_batch - - return padded_reader - - -def prepare_data(data_type, - word_dict, - batch_size, - buf_size=50000, - duplicate_data=False, - use_pad=False): - """ - prepare data - """ - - PAD_ID = word_dict[''] - - if data_type == "quora_question_pairs": - # train/dev/test reader are batched iters which yield a batch of (question1, question2, label) each time - # qestion1 and question2 are lists of word ID - # label is 0 or 1 - # for example: ([1, 3, 2], [7, 5, 4, 99], 1) - - def prepare_reader(reader): - if duplicate_data: - reader = duplicate(reader) - reader = paddle.batch( - paddle.reader.shuffle( - reader, buf_size=buf_size), - batch_size=batch_size, - drop_last=False) - if use_pad: - reader = pad(reader, PAD_ID=PAD_ID) - return reader - - train_reader = prepare_reader(quora_question_pairs.train(word_dict)) - dev_reader = prepare_reader(quora_question_pairs.dev(word_dict)) - test_reader = prepare_reader(quora_question_pairs.test(word_dict)) - - else: - raise RuntimeError("no such dataset") - - return train_reader, dev_reader, test_reader diff --git a/PaddleRec/word2vec/README.md b/PaddleRec/word2vec/README.md index eae86615..bdff6ea1 100644 --- a/PaddleRec/word2vec/README.md +++ b/PaddleRec/word2vec/README.md @@ -4,8 +4,6 @@ ```text . 
-├── cluster_train.py # 分布式训练函数 -├── cluster_train.sh # 本地模拟多机脚本 ├── train.py # 训练函数 ├── infer.py # 预测脚本 ├── net.py # 网络结构 @@ -97,11 +95,6 @@ python train.py -h OPENBLAS_NUM_THREADS=1 CPU_NUM=5 python train.py --train_data_dir data/convert_text8 --dict_path data/test_build_dict --num_passes 10 --batch_size 100 --model_output_dir v1_cpu5_b100_lr1dir --base_lr 1.0 --print_batch 1000 --with_speed --is_sparse ``` -本地单机模拟多机训练, 目前暂不支持windows。 - -```bash -sh cluster_train.sh -``` 若需要开启shuffle_batch功能,需在命令中加入`--with_shuffle_batch`。单机模拟分布式多机训练,需更改`cluster_train.sh`文件,在各个节点的启动命令中加入`--with_shuffle_batch`。 ## 预测 diff --git a/PaddleRec/word2vec/cluster_train.py b/PaddleRec/word2vec/cluster_train.py deleted file mode 100644 index 11054ce3..00000000 --- a/PaddleRec/word2vec/cluster_train.py +++ /dev/null @@ -1,264 +0,0 @@ -from __future__ import print_function -import argparse -import logging -import os -import time -import math -import random -import numpy as np -import paddle -import paddle.fluid as fluid -import six -import reader -from net import skip_gram_word2vec, skip_gram_word2vec_shuffle_batch - -logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s') -logger = logging.getLogger("fluid") -logger.setLevel(logging.INFO) - - -def parse_args(): - parser = argparse.ArgumentParser( - description="PaddlePaddle Word2vec example") - parser.add_argument( - '--train_data_dir', - type=str, - default='./data/text', - help="The path of taining dataset") - parser.add_argument( - '--base_lr', - type=float, - default=0.01, - help="The number of learing rate (default: 0.01)") - parser.add_argument( - '--save_step', - type=int, - default=500000, - help="The number of step to save (default: 500000)") - parser.add_argument( - '--print_batch', - type=int, - default=100, - help="The number of print_batch (default: 10)") - parser.add_argument( - '--dict_path', - type=str, - default='./data/1-billion_dict', - help="The path of data dict") - parser.add_argument( - '--batch_size', - type=int, - default=500, - help="The size of mini-batch (default:500)") - parser.add_argument( - '--num_passes', - type=int, - default=10, - help="The number of passes to train (default: 10)") - parser.add_argument( - '--model_output_dir', - type=str, - default='models', - help='The path for model to store (default: models)') - parser.add_argument('--nce_num', type=int, default=5, help='nce_num') - parser.add_argument( - '--embedding_size', - type=int, - default=64, - help='sparse feature hashing space for index processing') - parser.add_argument( - '--is_sparse', - action='store_true', - required=False, - default=False, - help='embedding and nce will use sparse or not, (default: False)') - parser.add_argument( - '--with_speed', - action='store_true', - required=False, - default=False, - help='print speed or not , (default: False)') - parser.add_argument( - '--role', type=str, default='pserver', help='trainer or pserver') - parser.add_argument( - '--endpoints', - type=str, - default='127.0.0.1:6000', - help='The pserver endpoints, like: 127.0.0.1:6000, 127.0.0.1:6001') - parser.add_argument( - '--current_endpoint', - type=str, - default='127.0.0.1:6000', - help='The current_endpoint') - parser.add_argument( - '--trainer_id', - type=int, - default=0, - help='trainer id ,only trainer_id=0 save model') - parser.add_argument( - '--trainers', - type=int, - default=1, - help='The num of trianers, (default: 1)') - parser.add_argument( - '--with_shuffle_batch', - action='store_true', - required=False, - default=False, - help='negative 
samples come from shuffle_batch op or not , (default: False)') - return parser.parse_args() - - -def convert_python_to_tensor(weight, batch_size, sample_reader): - def __reader__(): - cs = np.array(weight).cumsum() - result = [[], []] - for sample in sample_reader(): - for i, fea in enumerate(sample): - result[i].append(fea) - if len(result[0]) == batch_size: - tensor_result = [] - for tensor in result: - t = fluid.Tensor() - dat = np.array(tensor, dtype='int64') - if len(dat.shape) > 2: - dat = dat.reshape((dat.shape[0], dat.shape[2])) - elif len(dat.shape) == 1: - dat = dat.reshape((-1, 1)) - t.set(dat, fluid.CPUPlace()) - tensor_result.append(t) - tt = fluid.Tensor() - neg_array = cs.searchsorted(np.random.sample(args.nce_num)) - neg_array = np.tile(neg_array, batch_size) - tt.set( - neg_array.reshape((batch_size, args.nce_num)), - fluid.CPUPlace()) - tensor_result.append(tt) - yield tensor_result - result = [[], []] - - return __reader__ - - -def train_loop(args, train_program, data_loader, loss, trainer_id): - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - print("CPU_NUM:" + str(os.getenv("CPU_NUM"))) - - train_exe = exe - - for pass_id in range(args.num_passes): - data_loader.start() - time.sleep(10) - epoch_start = time.time() - batch_id = 0 - start = time.time() - try: - while True: - - loss_val = train_exe.run(fetch_list=[loss.name]) - loss_val = np.mean(loss_val) - - if batch_id % args.print_batch == 0: - logger.info( - "TRAIN --> pass: {} batch: {} loss: {} reader queue:{}". - format(pass_id, batch_id, - loss_val.mean(), data_loader.queue.size())) - if args.with_speed: - if batch_id % 500 == 0 and batch_id != 0: - elapsed = (time.time() - start) - start = time.time() - samples = 1001 * args.batch_size * int( - os.getenv("CPU_NUM")) - logger.info("Time used: {}, Samples/Sec: {}".format( - elapsed, samples / elapsed)) - - if batch_id % args.save_step == 0 and batch_id != 0: - model_dir = args.model_output_dir + '/pass-' + str( - pass_id) + ('/batch-' + str(batch_id)) - if trainer_id == 0: - fluid.save(fluid.default_main_program(), model_path=model_dir) - print("model saved in %s" % model_dir) - batch_id += 1 - - except fluid.core.EOFException: - data_loader.reset() - epoch_end = time.time() - logger.info("Epoch: {0}, Train total expend: {1} ".format( - pass_id, epoch_end - epoch_start)) - model_dir = args.model_output_dir + '/pass-' + str(pass_id) - if trainer_id == 0: - fluid.save(fluid.default_main_program(), model_path=model_dir) - print("model saved in %s" % model_dir) - - -def GetFileList(data_path): - return os.listdir(data_path) - - -def train(args): - - if not os.path.isdir(args.model_output_dir) and args.trainer_id == 0: - os.mkdir(args.model_output_dir) - - filelist = GetFileList(args.train_data_dir) - word2vec_reader = reader.Word2VecReader(args.dict_path, args.train_data_dir, - filelist, 0, 1) - - logger.info("dict_size: {}".format(word2vec_reader.dict_size)) - - if args.with_shuffle_batch: - loss, data_loader = skip_gram_word2vec_shuffle_batch( - word2vec_reader.dict_size, - args.embedding_size, - is_sparse=args.is_sparse, - neg_num=args.nce_num) - data_loader.set_sample_generator(word2vec_reader.train(), batch_size=args.batch_size, drop_last=True) - else: - np_power = np.power(np.array(word2vec_reader.id_frequencys), 0.75) - id_frequencys_pow = np_power / np_power.sum() - - loss, data_loader = skip_gram_word2vec( - word2vec_reader.dict_size, - args.embedding_size, - is_sparse=args.is_sparse, - 
neg_num=args.nce_num) - - data_loader.set_batch_generator( - convert_python_to_tensor(id_frequencys_pow, args.batch_size, word2vec_reader.train())) - - optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.exponential_decay( - learning_rate=args.base_lr, - decay_steps=100000, - decay_rate=0.999, - staircase=True)) - - optimizer.minimize(loss) - - logger.info("run dist training") - - t = fluid.DistributeTranspiler() - t.transpile( - args.trainer_id, pservers=args.endpoints, trainers=args.trainers) - if args.role == "pserver": - print("run psever") - pserver_prog = t.get_pserver_program(args.current_endpoint) - pserver_startup = t.get_startup_program(args.current_endpoint, - pserver_prog) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif args.role == "trainer": - print("run trainer") - train_loop(args, - t.get_trainer_program(), data_loader, loss, - args.trainer_id) - - -if __name__ == '__main__': - args = parse_args() - train(args) diff --git a/PaddleRec/word2vec/cluster_train.sh b/PaddleRec/word2vec/cluster_train.sh deleted file mode 100644 index 756196fd..00000000 --- a/PaddleRec/word2vec/cluster_train.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash - -#export GLOG_v=30 -#export GLOG_logtostderr=1 - -# start pserver0 -export CPU_NUM=5 -export FLAGS_rpc_deadline=3000000 -python cluster_train.py \ - --train_data_dir data/convert_text8 \ - --dict_path data/test_build_dict \ - --batch_size 100 \ - --model_output_dir dis_model \ - --base_lr 1.0 \ - --print_batch 1 \ - --is_sparse \ - --with_speed \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6000 \ - --trainers 2 \ - > pserver0.log 2>&1 & - -python cluster_train.py \ - --train_data_dir data/convert_text8 \ - --dict_path data/test_build_dict \ - --batch_size 100 \ - --model_output_dir dis_model \ - --base_lr 1.0 \ - --print_batch 1 \ - --is_sparse \ - --with_speed \ - --role pserver \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --current_endpoint 127.0.0.1:6001 \ - --trainers 2 \ - > pserver1.log 2>&1 & - -# start trainer0 -python cluster_train.py \ - --train_data_dir data/convert_text8 \ - --dict_path data/test_build_dict \ - --batch_size 100 \ - --model_output_dir dis_model \ - --base_lr 1.0 \ - --print_batch 1000 \ - --is_sparse \ - --with_speed \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 0 \ - > trainer0.log 2>&1 & -# start trainer1 -python cluster_train.py \ - --train_data_dir data/convert_text8 \ - --dict_path data/test_build_dict \ - --batch_size 100 \ - --model_output_dir dis_model \ - --base_lr 1.0 \ - --print_batch 1000 \ - --is_sparse \ - --with_speed \ - --role trainer \ - --endpoints 127.0.0.1:6000,127.0.0.1:6001 \ - --trainers 2 \ - --trainer_id 1 \ - > trainer1.log 2>&1 & -- GitLab
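
Two closing notes on the deleted word2vec cluster files. First, `convert_python_to_tensor` in `cluster_train.py` drew its NCE negatives by inverse-CDF sampling over word frequencies raised to the 0.75 power (`cs = np.array(weight).cumsum()` followed by `cs.searchsorted(np.random.sample(...))`). A self-contained sketch of that trick, with toy counts:

```python
import numpy as np

def make_negative_sampler(id_frequencies):
    """Sample word ids ~ freq^0.75 via cumsum + searchsorted (inverse CDF)."""
    probs = np.power(np.asarray(id_frequencies, dtype="float64"), 0.75)
    cdf = np.cumsum(probs / probs.sum())  # normalized cumulative distribution
    return lambda n: cdf.searchsorted(np.random.random_sample(n))

sample = make_negative_sampler([100, 50, 25, 10, 5])  # toy corpus counts
print(sample(5))  # e.g. [0 2 0 1 3]: frequent ids are drawn more often
```
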
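Second, `cluster_train.sh` hard-coded four near-identical commands (two pservers, two trainers on `127.0.0.1:6000`/`6001`, differing only in role flags and print interval). If the local simulation ever needs to be reproduced, the commands can be generated instead of copy-pasted; a hypothetical helper, not from the repo, mirroring the flags above:

```python
# Generate the four launch commands from the deleted cluster_train.sh
# (two parameter servers + two trainers on localhost) and print them.
ENDPOINTS = ["127.0.0.1:6000", "127.0.0.1:6001"]
COMMON = ("python cluster_train.py --train_data_dir data/convert_text8"
          " --dict_path data/test_build_dict --batch_size 100"
          " --model_output_dir dis_model --base_lr 1.0 --is_sparse"
          " --with_speed --endpoints " + ",".join(ENDPOINTS) + " --trainers 2")

def launch_commands():
    cmds = [COMMON + " --print_batch 1 --role pserver --current_endpoint " + ep
            for ep in ENDPOINTS]  # one pserver per endpoint
    cmds += [COMMON + " --print_batch 1000 --role trainer --trainer_id %d" % tid
             for tid in range(len(ENDPOINTS))]  # trainer_id 0 saves the model
    return cmds

for cmd in launch_commands():
    print(cmd)
```
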