diff --git a/README.md b/README.md index 202d516dcfcfcb17a320cabd3428cdeec0da6e52..b5939cc181d95c27abc8becfeb553eb8e1c12107 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,7 @@ | Rank | [Wide&Deep](models/rank/wide_deep/model.py) | ✓ | x | ✓ | x | [DLRS 2016][Wide & Deep Learning for Recommender Systems](https://dl.acm.org/doi/pdf/10.1145/2988450.2988454) | | Rank | [FGCNN](models/rank/fgcnn/model.py) | ✓ | ✓ | ✓ | ✓ | [WWW 2019][Feature Generation by Convolutional Neural Network for Click-Through Rate Prediction](https://arxiv.org/pdf/1904.04447.pdf) | | Rank | [Fibinet](models/rank/fibinet/model.py) | ✓ | ✓ | ✓ | ✓ | [RecSys19][FiBiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction]( https://arxiv.org/pdf/1905.09433.pdf) | + | Rank | [Flen](models/rank/flen/model.py) | ✓ | ✓ | ✓ | ✓ | [2019][FLEN: Leveraging Field for Scalable CTR Prediction]( https://arxiv.org/pdf/1911.04690.pdf) | | Multi-Task | [ESMM](models/multitask/esmm/model.py) | ✓ | ✓ | ✓ | ✓ | [SIGIR 2018][Entire Space Multi-Task Model: An Effective Approach for Estimating Post-Click Conversion Rate](https://arxiv.org/abs/1804.07931) | | Multi-Task | [MMOE](models/multitask/mmoe/model.py) | ✓ | ✓ | ✓ | ✓ | [KDD 2018][Modeling Task Relationships in Multi-task Learning with Multi-gate Mixture-of-Experts](https://dl.acm.org/doi/abs/10.1145/3219819.3220007) | | Multi-Task | [ShareBottom](models/multitask/share-bottom/model.py) | ✓ | ✓ | ✓ | ✓ | [1998][Multitask learning](http://reports-archive.adm.cs.cmu.edu/anon/1997/CMU-CS-97-203.pdf) | @@ -90,7 +91,7 @@ > - Other installation problems can be raised in [Paddle Issue](https://github.com/PaddlePaddle/Paddle/issues) or [PaddleRec Issue](https://github.com/PaddlePaddle/PaddleRec/issues) 2. 
**Install by source code** - + - Install PaddlePaddle ```shell diff --git a/README_CN.md b/README_CN.md index 3a6a63e700bd205c2ea6b15a0232f6deb8a42bef..3b5fe85808f52a6811951ebf7efe57dd3401ba9e 100644 --- a/README_CN.md +++ b/README_CN.md @@ -65,6 +65,7 @@ | 排序 | [Wide&Deep](models/rank/wide_deep/model.py) | ✓ | x | ✓ | x | [DLRS 2016][Wide & Deep Learning for Recommender Systems](https://dl.acm.org/doi/pdf/10.1145/2988450.2988454) | | 排序 | [FGCNN](models/rank/fgcnn/model.py) | ✓ | ✓ | ✓ | ✓ | [WWW 2019][Feature Generation by Convolutional Neural Network for Click-Through Rate Prediction](https://arxiv.org/pdf/1904.04447.pdf) | | 排序 | [Fibinet](models/rank/fibinet/model.py) | ✓ | ✓ | ✓ | ✓ | [RecSys19][FiBiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction]( https://arxiv.org/pdf/1905.09433.pdf) | + | 排序 | [Flen](models/rank/flen/model.py) | ✓ | ✓ | ✓ | ✓ | [2019][FLEN: Leveraging Field for Scalable CTR Prediction]( https://arxiv.org/pdf/1911.04690.pdf) | | 多任务 | [ESMM](models/multitask/esmm/model.py) | ✓ | ✓ | ✓ | ✓ | [SIGIR 2018][Entire Space Multi-Task Model: An Effective Approach for Estimating Post-Click Conversion Rate](https://arxiv.org/abs/1804.07931) | | 多任务 | [MMOE](models/multitask/mmoe/model.py) | ✓ | ✓ | ✓ | ✓ | [KDD 2018][Modeling Task Relationships in Multi-task Learning with Multi-gate Mixture-of-Experts](https://dl.acm.org/doi/abs/10.1145/3219819.3220007) | | 多任务 | [ShareBottom](models/multitask/share-bottom/model.py) | ✓ | ✓ | ✓ | ✓ | [1998][Multitask learning](http://reports-archive.adm.cs.cmu.edu/anon/1997/CMU-CS-97-203.pdf) | diff --git a/doc/imgs/flen.png b/doc/imgs/flen.png new file mode 100644 index 0000000000000000000000000000000000000000..b8f6cbbe5833237b7a54c60801a142182970fa9b Binary files /dev/null and b/doc/imgs/flen.png differ diff --git a/models/rank/fibinet/config.yaml b/models/rank/fibinet/config.yaml index 
eed0fbe888302298c66128af755fea37a9eb62bf..091915e6a41ec56824557426553c0d062d26127f 100644 --- a/models/rank/fibinet/config.yaml +++ b/models/rank/fibinet/config.yaml @@ -59,8 +59,8 @@ runner: device: cpu save_checkpoint_interval: 2 # save model interval of epochs save_inference_interval: 4 # save inference - save_checkpoint_path: "increment_model" # save checkpoint path - save_inference_path: "inference" # save inference path + save_checkpoint_path: "increment_model_fibinet" # save checkpoint path + save_inference_path: "inference_fibinet" # save inference path save_inference_feed_varnames: [] # feed vars of save inference save_inference_fetch_varnames: [] # fetch vars of save inference init_model_path: "" # load model path @@ -75,8 +75,8 @@ runner: device: gpu save_checkpoint_interval: 1 # save model interval of epochs save_inference_interval: 4 # save inference - save_checkpoint_path: "increment_model" # save checkpoint path - save_inference_path: "inference" # save inference path + save_checkpoint_path: "increment_model_fibinet" # save checkpoint path + save_inference_path: "inference_fibinet" # save inference path save_inference_feed_varnames: [] # feed vars of save inference save_inference_fetch_varnames: [] # fetch vars of save inference init_model_path: "" # load model path @@ -87,14 +87,14 @@ runner: class: infer # device to run training or infer device: cpu - init_model_path: "increment_model" # load model path + init_model_path: "increment_model_fibinet" # load model path phases: [phase2] - name: single_gpu_infer class: infer # device to run training or infer device: gpu - init_model_path: "increment_model" # load model path + init_model_path: "increment_model_fibinet" # load model path phases: [phase2] # runner will run all the phase in each epoch diff --git a/models/rank/flen/README.md b/models/rank/flen/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9dafeac6958ffb4f51c8f54527976fc4d431bf71 --- /dev/null +++ 
b/models/rank/flen/README.md @@ -0,0 +1,130 @@ +# FLEN + + 以下是本例的简要目录结构及说明: + +``` +├── data #样例数据 + ├── sample_data + ├── train + ├── sample_train.txt + ├── run.sh + ├── get_slot_data.py +├── __init__.py +├── README.md # 文档 +├── model.py #模型文件 +├── config.yaml #配置文件 +``` + +## 简介 + +[《FLEN: Leveraging Field for Scalable CTR Prediction》](https://arxiv.org/pdf/1911.04690.pdf)文章提出了field-wise bi-interaction pooling技术,解决了在大规模应用特征field信息时存在的时间复杂度和空间复杂度高的困境,同时提出了一种缓解梯度耦合问题的方法dicefactor。该模型已应用于美图的大规模推荐系统中,持续稳定地取得业务效果的全面提升。 + +本项目在avazu数据集上验证模型效果 + +## 数据下载及预处理 + +## 环境 + +PaddlePaddle 1.7.2 + +python3.7 + +PaddleRec + +## 单机训练 + +CPU环境 + +在config.yaml文件中设置好设备,epochs等。 + +``` +# select runner by name +mode: [single_cpu_train, single_cpu_infer] +# config of each runner. +# runner is a kind of paddle training class, which wraps the train/infer process. +runner: +- name: single_cpu_train + class: train + # num of epochs + epochs: 4 + # device to run training or infer + device: cpu + save_checkpoint_interval: 2 # save model interval of epochs + save_inference_interval: 4 # save inference + save_checkpoint_path: "increment_model" # save checkpoint path + save_inference_path: "inference" # save inference path + save_inference_feed_varnames: [] # feed vars of save inference + save_inference_fetch_varnames: [] # fetch vars of save inference + init_model_path: "" # load model path + print_interval: 10 + phases: [phase1] +``` + +## 单机预测 + +CPU环境 + +在config.yaml文件中设置好epochs、device等参数。 + +``` +- name: single_cpu_infer + class: infer + # num of epochs + epochs: 1 + # device to run training or infer + device: cpu #选择预测的设备 + init_model_path: "increment_model" # load model path + phases: [phase2] +``` + +## 运行 + +``` +python -m paddlerec.run -m paddlerec.models.rank.flen +``` + +## 模型效果 + +在样例数据上测试模型 + +训练: + +``` +0702 13:38:20.903220 7368 parallel_executor.cc:440] The Program will be executed on CPU using ParallelExecutor, 2 cards are used, so 2 programs are executed in parallel. 
+I0702 13:38:20.925912 7368 parallel_executor.cc:307] Inplace strategy is enabled, when build_strategy.enable_inplace = True +I0702 13:38:20.933356 7368 parallel_executor.cc:375] Garbage collection strategy is enabled, when FLAGS_eager_delete_tensor_gb = 0 +batch: 2, AUC: [0.09090909 0. ], BATCH_AUC: [0.09090909 0. ] +batch: 4, AUC: [0.31578947 0.29411765], BATCH_AUC: [0.31578947 0.29411765] +batch: 6, AUC: [0.41333333 0.33333333], BATCH_AUC: [0.41333333 0.33333333] +batch: 8, AUC: [0.4453125 0.44166667], BATCH_AUC: [0.4453125 0.44166667] +batch: 10, AUC: [0.39473684 0.38888889], BATCH_AUC: [0.44117647 0.41176471] +batch: 12, AUC: [0.41860465 0.45535714], BATCH_AUC: [0.5078125 0.54545455] +batch: 14, AUC: [0.43413729 0.42746615], BATCH_AUC: [0.56666667 0.56 ] +batch: 16, AUC: [0.46433566 0.47460087], BATCH_AUC: [0.53 0.59247649] +batch: 18, AUC: [0.44009217 0.44642857], BATCH_AUC: [0.46 0.47] +batch: 20, AUC: [0.42705314 0.43781095], BATCH_AUC: [0.45878136 0.4874552 ] +batch: 22, AUC: [0.45176471 0.46011281], BATCH_AUC: [0.48046875 0.45878136] +batch: 24, AUC: [0.48375 0.48910256], BATCH_AUC: [0.56630824 0.59856631] +epoch 0 done, use time: 0.21532440185546875 +PaddleRec Finish +``` + +预测 + +``` +PaddleRec: Runner single_cpu_infer Begin +Executor Mode: infer +processor_register begin +Running SingleInstance. +Running SingleNetwork. +QueueDataset can not support PY3, change to DataLoader +QueueDataset can not support PY3, change to DataLoader +Running SingleInferStartup. +Running SingleInferRunner. 
+load persistables from increment_model/0 +batch: 20, AUC: [0.49121353], BATCH_AUC: [0.66176471] +batch: 40, AUC: [0.51156463], BATCH_AUC: [0.55197133] +Infer phase2 of 0 done, use time: 0.3941819667816162 +PaddleRec Finish +``` + diff --git a/models/rank/flen/__init__.py b/models/rank/flen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..abf198b97e6e818e1fbe59006f98492640bcee54 --- /dev/null +++ b/models/rank/flen/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/models/rank/flen/config.yaml b/models/rank/flen/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a2dad399fd98a2a888fb2d3efbfa40f52f273de2 --- /dev/null +++ b/models/rank/flen/config.yaml @@ -0,0 +1,110 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# workspace +workspace: "paddlerec.models.rank.flen" + +# list of dataset +dataset: +- name: dataloader_train # name of dataset to distinguish different datasets + batch_size: 2 + type: QueueDataset + data_path: "{workspace}/data/sample_data/train" + sparse_slots: "click user_0 user_1 user_2 user_3 user_4 user_5 user_6 user_7 user_8 user_9 user_10 user_11 item_0 item_1 item_2 contex_0 contex_1 contex_2 contex_3 contex_4 contex_5" + dense_slots: "" +- name: dataset_infer # name + batch_size: 2 + type: QueueDataset + data_path: "{workspace}/data/sample_data/train" + sparse_slots: "click user_0 user_1 user_2 user_3 user_4 user_5 user_6 user_7 user_8 user_9 user_10 user_11 item_0 item_1 item_2 contex_0 contex_1 contex_2 contex_3 contex_4 contex_5" + dense_slots: "" + +# hyper parameters of user-defined network +hyper_parameters: + # optimizer config + optimizer: + class: Adam + learning_rate: 0.001 + strategy: async + # user-defined pairs + sparse_inputs_slots: 21 + sparse_feature_number: 100 + sparse_feature_dim: 8 + dense_input_dim: 1 + dropout_rate: 0.5 + +# select runner by name +mode: [single_cpu_train, single_cpu_infer] +# config of each runner. +# runner is a kind of paddle training class, which wraps the train/infer process. 
+runner: +- name: single_cpu_train + class: train + # num of epochs + epochs: 1 + # device to run training or infer + device: cpu + save_checkpoint_interval: 1 # save model interval of epochs + save_inference_interval: 4 # save inference + save_checkpoint_path: "increment_model_flen" # save checkpoint path + save_inference_path: "inference_flen" # save inference path + save_inference_feed_varnames: [] # feed vars of save inference + save_inference_fetch_varnames: [] # fetch vars of save inference + init_model_path: "" # load model path + print_interval: 2 + phases: [phase1] + +- name: single_gpu_train + class: train + # num of epochs + epochs: 1 + # device to run training or infer + device: gpu + save_checkpoint_interval: 1 # save model interval of epochs + save_inference_interval: 4 # save inference + save_checkpoint_path: "increment_model_flen" # save checkpoint path + save_inference_path: "inference_flen" # save inference path + save_inference_feed_varnames: [] # feed vars of save inference + save_inference_fetch_varnames: [] # fetch vars of save inference + init_model_path: "" # load model path + print_interval: 2 + phases: [phase1] + +- name: single_cpu_infer + class: infer + # device to run training or infer + device: cpu + init_model_path: "increment_model_flen" # load model path + phases: [phase2] + +- name: single_gpu_infer + class: infer + # device to run training or infer + device: gpu + init_model_path: "increment_model_flen" # load model path + phases: [phase2] + +# runner will run all the phase in each epoch +phase: +- name: phase1 + model: "{workspace}/model.py" # user-defined model + dataset_name: dataloader_train # select dataset by name + thread_num: 2 + +- name: phase2 + model: "{workspace}/model.py" # user-defined model + dataset_name: dataset_infer # select dataset by name + thread_num: 2 + + diff --git a/models/rank/flen/data/get_slot_data.py b/models/rank/flen/data/get_slot_data.py new file mode 100644 index 
0000000000000000000000000000000000000000..3bb390d05e885f8e9db300d97cc9be46b6ace065 --- /dev/null +++ b/models/rank/flen/data/get_slot_data.py @@ -0,0 +1,51 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.fluid.incubate.data_generator as dg + + +class CriteoDataset(dg.MultiSlotDataGenerator): + """ + DacDataset: inheritance MultiSlotDataGeneratior, Implement data reading + Help document: http://wiki.baidu.com/pages/viewpage.action?pageId=728820675 + """ + + def generate_sample(self, line): + """ + Read the data line by line and process it as a dictionary + """ + + def reader(): + """ + This function needs to be implemented by the user, based on data format + """ + features = line.strip().split(',') + + label = [int(features[0])] + + s = "click:" + str(label[0]) + for i, elem in enumerate(features[1:13]): + s += " user_" + str(i) + ":" + str(elem) + for i, elem in enumerate(features[13:16]): + s += " item_" + str(i) + ":" + str(elem) + for i, elem in enumerate(features[16:]): + s += " contex_" + str(i) + ":" + str(elem) + print(s.strip()) + yield None + + return reader + + +d = CriteoDataset() +d.run_from_stdin() diff --git a/models/rank/flen/data/run.sh b/models/rank/flen/data/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..dafe5df43d069a63b076b8bf006ecdbcc3c56e30 --- /dev/null +++ b/models/rank/flen/data/run.sh @@ -0,0 +1,6 @@ +mkdir train + +for i in `ls 
./train_data` +do + cat train_data/$i | python get_slot_data.py > train/$i +done diff --git a/models/rank/flen/data/sample_data/train/sample_train.txt b/models/rank/flen/data/sample_data/train/sample_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..33cf24d3d56bd3dafd9e94312f0f8315a4de72ac --- /dev/null +++ b/models/rank/flen/data/sample_data/train/sample_train.txt @@ -0,0 +1,100 @@ +click:0 user_0:8 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:16 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:1 contex_5:0 +click:0 user_0:6 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:31 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:6 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:43 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:8 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:26 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:21 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:0 user_7:11 user_8:1 user_9:34 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:21 contex_2:12 contex_3:0 contex_4:0 contex_5:0 +click:0 user_0:14 user_1:1 user_2:0 user_3:7 user_4:0 user_5:7 user_6:5 user_7:9 user_8:1 user_9:43 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:15 contex_2:14 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:25 user_1:1 user_2:0 user_3:16 user_4:0 user_5:1 user_6:0 user_7:11 user_8:1 user_9:56 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:13 contex_2:4 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:28 user_1:1 user_2:0 user_3:18 user_4:2 user_5:1 
user_6:0 user_7:2 user_8:1 user_9:57 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:18 contex_2:11 contex_3:5 contex_4:0 contex_5:0 +click:1 user_0:9 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:24 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:1 contex_5:0 +click:0 user_0:37 user_1:1 user_2:0 user_3:24 user_4:2 user_5:3 user_6:13 user_7:2 user_8:0 user_9:13 user_10:0 user_11:9 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:11 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:18 user_1:1 user_2:0 user_3:9 user_4:1 user_5:1 user_6:1 user_7:4 user_8:1 user_9:46 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:18 contex_2:11 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:3 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:6 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:27 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:11 user_7:11 user_8:1 user_9:39 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:14 contex_2:2 contex_3:5 contex_4:1 contex_5:0 +click:0 user_0:22 user_1:1 user_2:0 user_3:14 user_4:0 user_5:9 user_6:5 user_7:6 user_8:1 user_9:0 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:15 contex_2:14 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:31 user_1:1 user_2:0 user_3:17 user_4:0 user_5:8 user_6:0 user_7:5 user_8:1 user_9:53 user_10:1 user_11:8 item_0:7 item_1:5 item_2:1 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:2 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:43 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:19 user_1:1 user_2:0 user_3:11 user_4:1 user_5:1 user_6:0 user_7:3 user_8:1 user_9:1 user_10:1 
user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:16 contex_2:13 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:10 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:58 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:1 contex_5:0 +click:0 user_0:1 user_1:1 user_2:0 user_3:1 user_4:1 user_5:1 user_6:0 user_7:3 user_8:1 user_9:2 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:2 contex_2:5 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:32 user_1:1 user_2:0 user_3:19 user_4:2 user_5:2 user_6:8 user_7:7 user_8:1 user_9:55 user_10:1 user_11:8 item_0:5 item_1:5 item_2:4 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:24 user_1:1 user_2:0 user_3:16 user_4:0 user_5:1 user_6:0 user_7:11 user_8:1 user_9:10 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:6 contex_2:16 contex_3:2 contex_4:0 contex_5:0 +click:0 user_0:8 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:3 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:26 user_1:1 user_2:0 user_3:16 user_4:0 user_5:1 user_6:0 user_7:11 user_8:1 user_9:25 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:6 contex_2:16 contex_3:2 contex_4:0 contex_5:0 +click:1 user_0:3 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:37 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:8 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:6 user_7:8 user_8:1 user_9:63 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:35 user_1:1 user_2:0 user_3:22 user_4:2 user_5:0 user_6:0 user_7:9 user_8:2 user_9:51 user_10:2 user_11:10 item_0:18 item_1:3 item_2:1 contex_0:1 
contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:31 user_1:1 user_2:0 user_3:17 user_4:0 user_5:8 user_6:16 user_7:5 user_8:1 user_9:29 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:18 contex_2:11 contex_3:5 contex_4:0 contex_5:0 +click:1 user_0:33 user_1:1 user_2:0 user_3:21 user_4:2 user_5:6 user_6:9 user_7:7 user_8:1 user_9:69 user_10:1 user_11:6 item_0:4 item_1:0 item_2:1 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:26 user_1:1 user_2:0 user_3:16 user_4:0 user_5:1 user_6:0 user_7:11 user_8:1 user_9:65 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:6 contex_2:16 contex_3:2 contex_4:0 contex_5:0 +click:0 user_0:8 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:71 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:10 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:38 user_10:1 user_11:3 item_0:6 item_1:0 item_2:3 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:2 contex_5:0 +click:0 user_0:13 user_1:1 user_2:0 user_3:5 user_4:2 user_5:0 user_6:0 user_7:0 user_8:1 user_9:67 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:20 contex_2:19 contex_3:1 contex_4:0 contex_5:0 +click:1 user_0:10 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:6 user_7:8 user_8:1 user_9:20 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:2 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:65 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:25 user_1:1 user_2:0 user_3:16 user_4:0 user_5:1 user_6:0 user_7:11 user_8:1 user_9:65 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:6 contex_2:16 contex_3:2 contex_4:0 
contex_5:0 +click:0 user_0:20 user_1:1 user_2:0 user_3:12 user_4:2 user_5:5 user_6:14 user_7:7 user_8:1 user_9:64 user_10:1 user_11:2 item_0:13 item_1:0 item_2:3 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:2 contex_5:0 +click:0 user_0:27 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:0 user_7:11 user_8:1 user_9:8 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:0 contex_2:1 contex_3:4 contex_4:0 contex_5:0 +click:1 user_0:21 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:3 user_7:11 user_8:1 user_9:9 user_10:1 user_11:8 item_0:0 item_1:1 item_2:5 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:21 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:0 user_7:11 user_8:1 user_9:15 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:0 contex_2:1 contex_3:4 contex_4:0 contex_5:0 +click:1 user_0:8 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:12 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:6 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:17 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:23 user_1:1 user_2:0 user_3:15 user_4:0 user_5:9 user_6:4 user_7:6 user_8:1 user_9:27 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:10 contex_2:17 contex_3:6 contex_4:0 contex_5:0 +click:0 user_0:10 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:5 user_10:1 user_11:1 item_0:14 item_1:0 item_2:3 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:35 user_1:1 user_2:0 user_3:22 user_4:2 user_5:0 user_6:0 user_7:9 user_8:2 user_9:66 user_10:2 user_11:5 item_0:11 item_1:3 item_2:1 contex_0:1 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:1 user_0:34 user_1:1 user_2:0 
user_3:20 user_4:2 user_5:4 user_6:17 user_7:10 user_8:0 user_9:62 user_10:0 user_11:4 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:11 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:11 user_1:1 user_2:0 user_3:3 user_4:2 user_5:3 user_6:4 user_7:2 user_8:1 user_9:33 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:3 contex_2:18 contex_3:5 contex_4:0 contex_5:0 +click:1 user_0:3 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:54 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:15 user_1:1 user_2:0 user_3:8 user_4:1 user_5:1 user_6:0 user_7:1 user_8:1 user_9:4 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:18 contex_2:11 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:14 user_1:1 user_2:0 user_3:7 user_4:0 user_5:7 user_6:0 user_7:9 user_8:1 user_9:30 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:8 contex_2:3 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:24 user_1:1 user_2:0 user_3:16 user_4:0 user_5:1 user_6:0 user_7:11 user_8:1 user_9:7 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:6 contex_2:16 contex_3:2 contex_4:0 contex_5:0 +click:1 user_0:26 user_1:1 user_2:0 user_3:16 user_4:0 user_5:1 user_6:0 user_7:11 user_8:1 user_9:41 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:5 contex_2:10 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:7 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:11 user_10:1 user_11:8 item_0:2 item_1:0 item_2:1 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:1 user_0:21 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:10 user_7:11 user_8:1 user_9:58 user_10:1 user_11:8 item_0:9 item_1:0 item_2:2 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:1 contex_5:0 +click:1 user_0:3 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 
user_8:1 user_9:52 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:3 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:5 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:12 user_1:1 user_2:0 user_3:4 user_4:2 user_5:1 user_6:12 user_7:2 user_8:1 user_9:68 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:8 contex_2:3 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:30 user_1:1 user_2:0 user_3:18 user_4:2 user_5:1 user_6:0 user_7:2 user_8:1 user_9:61 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:18 contex_2:11 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:6 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:5 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:21 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:0 user_7:11 user_8:1 user_9:49 user_10:1 user_11:8 item_0:1 item_1:0 item_2:5 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:21 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:0 user_7:11 user_8:1 user_9:32 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:0 contex_2:1 contex_3:4 contex_4:0 contex_5:0 +click:1 user_0:27 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:3 user_7:11 user_8:1 user_9:28 user_10:1 user_11:8 item_0:0 item_1:1 item_2:5 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:2 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:65 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:16 user_1:1 user_2:0 user_3:6 user_4:2 user_5:1 user_6:15 user_7:2 user_8:1 user_9:21 user_10:1 user_11:8 item_0:16 
item_1:3 item_2:0 contex_0:0 contex_1:9 contex_2:6 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:7 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:33 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:25 user_1:1 user_2:0 user_3:16 user_4:0 user_5:1 user_6:0 user_7:11 user_8:1 user_9:7 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:6 contex_2:16 contex_3:2 contex_4:0 contex_5:0 +click:1 user_0:10 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:23 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:18 user_1:1 user_2:0 user_3:9 user_4:1 user_5:1 user_6:2 user_7:4 user_8:1 user_9:67 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:18 contex_2:11 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:33 user_1:1 user_2:0 user_3:21 user_4:2 user_5:6 user_6:9 user_7:7 user_8:1 user_9:47 user_10:1 user_11:8 item_0:17 item_1:4 item_2:1 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:1 user_0:2 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:6 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:1 contex_2:7 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:9 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:71 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:19 user_1:1 user_2:0 user_3:11 user_4:1 user_5:1 user_6:0 user_7:3 user_8:1 user_9:22 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:17 contex_2:8 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:24 user_1:1 user_2:0 user_3:16 user_4:0 user_5:1 user_6:0 user_7:11 user_8:1 user_9:65 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:6 contex_2:16 
contex_3:2 contex_4:0 contex_5:0 +click:0 user_0:7 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:65 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:9 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:6 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:4 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:19 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:17 user_1:0 user_2:1 user_3:10 user_4:1 user_5:1 user_6:0 user_7:4 user_8:1 user_9:36 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:7 contex_2:9 contex_3:2 contex_4:0 contex_5:0 +click:0 user_0:21 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:0 user_7:11 user_8:1 user_9:44 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:19 contex_2:0 contex_3:2 contex_4:0 contex_5:0 +click:1 user_0:25 user_1:1 user_2:0 user_3:16 user_4:0 user_5:1 user_6:0 user_7:11 user_8:1 user_9:57 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:18 contex_2:11 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:21 user_1:1 user_2:0 user_3:13 user_4:0 user_5:0 user_6:0 user_7:11 user_8:1 user_9:50 user_10:1 user_11:7 item_0:3 item_1:0 item_2:4 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:1 contex_5:0 +click:0 user_0:0 user_1:1 user_2:0 user_3:0 user_4:1 user_5:1 user_6:11 user_7:3 user_8:1 user_9:42 user_10:1 user_11:8 item_0:15 item_1:2 item_2:1 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:5 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:6 user_7:8 user_8:1 user_9:52 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:38 
user_1:1 user_2:0 user_3:23 user_4:1 user_5:3 user_6:0 user_7:2 user_8:0 user_9:45 user_10:0 user_11:0 item_0:10 item_1:3 item_2:0 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:0 user_1:1 user_2:0 user_3:0 user_4:1 user_5:1 user_6:11 user_7:3 user_8:1 user_9:33 user_10:1 user_11:8 item_0:15 item_1:2 item_2:1 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:4 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:6 user_7:8 user_8:1 user_9:71 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:2 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:31 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:2 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:6 user_7:8 user_8:1 user_9:35 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:1 user_0:29 user_1:1 user_2:0 user_3:18 user_4:2 user_5:1 user_6:0 user_7:2 user_8:1 user_9:6 user_10:1 user_11:8 item_0:8 item_1:0 item_2:5 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:1 user_0:36 user_1:1 user_2:0 user_3:24 user_4:2 user_5:3 user_6:13 user_7:2 user_8:0 user_9:13 user_10:0 user_11:9 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:11 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:6 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:70 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:4 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:6 user_7:8 user_8:1 user_9:14 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:5 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 
user_7:8 user_8:1 user_9:59 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:7 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:6 user_7:8 user_8:1 user_9:63 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:1 user_0:0 user_1:1 user_2:0 user_3:0 user_4:1 user_5:1 user_6:11 user_7:3 user_8:1 user_9:40 user_10:1 user_11:8 item_0:15 item_1:2 item_2:1 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:33 user_1:1 user_2:0 user_3:21 user_4:2 user_5:6 user_6:9 user_7:7 user_8:1 user_9:48 user_10:1 user_11:8 item_0:12 item_1:5 item_2:1 contex_0:0 contex_1:12 contex_2:15 contex_3:3 contex_4:0 contex_5:0 +click:0 user_0:15 user_1:1 user_2:0 user_3:8 user_4:1 user_5:1 user_6:0 user_7:1 user_8:1 user_9:60 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:1 contex_1:18 contex_2:11 contex_3:5 contex_4:0 contex_5:0 +click:0 user_0:3 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:52 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:1 user_0:3 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:7 user_7:8 user_8:1 user_9:6 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:1 user_0:10 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:26 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:8 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:3 user_10:1 user_11:8 item_0:16 item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 +click:0 user_0:3 user_1:1 user_2:0 user_3:2 user_4:0 user_5:0 user_6:0 user_7:8 user_8:1 user_9:18 user_10:1 user_11:8 item_0:16 
item_1:3 item_2:0 contex_0:0 contex_1:4 contex_2:20 contex_3:1 contex_4:0 contex_5:0 diff --git a/models/rank/flen/model.py b/models/rank/flen/model.py new file mode 100644 index 0000000000000000000000000000000000000000..5a9a26a1386899d094fdca9ce72fc59709e1dcee --- /dev/null +++ b/models/rank/flen/model.py @@ -0,0 +1,184 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.fluid as fluid +import itertools +from paddlerec.core.utils import envs +from paddlerec.core.model import ModelBase + + +class Model(ModelBase): + def __init__(self, config): + ModelBase.__init__(self, config) + + def _init_hyper_parameters(self): + self.is_distributed = True if envs.get_fleet_mode().upper( + ) == "PSLIB" else False + self.sparse_feature_number = envs.get_global_env( + "hyper_parameters.sparse_feature_number") + self.sparse_feature_dim = envs.get_global_env( + "hyper_parameters.sparse_feature_dim") + self.learning_rate = envs.get_global_env( + "hyper_parameters.optimizer.learning_rate") + + def _FieldWiseBiInteraction(self, inputs): + # MF module + field_wise_embeds_list = inputs + + field_wise_vectors = [ + fluid.layers.reduce_sum( + field_i_vectors, dim=1, keep_dim=True) + for field_i_vectors in field_wise_embeds_list + ] + num_fields = len(field_wise_vectors) + + h_mf_list = [] + for emb_left, emb_right in itertools.combinations(field_wise_vectors, + 2): + embeddings_prod = 
fluid.layers.elementwise_mul(emb_left, emb_right) + + field_weighted_embedding = fluid.layers.fc( + input=embeddings_prod, + size=self.sparse_feature_dim, + param_attr=fluid.initializer.ConstantInitializer(value=1), + name='kernel_mf') + h_mf_list.append(field_weighted_embedding) + h_mf = fluid.layers.concat(h_mf_list, axis=1) + h_mf = fluid.layers.reshape( + x=h_mf, shape=[-1, num_fields, self.sparse_feature_dim]) + h_mf = fluid.layers.reduce_sum(h_mf, dim=1) + + square_of_sum_list = [ + fluid.layers.square( + fluid.layers.reduce_sum( + field_i_vectors, dim=1, keep_dim=True)) + for field_i_vectors in field_wise_embeds_list + ] + + sum_of_square_list = [ + fluid.layers.reduce_sum( + fluid.layers.elementwise_mul(field_i_vectors, field_i_vectors), + dim=1, + keep_dim=True) for field_i_vectors in field_wise_embeds_list + ] + + field_fm_list = [] + for square_of_sum, sum_of_square in zip(square_of_sum_list, + sum_of_square_list): + field_fm = fluid.layers.reshape( + fluid.layers.elementwise_sub(square_of_sum, sum_of_square), + shape=[-1, self.sparse_feature_dim]) + field_fm = fluid.layers.fc( + input=field_fm, + size=self.sparse_feature_dim, + param_attr=fluid.initializer.ConstantInitializer(value=0.5), + name='kernel_fm') + field_fm_list.append(field_fm) + + h_fm = fluid.layers.concat(field_fm_list, axis=1) + h_fm = fluid.layers.reshape( + x=h_fm, shape=[-1, num_fields, self.sparse_feature_dim]) + h_fm = fluid.layers.reduce_sum(h_fm, dim=1) + + return fluid.layers.elementwise_add(h_mf, h_fm) + + def _DNNLayer(self, inputs, dropout_rate=0.2): + deep_input = inputs + for i, hidden_unit in enumerate([64, 32]): + fc_out = fluid.layers.fc( + input=deep_input, + size=hidden_unit, + param_attr=fluid.initializer.Xavier(uniform=False), + act='relu', + name='d_' + str(i)) + fc_out = fluid.layers.dropout(fc_out, dropout_prob=dropout_rate) + deep_input = fc_out + + return deep_input + + def _embeddingLayer(self, inputs): + emb_list = [] + in_len = len(inputs) + for data in 
inputs: + feat_emb = fluid.embedding( + input=data, + size=[self.sparse_feature_number, self.sparse_feature_dim], + param_attr=fluid.ParamAttr( + name='item_emb', + learning_rate=5, + initializer=fluid.initializer.Xavier( + fan_in=self.sparse_feature_dim, + fan_out=self.sparse_feature_dim)), + is_sparse=True) + emb_list.append(feat_emb) + concat_emb = fluid.layers.concat(emb_list, axis=1) + field_emb = fluid.layers.reshape( + x=concat_emb, shape=[-1, in_len, self.sparse_feature_dim]) + + return field_emb + + def net(self, input, is_infer=False): + self.user_inputs = self._sparse_data_var[1:13] + self.item_inputs = self._sparse_data_var[13:16] + self.contex_inputs = self._sparse_data_var[16:] + self.label_input = self._sparse_data_var[0] + + dropout_rate = envs.get_global_env("hyper_parameters.dropout_rate") + + field_wise_embeds_list = [] + for inputs in [self.user_inputs, self.item_inputs, self.contex_inputs]: + field_emb = self._embeddingLayer(inputs) + field_wise_embeds_list.append(field_emb) + + dnn_input = fluid.layers.concat( + [ + fluid.layers.flatten( + x=field_i_vectors, axis=1) + for field_i_vectors in field_wise_embeds_list + ], + axis=1) + + #mlp part + dnn_output = self._DNNLayer(dnn_input, dropout_rate) + + #field-weighted embedding + fm_mf_out = self._FieldWiseBiInteraction(field_wise_embeds_list) + logits = fluid.layers.concat([fm_mf_out, dnn_output], axis=1) + + y_pred = fluid.layers.fc( + input=logits, + size=1, + param_attr=fluid.initializer.Xavier(uniform=False), + act='sigmoid', + name='logit') + + self.predict = y_pred + auc, batch_auc, _ = fluid.layers.auc(input=self.predict, + label=self.label_input, + num_thresholds=2**12, + slide_steps=20) + + if is_infer: + self._infer_results["AUC"] = auc + self._infer_results["BATCH_AUC"] = batch_auc + return + + self._metrics["AUC"] = auc + self._metrics["BATCH_AUC"] = batch_auc + cost = fluid.layers.log_loss( + input=self.predict, + label=fluid.layers.cast( + x=self.label_input, dtype='float32')) + 
avg_cost = fluid.layers.reduce_mean(cost) + self._cost = avg_cost diff --git a/models/rank/readme.md b/models/rank/readme.md index 2d2ba2522d303624ae6ba81d799367fe99d8486a..da242481badbc4cb92a4b75dc277d3981f12f1fc 100644 --- a/models/rank/readme.md +++ b/models/rank/readme.md @@ -37,9 +37,9 @@ | xDeepFM | xDeepFM | [xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems](https://dl.acm.org/doi/pdf/10.1145/3219819.3220023)(2018) | | DIN | Deep Interest Network | [Deep Interest Network for Click-Through Rate Prediction](https://dl.acm.org/doi/pdf/10.1145/3219819.3219823)(2018) | | DIEN | Deep Interest Evolution Network | [Deep Interest Evolution Network for Click-Through Rate Prediction](https://www.aaai.org/ojs/index.php/AAAI/article/view/4545/4423)(2019) | -| AutoInt | Automatic Feature Interaction Learning via Self-Attentive Neural Networks | [AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks](https://arxiv.org/pdf/1810.11921.pdf)(2019) | | FGCNN | Feature Generation by CNN | [Feature Generation by Convolutional Neural Network for Click-Through Rate Prediction](https://arxiv.org/pdf/1904.04447.pdf)(2019) | | FIBINET | Combining Feature Importance and Bilinear feature Interaction | [《FiBiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction》]( https://arxiv.org/pdf/1905.09433.pdf)(2019) | +| FLEN | Leveraging Field for Scalable CTR Prediction | [《FLEN: Leveraging Field for Scalable CTR Prediction》]( https://arxiv.org/pdf/1911.04690.pdf)(2019) | 下面是每个模型的简介(注:图片引用自链接中的论文) @@ -73,6 +73,11 @@

+[FLEN](https://arxiv.org/pdf/1911.04690.pdf): + +

+ +

## 使用教程(快速开始) @@ -88,6 +93,7 @@ | Wide&Deep | 40 | 1 | 40 | | xDeepFM | 100 | 1 | 10 | | Fibinet | 1000 | 8 | 4 | +| Flen | 512 | 8 | 1 | ### 数据处理 参考每个模型目录数据下载&预处理脚本 @@ -128,6 +134,7 @@ python -m paddlerec.run -m ./config.yaml # 以DNN为例 | Census-income Data | Wide&Deep | 0.76195 | 0.90577 | -- | -- | | Amazon Product | DIN | 0.47005 | 0.86379 | -- | -- | | Criteo | Fibinet | -- | 0.86662 | -- | -- | +| Avazu | Flen | -- | -- | -- | -- | ## 分布式