From 6e509429cf4d64ed9d8d65fe0b9d31f15cf0ce51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E8=82=96?= Date: Sun, 16 Feb 2020 19:21:14 +0800 Subject: [PATCH] add similarity_net dygraph (#4289) * Update README.md (#4267) * test=develop (#4269) * 3d use new api (#4275) * PointNet++ and PointRCNN use new API * Update Readme of Dygraph BERT (#4277) Fix some typos. * Update run_classifier_multi_gpu.sh (#4279) remove the CUDA_VISIBLE_DEVICES * Update README.md (#4280) * add similarity_net dygraph Co-authored-by: pkpk Co-authored-by: Kaipeng Deng --- dygraph/similarity_net/.run_ce.sh | 50 + dygraph/similarity_net/README.md | 194 +++ dygraph/similarity_net/__init__.py | 0 dygraph/similarity_net/_ce.py | 61 + dygraph/similarity_net/config.py | 57 + .../similarity_net/config/bow_pairwise.json | 23 + .../similarity_net/config/bow_pointwise.json | 21 + .../similarity_net/config/cnn_pairwise.json | 24 + .../similarity_net/config/cnn_pointwise.json | 23 + .../similarity_net/config/gru_pairwise.json | 23 + .../similarity_net/config/gru_pointwise.json | 22 + .../similarity_net/config/lstm_pairwise.json | 23 + .../similarity_net/config/lstm_pointwise.json | 22 + .../config/mmdnn_pointwise.json | 32 + dygraph/similarity_net/download.py | 149 +++ dygraph/similarity_net/download_data.sh | 5 + .../download_pretrained_model.sh | 10 + .../similarity_net/evaluate/evaluate_ecom.sh | 27 + .../similarity_net/evaluate/evaluate_qqsim.sh | 27 + .../evaluate/evaluate_unicom.sh | 30 + .../evaluate/evaluate_zhidao.sh | 27 + .../evaluate/unicom_compute_pos_neg.py | 60 + .../similarity_net/evaluate/unicom_split.py | 27 + dygraph/similarity_net/mmdnn.py | 296 +++++ dygraph/similarity_net/model_check.py | 73 ++ dygraph/similarity_net/nets/__init__.py | 0 dygraph/similarity_net/nets/base_layers.py | 66 + dygraph/similarity_net/nets/bow.py | 75 ++ dygraph/similarity_net/nets/cnn.py | 80 ++ dygraph/similarity_net/nets/copy.py | 762 ++++++++++++ dygraph/similarity_net/nets/gru.py | 86 ++ .../similarity_net/nets/losses/__init__.py | 0 .../similarity_net/nets/losses/hinge_loss.py | 50 + .../similarity_net/nets/losses/log_loss.py | 41 + .../nets/losses/softmax_cross_entropy_loss.py | 42 + dygraph/similarity_net/nets/lstm.py | 82 ++ dygraph/similarity_net/nets/mm_dnn.py | 169 +++ .../similarity_net/nets/paddle_layers.1.py | 457 +++++++ dygraph/similarity_net/nets/paddle_layers.py | 1083 +++++++++++++++++ dygraph/similarity_net/reader.py | 262 ++++ dygraph/similarity_net/run.sh | 100 ++ dygraph/similarity_net/run_classifier.py | 446 +++++++ dygraph/similarity_net/struct.jpg | Bin 0 -> 38115 bytes dygraph/similarity_net/utils.py | 364 ++++++ 44 files changed, 5471 insertions(+) create mode 100644 dygraph/similarity_net/.run_ce.sh create mode 100644 dygraph/similarity_net/README.md create mode 100644 dygraph/similarity_net/__init__.py create mode 100644 dygraph/similarity_net/_ce.py create mode 100644 dygraph/similarity_net/config.py create mode 100644 dygraph/similarity_net/config/bow_pairwise.json create mode 100644 dygraph/similarity_net/config/bow_pointwise.json create mode 100644 dygraph/similarity_net/config/cnn_pairwise.json create mode 100644 dygraph/similarity_net/config/cnn_pointwise.json create mode 100644 dygraph/similarity_net/config/gru_pairwise.json create mode 100644 dygraph/similarity_net/config/gru_pointwise.json create mode 100644 dygraph/similarity_net/config/lstm_pairwise.json create mode 100644 dygraph/similarity_net/config/lstm_pointwise.json create mode 100644 
dygraph/similarity_net/config/mmdnn_pointwise.json create mode 100644 dygraph/similarity_net/download.py create mode 100644 dygraph/similarity_net/download_data.sh create mode 100644 dygraph/similarity_net/download_pretrained_model.sh create mode 100644 dygraph/similarity_net/evaluate/evaluate_ecom.sh create mode 100644 dygraph/similarity_net/evaluate/evaluate_qqsim.sh create mode 100644 dygraph/similarity_net/evaluate/evaluate_unicom.sh create mode 100644 dygraph/similarity_net/evaluate/evaluate_zhidao.sh create mode 100644 dygraph/similarity_net/evaluate/unicom_compute_pos_neg.py create mode 100644 dygraph/similarity_net/evaluate/unicom_split.py create mode 100644 dygraph/similarity_net/mmdnn.py create mode 100644 dygraph/similarity_net/model_check.py create mode 100644 dygraph/similarity_net/nets/__init__.py create mode 100644 dygraph/similarity_net/nets/base_layers.py create mode 100644 dygraph/similarity_net/nets/bow.py create mode 100644 dygraph/similarity_net/nets/cnn.py create mode 100644 dygraph/similarity_net/nets/copy.py create mode 100644 dygraph/similarity_net/nets/gru.py create mode 100644 dygraph/similarity_net/nets/losses/__init__.py create mode 100644 dygraph/similarity_net/nets/losses/hinge_loss.py create mode 100644 dygraph/similarity_net/nets/losses/log_loss.py create mode 100644 dygraph/similarity_net/nets/losses/softmax_cross_entropy_loss.py create mode 100644 dygraph/similarity_net/nets/lstm.py create mode 100644 dygraph/similarity_net/nets/mm_dnn.py create mode 100644 dygraph/similarity_net/nets/paddle_layers.1.py create mode 100644 dygraph/similarity_net/nets/paddle_layers.py create mode 100644 dygraph/similarity_net/reader.py create mode 100644 dygraph/similarity_net/run.sh create mode 100644 dygraph/similarity_net/run_classifier.py create mode 100644 dygraph/similarity_net/struct.jpg create mode 100644 dygraph/similarity_net/utils.py diff --git a/dygraph/similarity_net/.run_ce.sh b/dygraph/similarity_net/.run_ce.sh new file mode 100644 index 00000000..38ce6a50 --- /dev/null +++ b/dygraph/similarity_net/.run_ce.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +export FLAGS_enable_parallel_graph=1 +export FLAGS_sync_nccl_allreduce=1 +export FLAGS_fraction_of_gpu_memory_to_use=0.95 +TASK_NAME='simnet' +TRAIN_DATA_PATH=./data/train_pointwise_data +VALID_DATA_PATH=./data/test_pointwise_data +TEST_DATA_PATH=./data/test_pointwise_data +INFER_DATA_PATH=./data/infer_data +VOCAB_PATH=./data/term2id.dict +CKPT_PATH=./model_files +TEST_RESULT_PATH=./test_result +INFER_RESULT_PATH=./infer_result +TASK_MODE='pointwise' +CONFIG_PATH=./config/bow_pointwise.json +INIT_CHECKPOINT=./model_files/simnet_bow_pointwise_pretrained_model/ + + +# run_train +train() { + python run_classifier.py \ + --task_name ${TASK_NAME} \ + --use_cuda True \ + --do_train True \ + --do_valid True \ + --do_test True \ + --do_infer False \ + --batch_size 128 \ + --train_data_dir ${TRAIN_DATA_PATH} \ + --valid_data_dir ${VALID_DATA_PATH} \ + --test_data_dir ${TEST_DATA_PATH} \ + --infer_data_dir ${INFER_DATA_PATH} \ + --output_dir ${CKPT_PATH} \ + --config_path ${CONFIG_PATH} \ + --vocab_path ${VOCAB_PATH} \ + --epoch 3 \ + --save_steps 1000 \ + --validation_steps 100 \ + --compute_accuracy False \ + --lamda 0.958 \ + --task_mode ${TASK_MODE} \ + --enable_ce +} + + +export CUDA_VISIBLE_DEVICES=0 +train | python _ce.py +sleep 20 +export CUDA_VISIBLE_DEVICES=0,1,2,3 +train | python _ce.py diff --git a/dygraph/similarity_net/README.md b/dygraph/similarity_net/README.md new file mode 100644 index 00000000..b9c147b5 --- 
/dev/null
+++ b/dygraph/similarity_net/README.md
@@ -0,0 +1,194 @@
+# Short Text Semantic Matching
+## Introduction
+### Task description
+Short text semantic matching (SimilarityNet, SimNet) is a framework for computing the similarity of short texts: given two input texts, it produces a similarity score. The SimNet framework is widely used across Baidu products. It covers core network architectures such as BOW, CNN, RNN and MMDNN, and provides a training and prediction framework for semantic similarity computation that suits scenarios such as information retrieval, news recommendation and intelligent customer service, helping businesses solve semantic matching problems. You can try it online at [AI开放平台-短文本相似度](https://ai.baidu.com/tech/nlp_basic/simnet).
+
+We also recommend the [IPython Notebook demo](https://aistudio.baidu.com/aistudio/projectDetail/124373).
+
+### Results
+Using Baidu's massive search data, we trained a SimNet-BOW-Pairwise semantic matching model. In real FAQ scenarios, its AUC is more than 5% higher than that of literal, surface-form similarity methods. We evaluated it on Baidu-built test sets (covering chit-chat, customer service and other data) and on the LCQMC semantic matching dataset, with the results below. LCQMC uses Accuracy as its metric; since the pairwise model outputs a similarity score, we use 0.958 as the classification threshold. Our model reaches an accuracy of 0.7532, versus 0.737 for the CBOW baseline of comparable network complexity.
+
+| Model | 百度知道 | ECOM | QQSIM | UNICOM |
+|:-----------:|:-------------:|:-------------:|:-------------:|:-------------:|
+| | AUC | AUC | AUC | pos/neg order ratio |
+| BOW_Pairwise | 0.6767 | 0.7329 | 0.7650 | 1.5630 |
+#### Test sets
+| Dataset | Source | Domain |
+|:-----------:|:-------------:|:-------------:|
+| 百度知道 | Baidu Zhidao questions | general |
+| ECOM | commercial queries | finance |
+| QQSIM | chit-chat | general |
+| UNICOM | China Unicom customer service | customer service |
+## Quick start
+#### Requirements
+This project depends on PaddlePaddle Fluid 1.6; see the [installation guide](http://www.paddlepaddle.org/#quick-start).
+
+Python 2.7 is required.
+#### Installation
+Clone the repository:
+```shell
+git clone https://github.com/PaddlePaddle/models.git
+cd models/dygraph/similarity_net
+```
+#### Data preparation
+Download the preprocessed data. After the command finishes, the data directory will contain sample training, dev and test sets, together with the corresponding term-to-id dictionary (term2id.dict).
+```shell
+sh download_data.sh
+```
+or
+```
+python download.py dataset
+```
+#### Pretrained model
+We release a ```pairwise``` model trained on large-scale data (using the BOW network). Download it with either command below; it will be saved under ```./model_files/simnet_bow_pairwise_pretrained_model/```.
+```shell
+sh download_pretrained_model.sh
+```
+or
+```
+python download.py model
+```
+
+#### Evaluation
+We release our in-house test sets: 百度知道 (Baidu Zhidao), ECOM, QQSIM and UNICOM. With the pretrained model above, enter the evaluate directory and run the commands below to get the test-set results.
+```shell
+sh evaluate_ecom.sh
+sh evaluate_qqsim.sh
+sh evaluate_zhidao.sh
+sh evaluate_unicom.sh
+```
+You can also set TEST_DATA_PATH in ./run.sh to point at your own test set and evaluate it with:
+```shell
+sh run.sh eval
+```
+
+#### Inference
+With the pretrained model above, run the command below to run inference and save the predictions locally.
+```shell
+sh run.sh infer
+```
+#### Training and validation
+You can build training and dev sets from the sample data and run the command below to train and validate on the dev set.
+```shell
+sh run.sh train
+```
+You can also set INIT_CHECKPOINT inside the train() function of ./run.sh to warm-start training from a trained model.
+## Advanced usage
+
+### Task definition and modeling
+
+Traditional text matching techniques, such as the vector space model (VSM) and BM25 from information retrieval, mainly address lexical-level similarity; in practice their effectiveness suffers from polysemy and differences in linguistic structure. SimNet keeps the implicit continuous-vector representation of semantics, but models semantic matching end-to-end in a deep learning framework, unifying the ```point-wise``` and ```pair-wise``` supervised learning modes within one framework. In real applications, massive user click logs can be turned into large-scale weakly labeled data; the first deployment on web search already showed great power and brought a clear improvement in relevance.
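+To make the two supervision modes concrete, here is a minimal illustrative sketch (plain Python, not this repo's actual loss classes under nets/losses/) of how pairwise training with a hinge loss over similarity scores differs from pointwise training with a softmax cross-entropy:
+
+```python
+import math
+
+def pairwise_hinge_loss(pos_sim, neg_sim, margin=0.1):
+    # pairwise mode: push sim(query, pos_query) above sim(query, neg_query)
+    # by at least `margin` (bow_pairwise.json uses margin = 0.1)
+    return max(0.0, neg_sim + margin - pos_sim)
+
+def pointwise_ce_loss(logits, label):
+    # pointwise mode: 2-way classification (similar vs. not similar)
+    exps = [math.exp(x) for x in logits]
+    probs = [e / sum(exps) for e in exps]
+    return -math.log(probs[label])
+
+print(pairwise_hinge_loss(pos_sim=0.8, neg_sim=0.3))  # 0.0, pair already separated
+print(pointwise_ce_loss([0.2, 1.5], label=1))         # ~0.24, mostly correct
+```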

+### Model overview
+
+SimNet is illustrated in the figure below:
+
+![SimNet architecture](./struct.jpg)
+
+### Data format
+
+There are two training modes: ```pairwise``` and ```pointwise```.
+
+#### pairwise mode:
+The training set format is: query \t pos_query \t neg_query.
+query, pos_query and neg_query are space-tokenized Chinese texts separated by the tab character '\t'; pos_query is a positive example similar to query, neg_query is a random negative example not similar to query, and the files are UTF-8 encoded.
+```
+现在 安卓模拟器 哪个 好 用 电脑 安卓模拟器 哪个 更好 电信 手机 可以 用 腾讯 大王 卡 吗 ?
+土豆 一亩地 能 收 多少 斤 一亩 地土豆 产 多少 斤 一亩 地 用 多少 斤 土豆 种子
+```
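+A minimal, illustrative Python sketch (a hypothetical helper, not this repo's reader.py, which does the real loading) of how one pairwise training line splits into its three fields:
+
+```python
+# -*- coding: utf-8 -*-
+def parse_pairwise_line(line):
+    # query \t pos_query \t neg_query, each field already space-tokenized
+    query, pos_query, neg_query = line.rstrip(u"\n").split(u"\t")
+    return query.split(u" "), pos_query.split(u" "), neg_query.split(u" ")
+
+q, pos, neg = parse_pairwise_line(
+    u"现在 安卓模拟器 哪个 好 用\t电脑 安卓模拟器 哪个 更好\t电信 手机 可以 用 腾讯 大王 卡 吗 ?")
+print(u" / ".join(q))  # 现在 / 安卓模拟器 / 哪个 / 好 / 用
+```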
+
+The dev and test set format is: query1 \t query2 \t label.
+
+query1 and query2 are space-tokenized Chinese texts; label is 0 or 1, where 1 means query1 and query2 are similar and 0 means they are not. query1, query2 and label are separated by the tab character '\t', and the files are UTF-8 encoded.
+```
+现在 安卓模拟器 哪个 好 用 电脑 安卓模拟器 哪个 更好 1
+为什么 头发 掉 得 很厉害 我 头发 为什么 掉 得 厉害 1
+常喝 薏米 水 有 副 作用 吗 女生 可以 长期 喝 薏米 水养生 么 0
+长 的 清新 是 什么 意思 小 清新 的 意思 是 什么 0
+```
+
+#### pointwise mode:
+
+The training, dev and test sets share one format: query1 and query2 are space-tokenized Chinese texts, label is 0 or 1 (1 means query1 and query2 are similar, 0 means not similar), the fields are separated by the tab character '\t', and the files are UTF-8 encoded.
+```
+现在 安卓模拟器 哪个 好 用 电脑 安卓模拟器 哪个 更好 1
+为什么 头发 掉 得 很厉害 我 头发 为什么 掉 得 厉害 1
+常喝 薏米 水 有 副 作用 吗 女生 可以 长期 喝 薏米 水养生 么 0
+长 的 清新 是 什么 意思 小 清新 的 意思 是 什么 0
+```
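+Since a ```pairwise``` model outputs a continuous similarity score, comparing it against pointwise-style 0/1 labels requires a classification threshold (the --lamda flag; 0.958 in the results above). An illustrative sketch of the idea, not this repo's code (run_classifier.py applies --lamda when --compute_accuracy is True):
+
+```python
+def similarity_to_label(score, lamda=0.958):
+    # scores above the threshold are predicted "similar" (label 1)
+    return 1 if score > lamda else 0
+
+print(similarity_to_label(0.97))  # 1
+print(similarity_to_label(0.40))  # 0
+```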
+
+#### infer dataset:
+
+```pairwise``` and ```pointwise``` share the same infer dataset format: query1 \t query2.
+
+query1 and query2 are space-tokenized Chinese texts.
+```
+怎么 调理 湿热 体质 ? 湿热 体质 怎样 调理 啊
+搞笑 电影 美国 搞笑 的 美国 电影
+```
+
+__Note__: this project also provides a word segmentation preprocessing script (under the preprocess directory), used as follows:
+
+```shell
+python tokenizer.py --test_data_dir ./test.txt.utf8 --batch_size 1 > test.txt.utf8.seg
+```
+where test.txt.utf8 is the UTF-8 encoded file to segment, one text per line; the segmented result is written to test.txt.utf8.seg.
+
+### Code structure
+```text
+.
+├── run_classifier.py: entry point wrapping training, prediction and evaluation
+├── config.py: configuration class; reads the model type and its hyperparameters
+├── reader.py: data reading functions
+├── utils.py: other common utility functions
+├── config: configuration files for the various models
+├── download.py: script that downloads the dataset and pretrained models
+```
+
+### How to train
+```shell
+python run_classifier.py \
+   --task_name ${TASK_NAME} \
+   --use_cuda false \                     # whether to use GPU
+   --do_train True \                      # whether to train
+   --do_valid True \                      # whether to evaluate the dev set during training
+   --do_test True \                       # whether to evaluate the test set
+   --do_infer False \                     # whether to run inference
+   --batch_size 128 \                     # batch size
+   --train_data_dir ${TRAIN_DATA_PATH} \  # training set path
+   --valid_data_dir ${VALID_DATA_PATH} \  # dev set path
+   --test_data_dir ${TEST_DATA_PATH} \    # test set path
+   --infer_data_dir ${INFER_DATA_PATH} \  # inference data path
+   --output_dir ${CKPT_PATH} \            # directory where models are saved
+   --config_path ${CONFIG_PATH} \         # config file path
+   --vocab_path ${VOCAB_PATH} \           # vocabulary path
+   --epoch 10 \                           # number of epochs
+   --save_steps 1000 \                    # save a checkpoint every save_steps steps
+   --validation_steps 100 \               # evaluate the dev set every validation_steps steps
+   --task_mode ${TASK_MODE} \             # training mode, pairwise or pointwise, matching the config file
+   --compute_accuracy False \             # whether to compute accuracy
+   --lamda 0.91 \                         # threshold for computing accuracy in pairwise mode
+   --init_checkpoint ""                   # path of a model to warm-start from
+```
+### How to build your own model
+You can build a custom model as follows:
+
+i. Define your own network
+
+Define your model under ```./nets/```;
+
+ii. Update the model configuration
+
+Create a configuration file for your model, following the files under ```config```.
+
+Keep the ```net```, ```loss```, ```optimizer```, ```task_mode``` and ```model_path``` fields. ```net``` holds your model's parameters; ```task_mode``` is the training mode, ```pairwise``` or ```pointwise```, and must match the ```--task_mode``` flag of the training command; ```model_path``` is the model save path; fill in ```loss``` and ```optimizer``` according to your model's needs, following the other files under ```config```.
+
+iii. Train, evaluate and predict by running the corresponding scripts as described above.
+
+## Misc
+### How to contribute
+If you can fix an issue or add a new feature, feel free to open a PR. If your PR is accepted, we will score it by quality and difficulty (0-5, higher is better). Once you accumulate 10 points, you may contact us for an interview opportunity or a recommendation letter.
diff --git a/dygraph/similarity_net/__init__.py b/dygraph/similarity_net/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/dygraph/similarity_net/_ce.py b/dygraph/similarity_net/_ce.py
new file mode 100644
index 00000000..32106ef7
--- /dev/null
+++ b/dygraph/similarity_net/_ce.py
@@ -0,0 +1,61 @@
+# this file is only used for continuous evaluation test!
+
+import os
+import sys
+sys.path.append(os.environ['ceroot'])
+from kpi import CostKpi
+from kpi import DurationKpi
+from kpi import AccKpi
+
+each_step_duration_simnet_card1 = DurationKpi('each_step_duration_simnet_card1', 0.03, 0, actived=True)
+train_loss_simnet_card1 = CostKpi('train_loss_simnet_card1', 0.01, 0, actived=True)
+each_step_duration_simnet_card4 = DurationKpi('each_step_duration_simnet_card4', 0.02, 0, actived=True)
+train_loss_simnet_card4 = CostKpi('train_loss_simnet_card4', 0.01, 0, actived=True)
+
+tracking_kpis = [
+    each_step_duration_simnet_card1,
+    train_loss_simnet_card1,
+    each_step_duration_simnet_card4,
+    train_loss_simnet_card4,
+]
+
+
+def parse_log(log):
+    '''
+    This method should be implemented by model developers.
+ + The suggestion: + + each line in the log should be key, value, for example: + + " + train_cost\t1.0 + test_cost\t1.0 + train_cost\t1.0 + train_cost\t1.0 + train_acc\t1.2 + " + ''' + for line in log.split('\n'): + fs = line.strip().split('\t') + print(fs) + if len(fs) == 3 and fs[0] == 'kpis': + kpi_name = fs[1] + kpi_value = float(fs[2]) + yield kpi_name, kpi_value + + +def log_to_ce(log): + kpi_tracker = {} + for kpi in tracking_kpis: + kpi_tracker[kpi.name] = kpi + + for (kpi_name, kpi_value) in parse_log(log): + print(kpi_name, kpi_value) + kpi_tracker[kpi_name].add_record(kpi_value) + kpi_tracker[kpi_name].persist() + + +if __name__ == '__main__': + log = sys.stdin.read() + log_to_ce(log) diff --git a/dygraph/similarity_net/config.py b/dygraph/similarity_net/config.py new file mode 100644 index 00000000..12508d0e --- /dev/null +++ b/dygraph/similarity_net/config.py @@ -0,0 +1,57 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +SimNet config +""" + +import six +import json +import io + +class SimNetConfig(object): + """ + simnet Config + """ + + def __init__(self, args): + self.task_mode = args.task_mode + self.config_path = args.config_path + self._config_dict = self._parse(args.config_path) + + def _parse(self, config_path): + try: + with io.open(config_path) as json_file: + config_dict = json.load(json_file) + except Exception: + raise IOError("Error in parsing simnet model config file '%s'" % config_path) + + else: + if config_dict["task_mode"] != self.task_mode: + raise ValueError( + "the config '{}' does not match the task_mode '{}'".format(self.config_path, self.task_mode)) + return config_dict + + def __getitem__(self, key): + return self._config_dict[key] + + def __setitem__(self, key, value): + self._config_dict[key] = value + + def print_config(self): + """ + Print Config + """ + for arg, value in sorted(six.iteritems(self._config_dict)): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') diff --git a/dygraph/similarity_net/config/bow_pairwise.json b/dygraph/similarity_net/config/bow_pairwise.json new file mode 100644 index 00000000..6c6fc9a6 --- /dev/null +++ b/dygraph/similarity_net/config/bow_pairwise.json @@ -0,0 +1,23 @@ +{ + "net": { + "module_name": "bow", + "class_name": "BOW", + "emb_dim": 128, + "bow_dim": 128, + "hidden_dim": 128 + }, + "loss": { + "module_name": "hinge_loss", + "class_name": "HingeLoss", + "margin": 0.1 + }, + "optimizer": { + "class_name": "AdamOptimizer", + "learning_rate": 0.001, + "beta1": 0.9, + "beta2": 0.999, + "epsilon": 1e-08 + }, + "task_mode": "pairwise", + "model_path": "bow_pairwise" +} diff --git a/dygraph/similarity_net/config/bow_pointwise.json b/dygraph/similarity_net/config/bow_pointwise.json new file mode 100644 index 00000000..ba5a5f21 --- /dev/null +++ b/dygraph/similarity_net/config/bow_pointwise.json @@ -0,0 +1,21 @@ +{ + "net": { + "module_name": "bow", + "class_name": "BOW", + "emb_dim": 128, + "bow_dim": 128 
+ }, + "loss": { + "module_name": "softmax_cross_entropy_loss", + "class_name": "SoftmaxCrossEntropyLoss" + }, + "optimizer": { + "class_name": "AdamOptimizer", + "learning_rate": 0.001, + "beta1": 0.9, + "beta2": 0.999, + "epsilon": 1e-08 + }, + "task_mode": "pointwise", + "model_path": "bow_pointwise" +} diff --git a/dygraph/similarity_net/config/cnn_pairwise.json b/dygraph/similarity_net/config/cnn_pairwise.json new file mode 100644 index 00000000..074f9d03 --- /dev/null +++ b/dygraph/similarity_net/config/cnn_pairwise.json @@ -0,0 +1,24 @@ +{ + "net": { + "module_name": "cnn", + "class_name": "CNN", + "emb_dim": 128, + "filter_size": 3, + "num_filters": 256, + "hidden_dim": 128 + }, + "loss": { + "module_name": "hinge_loss", + "class_name": "HingeLoss", + "margin": 0.1 + }, + "optimizer": { + "class_name": "AdamOptimizer", + "learning_rate": 0.2, + "beta1": 0.9, + "beta2": 0.999, + "epsilon": 1e-08 + }, + "task_mode": "pairwise", + "model_path": "cnn_pairwise" +} diff --git a/dygraph/similarity_net/config/cnn_pointwise.json b/dygraph/similarity_net/config/cnn_pointwise.json new file mode 100644 index 00000000..572577a0 --- /dev/null +++ b/dygraph/similarity_net/config/cnn_pointwise.json @@ -0,0 +1,23 @@ +{ + "net": { + "module_name": "cnn", + "class_name": "CNN", + "emb_dim": 128, + "filter_size": 3, + "num_filters": 256, + "hidden_dim": 128 + }, + "loss": { + "module_name": "softmax_cross_entropy_loss", + "class_name": "SoftmaxCrossEntropyLoss" + }, + "optimizer": { + "class_name": "AdamOptimizer", + "learning_rate": 0.001, + "beta1": 0.9, + "beta2": 0.999, + "epsilon": 1e-08 + }, + "task_mode": "pointwise", + "model_path": "cnn_pointwise" +} diff --git a/dygraph/similarity_net/config/gru_pairwise.json b/dygraph/similarity_net/config/gru_pairwise.json new file mode 100644 index 00000000..5977a337 --- /dev/null +++ b/dygraph/similarity_net/config/gru_pairwise.json @@ -0,0 +1,23 @@ +{ + "net": { + "module_name": "gru", + "class_name": "GRU", + "emb_dim": 128, + "gru_dim": 128, + "hidden_dim": 128 + }, + "loss": { + "module_name": "hinge_loss", + "class_name": "HingeLoss", + "margin": 0.1 + }, + "optimizer": { + "class_name": "AdamOptimizer", + "learning_rate": 0.001, + "beta1": 0.9, + "beta2": 0.999, + "epsilon": 1e-08 + }, + "task_mode": "pairwise", + "model_path": "gru_pairwise" +} diff --git a/dygraph/similarity_net/config/gru_pointwise.json b/dygraph/similarity_net/config/gru_pointwise.json new file mode 100644 index 00000000..1163382a --- /dev/null +++ b/dygraph/similarity_net/config/gru_pointwise.json @@ -0,0 +1,22 @@ +{ + "net": { + "module_name": "gru", + "class_name": "GRU", + "emb_dim": 128, + "gru_dim": 128, + "hidden_dim": 128 + }, + "loss": { + "module_name": "softmax_cross_entropy_loss", + "class_name": "SoftmaxCrossEntropyLoss" + }, + "optimizer": { + "class_name": "AdamOptimizer", + "learning_rate" : 0.001, + "beta1": 0.9, + "beta2": 0.999, + "epsilon": 1e-08 + }, + "task_mode": "pointwise", + "model_path": "gru_pointwise" +} diff --git a/dygraph/similarity_net/config/lstm_pairwise.json b/dygraph/similarity_net/config/lstm_pairwise.json new file mode 100644 index 00000000..c78e41c1 --- /dev/null +++ b/dygraph/similarity_net/config/lstm_pairwise.json @@ -0,0 +1,23 @@ +{ + "net": { + "module_name": "lstm", + "class_name": "LSTM", + "emb_dim": 128, + "lstm_dim": 128, + "hidden_dim": 128 + }, + "loss": { + "module_name": "hinge_loss", + "class_name": "HingeLoss", + "margin": 0.1 + }, + "optimizer": { + "class_name": "AdamOptimizer", + "learning_rate": 0.001, + "beta1": 0.9, 
+ "beta2": 0.999, + "epsilon": 1e-08 + }, + "task_mode": "pairwise", + "model_path": "lstm_pairwise" +} diff --git a/dygraph/similarity_net/config/lstm_pointwise.json b/dygraph/similarity_net/config/lstm_pointwise.json new file mode 100644 index 00000000..ceb930eb --- /dev/null +++ b/dygraph/similarity_net/config/lstm_pointwise.json @@ -0,0 +1,22 @@ +{ + "net": { + "module_name": "lstm", + "class_name": "LSTM", + "emb_dim": 128, + "lstm_dim": 128, + "hidden_dim": 128 + }, + "loss": { + "module_name": "softmax_cross_entropy_loss", + "class_name": "SoftmaxCrossEntropyLoss" + }, + "optimizer": { + "class_name": "AdamOptimizer", + "learning_rate": 0.001, + "beta1": 0.9, + "beta2": 0.999, + "epsilon": 1e-08 + }, + "task_mode": "pointwise", + "model_path": "lstm_pointwise" +} diff --git a/dygraph/similarity_net/config/mmdnn_pointwise.json b/dygraph/similarity_net/config/mmdnn_pointwise.json new file mode 100644 index 00000000..0239f808 --- /dev/null +++ b/dygraph/similarity_net/config/mmdnn_pointwise.json @@ -0,0 +1,32 @@ +{ + "net": { + "module_name": "mm_dnn", + "class_name": "MMDNN", + "embedding_dim": 128, + "num_filters": 256, + "lstm_dim": 128, + "hidden_size": 128, + "window_size_left": 3, + "window_size_right": 3, + "dpool_size_left": 2, + "dpool_size_right": 2 + }, + "loss": { + "module_name": "softmax_cross_entropy_loss", + "class_name": "SoftmaxCrossEntropyLoss" + }, + "optimizer": { + "class_name": "AdamOptimizer", + "learning_rate": 0.001, + "beta1": 0.9, + "beta2": 0.999, + "epsilon": 1e-08 + }, + "max_len_left": 32, + "max_len_right": 32, + "n_class": 2, + "task_mode": "pointwise", + "match_mask" : 1, + "model_path": "mm_dnn_pointwise" + +} diff --git a/dygraph/similarity_net/download.py b/dygraph/similarity_net/download.py new file mode 100644 index 00000000..52b53def --- /dev/null +++ b/dygraph/similarity_net/download.py @@ -0,0 +1,149 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Download script, download dataset and pretrain models. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import io +import os +import sys +import time +import hashlib +import tarfile +import requests + + +def usage(): + desc = ("\nDownload datasets and pretrained models for SimilarityNet task.\n" + "Usage:\n" + " 1. python download.py dataset\n" + " 2. 
python download.py model\n") + print(desc) + + +def md5file(fname): + hash_md5 = hashlib.md5() + with io.open(fname, "rb") as fin: + for chunk in iter(lambda: fin.read(4096), b""): + hash_md5.update(chunk) + return hash_md5.hexdigest() + + +def extract(fname, dir_path): + """ + Extract tar.gz file + """ + try: + tar = tarfile.open(fname, "r:gz") + file_names = tar.getnames() + for file_name in file_names: + tar.extract(file_name, dir_path) + print(file_name) + tar.close() + except Exception as e: + raise e + + +def download(url, filename, md5sum): + """ + Download file and check md5 + """ + retry = 0 + retry_limit = 3 + chunk_size = 4096 + while not (os.path.exists(filename) and md5file(filename) == md5sum): + if retry < retry_limit: + retry += 1 + else: + raise RuntimeError("Cannot download dataset ({0}) with retry {1} times.". + format(url, retry_limit)) + try: + start = time.time() + size = 0 + res = requests.get(url, stream=True) + filesize = int(res.headers['content-length']) + if res.status_code == 200: + print("[Filesize]: %0.2f MB" % (filesize / 1024 / 1024)) + # save by chunk + with io.open(filename, "wb") as fout: + for chunk in res.iter_content(chunk_size=chunk_size): + if chunk: + fout.write(chunk) + size += len(chunk) + pr = '>' * int(size * 50 / filesize) + print('\r[Process ]: %s%.2f%%' % (pr, float(size / filesize*100)), end='') + end = time.time() + print("\n[CostTime]: %.2f s" % (end - start)) + except Exception as e: + print(e) + + +def download_dataset(dir_path): + BASE_URL = "https://baidu-nlp.bj.bcebos.com/" + DATASET_NAME = "simnet_dataset-1.0.0.tar.gz" + DATASET_MD5 = "ec65b313bc237150ef536a8d26f3c73b" + file_path = os.path.join(dir_path, DATASET_NAME) + url = BASE_URL + DATASET_NAME + + if not os.path.exists(dir_path): + os.makedirs(dir_path) + # download dataset + print("Downloading dataset: %s" % url) + download(url, file_path, DATASET_MD5) + # extract dataset + print("Extracting dataset: %s" % file_path) + extract(file_path, dir_path) + os.remove(file_path) + + +def download_model(dir_path): + MODELS = {} + BASE_URL = "https://baidu-nlp.bj.bcebos.com/" + CNN_NAME = "simnet_bow-pairwise-1.0.0.tar.gz" + CNN_MD5 = "199a3f3af31558edcc71c3b54ea5e129" + MODELS[CNN_NAME] = CNN_MD5 + + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + for model in MODELS: + url = BASE_URL + model + model_path = os.path.join(dir_path, model) + print("Downloading model: %s" % url) + # download model + download(url, model_path, MODELS[model]) + # extract model.tar.gz + print("Extracting model: %s" % model_path) + extract(model_path, dir_path) + os.remove(model_path) + + +if __name__ == '__main__': + if len(sys.argv) != 2: + usage() + sys.exit(1) + + if sys.argv[1] == "dataset": + pwd = os.path.join(os.path.dirname(__file__), './') + download_dataset(pwd) + elif sys.argv[1] == "model": + pwd = os.path.join(os.path.dirname(__file__), './model_files') + download_model(pwd) + else: + usage() diff --git a/dygraph/similarity_net/download_data.sh b/dygraph/similarity_net/download_data.sh new file mode 100644 index 00000000..ea1aaf9c --- /dev/null +++ b/dygraph/similarity_net/download_data.sh @@ -0,0 +1,5 @@ +#get data +wget --no-check-certificate https://baidu-nlp.bj.bcebos.com/simnet_dataset-1.0.0.tar.gz +tar xzf simnet_dataset-1.0.0.tar.gz +rm simnet_dataset-1.0.0.tar.gz + diff --git a/dygraph/similarity_net/download_pretrained_model.sh b/dygraph/similarity_net/download_pretrained_model.sh new file mode 100644 index 00000000..287e8dcc --- /dev/null +++ 
b/dygraph/similarity_net/download_pretrained_model.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +model_files_path="./model_files" + +#get pretrained_bow_pairwise_model +wget --no-check-certificate https://baidu-nlp.bj.bcebos.com/simnet_bow-pairwise-1.0.0.tar.gz +if [ ! -d $model_files_path ]; then + mkdir $model_files_path +fi +tar xzf simnet_bow-pairwise-1.0.0.tar.gz -C $model_files_path +rm simnet_bow-pairwise-1.0.0.tar.gz \ No newline at end of file diff --git a/dygraph/similarity_net/evaluate/evaluate_ecom.sh b/dygraph/similarity_net/evaluate/evaluate_ecom.sh new file mode 100644 index 00000000..4a00efab --- /dev/null +++ b/dygraph/similarity_net/evaluate/evaluate_ecom.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +export FLAGS_enable_parallel_graph=1 +export FLAGS_sync_nccl_allreduce=1 +export CUDA_VISIBLE_DEVICES=3 +export FLAGS_fraction_of_gpu_memory_to_use=0.95 +TASK_NAME='simnet' +TEST_DATA_PATH=./data/ecom +VOCAB_PATH=./data/term2id.dict +CKPT_PATH=./model_files +TEST_RESULT_PATH=./evaluate/ecom_test_result +TASK_MODE='pairwise' +CONFIG_PATH=./config/bow_pairwise.json +INIT_CHECKPOINT=./model_files/simnet_bow_pairwise_pretrained_model/ +cd .. + +python ./run_classifier.py \ + --task_name ${TASK_NAME} \ + --use_cuda false \ + --do_test True \ + --verbose_result True \ + --batch_size 128 \ + --test_data_dir ${TEST_DATA_PATH} \ + --test_result_path ${TEST_RESULT_PATH} \ + --config_path ${CONFIG_PATH} \ + --vocab_path ${VOCAB_PATH} \ + --task_mode ${TASK_MODE} \ + --init_checkpoint ${INIT_CHECKPOINT} diff --git a/dygraph/similarity_net/evaluate/evaluate_qqsim.sh b/dygraph/similarity_net/evaluate/evaluate_qqsim.sh new file mode 100644 index 00000000..fa8bdcc0 --- /dev/null +++ b/dygraph/similarity_net/evaluate/evaluate_qqsim.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +export FLAGS_enable_parallel_graph=1 +export FLAGS_sync_nccl_allreduce=1 +export CUDA_VISIBLE_DEVICES=3 +export FLAGS_fraction_of_gpu_memory_to_use=0.95 +TASK_NAME='simnet' +TEST_DATA_PATH=./data/qqsim +VOCAB_PATH=./data/term2id.dict +CKPT_PATH=./model_files +TEST_RESULT_PATH=./evaluate/qqsim_test_result +TASK_MODE='pairwise' +CONFIG_PATH=./config/bow_pairwise.json +INIT_CHECKPOINT=./model_files/simnet_bow_pairwise_pretrained_model/ +cd .. + +python ./run_classifier.py \ + --task_name ${TASK_NAME} \ + --use_cuda false \ + --do_test True \ + --verbose_result True \ + --batch_size 128 \ + --test_data_dir ${TEST_DATA_PATH} \ + --test_result_path ${TEST_RESULT_PATH} \ + --config_path ${CONFIG_PATH} \ + --vocab_path ${VOCAB_PATH} \ + --task_mode ${TASK_MODE} \ + --init_checkpoint ${INIT_CHECKPOINT} diff --git a/dygraph/similarity_net/evaluate/evaluate_unicom.sh b/dygraph/similarity_net/evaluate/evaluate_unicom.sh new file mode 100644 index 00000000..a93aaa4b --- /dev/null +++ b/dygraph/similarity_net/evaluate/evaluate_unicom.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +export FLAGS_enable_parallel_graph=1 +export FLAGS_sync_nccl_allreduce=1 +export CUDA_VISIBLE_DEVICES=3 +export FLAGS_fraction_of_gpu_memory_to_use=0.95 +TASK_NAME='simnet' +INFER_DATA_PATH=./evaluate/unicom_infer +VOCAB_PATH=./data/term2id.dict +CKPT_PATH=./model_files +INFER_RESULT_PATH=./evaluate/unicom_infer_result +TASK_MODE='pairwise' +CONFIG_PATH=./config/bow_pairwise.json +INIT_CHECKPOINT=./model_files/simnet_bow_pairwise_pretrained_model/ + +python unicom_split.py +cd .. 
+python ./run_classifier.py \ + --task_name ${TASK_NAME} \ + --use_cuda false \ + --do_infer True \ + --batch_size 128 \ + --infer_data_dir ${INFER_DATA_PATH} \ + --infer_result_path ${INFER_RESULT_PATH} \ + --config_path ${CONFIG_PATH} \ + --vocab_path ${VOCAB_PATH} \ + --task_mode ${TASK_MODE} \ + --init_checkpoint ${INIT_CHECKPOINT} +cd evaluate +python unicom_compute_pos_neg.py + diff --git a/dygraph/similarity_net/evaluate/evaluate_zhidao.sh b/dygraph/similarity_net/evaluate/evaluate_zhidao.sh new file mode 100644 index 00000000..9e634610 --- /dev/null +++ b/dygraph/similarity_net/evaluate/evaluate_zhidao.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +export FLAGS_enable_parallel_graph=1 +export FLAGS_sync_nccl_allreduce=1 +export CUDA_VISIBLE_DEVICES=3 +export FLAGS_fraction_of_gpu_memory_to_use=0.95 +TASK_NAME='simnet' +TEST_DATA_PATH=./data/zhidao +VOCAB_PATH=./data/term2id.dict +CKPT_PATH=./model_files +TEST_RESULT_PATH=./evaluate/zhidao_test_result +TASK_MODE='pairwise' +CONFIG_PATH=./config/bow_pairwise.json +INIT_CHECKPOINT=./model_files/simnet_bow_pairwise_pretrained_model/ +cd .. + +python ./run_classifier.py \ + --task_name ${TASK_NAME} \ + --use_cuda false \ + --do_test True \ + --verbose_result True \ + --batch_size 128 \ + --test_data_dir ${TEST_DATA_PATH} \ + --test_result_path ${TEST_RESULT_PATH} \ + --config_path ${CONFIG_PATH} \ + --vocab_path ${VOCAB_PATH} \ + --task_mode ${TASK_MODE} \ + --init_checkpoint ${INIT_CHECKPOINT} diff --git a/dygraph/similarity_net/evaluate/unicom_compute_pos_neg.py b/dygraph/similarity_net/evaluate/unicom_compute_pos_neg.py new file mode 100644 index 00000000..fd6d87fb --- /dev/null +++ b/dygraph/similarity_net/evaluate/unicom_compute_pos_neg.py @@ -0,0 +1,60 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""
+Compute the positive/negative order ratio on the unicom data.
+"""
+
+import io
+
+
+infer_results = []
+labels = []
+result = []
+temp_result = []
+temp_query = ""
+pos_num = 0.0
+neg_num = 0.0
+
+with io.open("./unicom_infer_result", "r", encoding="utf8") as infer_result_file:
+    for line in infer_result_file:
+        infer_results.append(line.strip().split("\t"))
+
+with io.open("./unicom_label", "r", encoding="utf8") as label_file:
+    for line in label_file:
+        labels.append(line.strip().split("\t"))
+
+# group the (prediction, label) rows by query
+for infer_result, label in zip(infer_results, labels):
+    if infer_result[0] != temp_query and temp_query != "":
+        result.append(temp_result)
+        temp_query = infer_result[0]
+        temp_result = []
+        temp_result.append(infer_result + label)
+    else:
+        if temp_query == '':
+            temp_query = infer_result[0]
+        temp_result.append(infer_result + label)
+else:
+    result.append(temp_result)
+
+# count concordant (pos) and discordant (neg) score/label pairs inside each group
+for _result in result:
+    for n, i in enumerate(_result, start=1):
+        for j in _result[n:]:
+            if (int(j[-1]) > int(i[-1]) and float(j[-2]) < float(i[-2])) or (
+                    int(j[-1]) < int(i[-1]) and float(j[-2]) > float(i[-2])):
+                neg_num += 1
+            elif (int(j[-1]) > int(i[-1]) and float(j[-2]) > float(i[-2])) or (
+                    int(j[-1]) < int(i[-1]) and float(j[-2]) < float(i[-2])):
+                pos_num += 1
+
+print("pos/neg of unicom data is %f" % (pos_num / neg_num))
diff --git a/dygraph/similarity_net/evaluate/unicom_split.py b/dygraph/similarity_net/evaluate/unicom_split.py
new file mode 100644
index 00000000..885ebcfd
--- /dev/null
+++ b/dygraph/similarity_net/evaluate/unicom_split.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+split unicom file
+"""
+
+import io
+
+
+with io.open("../data/unicom", "r", encoding="utf8") as unicom_file:
+    with io.open("./unicom_infer", "w", encoding="utf8") as infer_file:
+        with io.open("./unicom_label", "w", encoding="utf8") as label_file:
+            for line in unicom_file:
+                line = line.strip().split('\t')
+                infer_file.write("\t".join(line[:2]) + '\n')
+                label_file.write(line[2] + '\n')
diff --git a/dygraph/similarity_net/mmdnn.py b/dygraph/similarity_net/mmdnn.py
new file mode 100644
index 00000000..149cdae5
--- /dev/null
+++ b/dygraph/similarity_net/mmdnn.py
@@ -0,0 +1,296 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" +MMDNN class +""" +import numpy as np +import paddle.fluid as fluid +import logging +from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard +from paddle.fluid.dygraph.nn import Conv2D + +import paddle_layers as pd_layers + +from paddle.fluid import layers +from paddle.fluid.dygraph import Layer + +class BasicLSTMUnit(Layer): + """ + **** + BasicLSTMUnit class, Using basic operator to build LSTM + The algorithm can be described as the code below. + .. math:: + i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i) + f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget_bias ) + o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o) + \\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c) + c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t} + h_t &= o_t \odot tanh(c_t) + - $W$ terms denote weight matrices (e.g. $W_{ix}$ is the matrix + of weights from the input gate to the input) + - The b terms denote bias vectors ($bx_i$ and $bh_i$ are the input gate bias vector). + - sigmoid is the logistic sigmoid function. + - $i, f, o$ and $c$ are the input gate, forget gate, output gate, + and cell activation vectors, respectively, all of which have the same size as + the cell output activation vector $h$. + - The :math:`\odot` is the element-wise product of the vectors. + - :math:`tanh` is the activation functions. + - :math:`\\tilde{c_t}` is also called candidate hidden state, + which is computed based on the current input and the previous hidden state. + Args: + name_scope(string) : The name scope used to identify parameter and bias name + hidden_size (integer): The hidden size used in the Unit. + param_attr(ParamAttr|None): The parameter attribute for the learnable + weight matrix. Note: + If it is set to None or one attribute of ParamAttr, lstm_unit will + create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr (ParamAttr|None): The parameter attribute for the bias + of LSTM unit. + If it is set to None or one attribute of ParamAttr, lstm_unit will + create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized as zero. Default: None. + gate_activation (function|None): The activation function for gates (actGate). + Default: 'fluid.layers.sigmoid' + activation (function|None): The activation function for cells (actNode). 
+ Default: 'fluid.layers.tanh' + forget_bias(float|1.0): forget bias used when computing forget gate + dtype(string): data type used in this unit + """ + + def __init__(self, + hidden_size, + input_size, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + forget_bias=1.0, + dtype='float32'): + super(BasicLSTMUnit, self).__init__(dtype) + + self._hiden_size = hidden_size + self._param_attr = param_attr + self._bias_attr = bias_attr + self._gate_activation = gate_activation or layers.sigmoid + self._activation = activation or layers.tanh + self._forget_bias = layers.fill_constant( + [1], dtype=dtype, value=forget_bias) + self._forget_bias.stop_gradient = False + self._dtype = dtype + self._input_size = input_size + + self._weight = self.create_parameter( + attr=self._param_attr, + shape=[self._input_size + self._hiden_size, 4 * self._hiden_size], + dtype=self._dtype) + + self._bias = self.create_parameter( + attr=self._bias_attr, + shape=[4 * self._hiden_size], + dtype=self._dtype, + is_bias=True) + + def forward(self, input, pre_hidden, pre_cell): + concat_input_hidden = layers.concat([input, pre_hidden], 1) + gate_input = layers.matmul(x=concat_input_hidden, y=self._weight) + + gate_input = layers.elementwise_add(gate_input, self._bias) + i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1) + new_cell = layers.elementwise_add( + layers.elementwise_mul( + pre_cell, + layers.sigmoid(layers.elementwise_add(f, self._forget_bias))), + layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j))) + new_hidden = layers.tanh(new_cell) * layers.sigmoid(o) + + return new_hidden, new_cell + + +class MMDNN(object): + """ + MMDNN + """ + + def __init__(self, config): + """ + initialize + """ + self.vocab_size = int(config['dict_size']) + self.emb_size = int(config['net']['embedding_dim']) + self.lstm_dim = int(config['net']['lstm_dim']) + self.kernel_size = int(config['net']['num_filters']) + self.win_size1 = int(config['net']['window_size_left']) + self.win_size2 = int(config['net']['window_size_right']) + self.dpool_size1 = int(config['net']['dpool_size_left']) + self.dpool_size2 = int(config['net']['dpool_size_right']) + self.hidden_size = int(config['net']['hidden_size']) + self.seq_len1 = int(config['max_len_left']) + self.seq_len2 = int(config['max_len_right']) + self.task_mode = config['task_mode'] + + if int(config['match_mask']) != 0: + self.match_mask = True + else: + self.match_mask = False + + if self.task_mode == "pointwise": + self.n_class = int(config['n_class']) + self.out_size = self.n_class + elif self.task_mode == "pairwise": + self.out_size = 1 + else: + logging.error("training mode not supported") + + def embedding_layer(self, input, zero_pad=True, scale=True): + """ + embedding layer + """ + emb = Embedding( + size=[self.vocab_size, self.emb_size], + padding_idx=(0 if zero_pad else None), + param_attr=fluid.ParamAttr( + name="word_embedding", initializer=fluid.initializer.Xavier())) + emb = emb(input) + if scale: + emb = emb * (self.emb_size**0.5) + return emb + + def bi_dynamic_lstm(self, input, hidden_size): + """ + bi_lstm layer + """ + fw_in_proj = Linear( + input_dim=self.emb_size, + output_dim=4 * hidden_size, + param_attr=fluid.ParamAttr(name="fw_fc.w"), + bias_attr=False) + fw_in_proj = fw_in_proj(input) + + forward = pd_layers.DynamicLSTMLayer( + size=4 * hidden_size, + is_reverse=False, + param_attr=fluid.ParamAttr(name="forward_lstm.w"), + bias_attr=fluid.ParamAttr(name="forward_lstm.b")).ops() + + forward = forward(fw_in_proj) + + 
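# reverse direction: project the embeddings the same way as the forward
+        # pass, then run the LSTM over them with is_reverse=True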
+        rv_in_proj = Linear(
+            input_dim=self.emb_size,
+            output_dim=4 * hidden_size,
+            param_attr=fluid.ParamAttr(name="rv_fc.w"),
+            bias_attr=False)
+        rv_in_proj = rv_in_proj(input)
+
+        reverse = pd_layers.DynamicLSTMLayer(
+            size=4 * hidden_size,
+            is_reverse=True,
+            param_attr=fluid.ParamAttr(name="reverse_lstm.w"),
+            bias_attr=fluid.ParamAttr(name="reverse_lstm.b")).ops()
+        reverse = reverse(rv_in_proj)
+
+        return [forward, reverse]
+
+    def conv_pool_relu_layer(self, input, mask=None):
+        """
+        convolution and pool layer
+        """
+        # data format NCHW
+        emb_expanded = fluid.layers.unsqueeze(input=input, axes=[1])
+        # same padding
+        conv = Conv2D(
+            num_channels=1,  # the cross matrix was unsqueezed to one channel above
+            num_filters=self.kernel_size,
+            stride=1,
+            padding=(int(self.seq_len1 / 2), int(self.seq_len2 / 2)),
+            filter_size=(self.seq_len1, self.seq_len2),
+            bias_attr=fluid.ParamAttr(
+                initializer=fluid.initializer.Constant(0.1)))
+        conv = conv(emb_expanded)
+
+        if mask is not None:
+            cross_mask = fluid.layers.stack(x=[mask] * self.kernel_size, axis=1)
+            conv = cross_mask * conv + (1 - cross_mask) * (-2**32 + 1)
+        # valid padding
+        pool = fluid.layers.pool2d(
+            input=conv,
+            pool_size=[
+                int(self.seq_len1 / self.dpool_size1),
+                int(self.seq_len2 / self.dpool_size2)
+            ],
+            pool_stride=[
+                int(self.seq_len1 / self.dpool_size1),
+                int(self.seq_len2 / self.dpool_size2)
+            ],
+            pool_type="max")
+
+        relu = fluid.layers.relu(pool)
+        return relu
+
+    def get_cross_mask(self, left_lens, right_lens):
+        """
+        cross mask
+        """
+        mask1 = fluid.layers.sequence_mask(
+            x=left_lens, dtype='float32', maxlen=self.seq_len1 + 1)
+        mask2 = fluid.layers.sequence_mask(
+            x=right_lens, dtype='float32', maxlen=self.seq_len2 + 1)
+
+        mask1 = fluid.layers.transpose(x=mask1, perm=[0, 2, 1])
+        cross_mask = fluid.layers.matmul(x=mask1, y=mask2)
+        return cross_mask
+
+    def predict(self, left, right):
+        """
+        Forward network
+        """
+        left_emb = self.embedding_layer(left, zero_pad=True, scale=False)
+        right_emb = self.embedding_layer(right, zero_pad=True, scale=False)
+
+        bi_left_outputs = self.bi_dynamic_lstm(
+            input=left_emb, hidden_size=self.lstm_dim)
+        left_seq_encoder = fluid.layers.concat(input=bi_left_outputs, axis=1)
+        bi_right_outputs = self.bi_dynamic_lstm(
+            input=right_emb, hidden_size=self.lstm_dim)
+        right_seq_encoder = fluid.layers.concat(input=bi_right_outputs, axis=1)
+
+        pad_value = fluid.layers.assign(input=np.array([0]).astype("float32"))
+        left_seq_encoder, left_lens = fluid.layers.sequence_pad(
+            x=left_seq_encoder, pad_value=pad_value, maxlen=self.seq_len1)
+        right_seq_encoder, right_lens = fluid.layers.sequence_pad(
+            x=right_seq_encoder, pad_value=pad_value, maxlen=self.seq_len2)
+
+        cross = fluid.layers.matmul(
+            left_seq_encoder, right_seq_encoder, transpose_y=True)
+        if self.match_mask:
+            cross_mask = self.get_cross_mask(left_lens, right_lens)
+        else:
+            cross_mask = None
+
+        conv_pool_relu = self.conv_pool_relu_layer(input=cross, mask=cross_mask)
+        relu_hid1 = Linear(
+            input_dim=conv_pool_relu.shape[-1],
+            output_dim=self.hidden_size)
+        relu_hid1 = relu_hid1(conv_pool_relu)
+        relu_hid1 = fluid.layers.tanh(relu_hid1)
+
+        pred_fc = Linear(
+            input_dim=relu_hid1.shape[-1],
+            output_dim=self.out_size)
+        pred = pred_fc(relu_hid1)
+
+        pred = fluid.layers.softmax(pred)
+
+        return left_seq_encoder, pred
diff --git a/dygraph/similarity_net/model_check.py b/dygraph/similarity_net/model_check.py
new file mode 100644
index 00000000..51713452
--- /dev/null
+++ b/dygraph/similarity_net/model_check.py
@@ -0,0 +1,73 @@
+#encoding=utf8
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import paddle
+import paddle.fluid as fluid
+
+
+def check_cuda(use_cuda, err = \
+    "\nYou can not set use_cuda = True in the model because you are using paddlepaddle-cpu.\n \
+    Please: 1. Install paddlepaddle-gpu to run your models on GPU or 2. Set use_cuda = False to run models on CPU.\n"
+    ):
+    """
+    Log an error and exit when use_cuda=True is set with a CPU-only
+    PaddlePaddle build.
+    """
+    try:
+        if use_cuda and not fluid.is_compiled_with_cuda():
+            print(err)
+            sys.exit(1)
+    except Exception:
+        pass
+
+
+def check_version():
+    """
+    Log an error and exit when the installed version of paddlepaddle is
+    not satisfied.
+    """
+    err = "PaddlePaddle version 1.6 or higher is required, " \
+          "or a suitable develop version is satisfied as well. \n" \
+          "Please make sure the version is good with your code."
+
+    try:
+        fluid.require_version('1.6.0')
+    except Exception:
+        print(err)
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    check_cuda(True)
+
+    check_cuda(False)
+
+    check_cuda(True, "This is only for testing.")
diff --git a/dygraph/similarity_net/nets/__init__.py b/dygraph/similarity_net/nets/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/dygraph/similarity_net/nets/base_layers.py b/dygraph/similarity_net/nets/base_layers.py
new file mode 100644
index 00000000..cd055315
--- /dev/null
+++ b/dygraph/similarity_net/nets/base_layers.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+base layers
+"""
+
+from paddle.fluid import layers
+from paddle.fluid.dygraph import Layer
+from paddle.fluid.dygraph import GRUUnit
+from paddle.fluid.dygraph.base import to_variable
+
+
+class DynamicGRU(Layer):
+    """
+    A GRU that unrolls GRUUnit over the time dimension of a padded batch,
+    optionally consuming the sequence in reverse order.
+    """
+
+    def __init__(self,
+                 size,
+                 param_attr=None,
+                 bias_attr=None,
+                 is_reverse=False,
+                 gate_activation='sigmoid',
+                 candidate_activation='tanh',
+                 h_0=None,
+                 origin_mode=False,
+                 init_size=None):
+        super(DynamicGRU, self).__init__()
+        self.gru_unit = GRUUnit(
+            size * 3,
+            param_attr=param_attr,
+            bias_attr=bias_attr,
+            activation=candidate_activation,
+            gate_activation=gate_activation,
+            origin_mode=origin_mode)
+        self.size = size
+        self.h_0 = h_0
+        self.is_reverse = is_reverse
+
+    def forward(self, inputs):
+        hidden = self.h_0
+        res = []
+        for i in range(inputs.shape[1]):
+            if self.is_reverse:
+                i = inputs.shape[1] - 1 - i
+            input_ = inputs[:, i:i + 1, :]
+            input_ = layers.reshape(input_, [-1, input_.shape[2]], inplace=False)
+            hidden, reset, gate = self.gru_unit(input_, hidden)
+            hidden_ = layers.reshape(hidden, [-1, 1, hidden.shape[1]], inplace=False)
+            res.append(hidden_)
+        if self.is_reverse:
+            res = res[::-1]
+        res = layers.concat(res, axis=1)
+        return res
\ No newline at end of file
diff --git a/dygraph/similarity_net/nets/bow.py b/dygraph/similarity_net/nets/bow.py
new file mode 100644
index 00000000..d2897419
--- /dev/null
+++ b/dygraph/similarity_net/nets/bow.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" +bow class +""" + +import paddle_layers as layers +from paddle import fluid +from paddle.fluid.dygraph.base import to_variable +from paddle.fluid.dygraph import Layer, Embedding +import paddle.fluid.param_attr as attr +uniform_initializer = lambda x: fluid.initializer.UniformInitializer(low=-x, high=x) +class BOW(Layer): + """ + BOW + """ + + def __init__(self, conf_dict): + """ + initialize + """ + super(BOW, self).__init__() + self.dict_size = conf_dict["dict_size"] + self.task_mode = conf_dict["task_mode"] + self.emb_dim = conf_dict["net"]["emb_dim"] + self.bow_dim = conf_dict["net"]["bow_dim"] + self.seq_len = 5 + self.emb_layer = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb").ops() + self.bow_layer = layers.FCLayer(self.bow_dim, None, "fc").ops() + self.softmax_layer = layers.FCLayer(2, "softmax", "cos_sim").ops() + + def forward(self, left, right): + """ + Forward network + """ + + # embedding layer + + left_emb = self.emb_layer(left) + right_emb = self.emb_layer(right) + left_emb = fluid.layers.reshape( + left_emb, shape=[-1, self.seq_len, self.bow_dim]) + right_emb = fluid.layers.reshape( + right_emb, shape=[-1, self.seq_len, self.bow_dim]) + + bow_left = fluid.layers.reduce_sum(left_emb, dim=1) + bow_right = fluid.layers.reduce_sum(right_emb, dim=1) + softsign_layer = layers.SoftsignLayer() + left_soft = softsign_layer.ops(bow_left) + right_soft = softsign_layer.ops(bow_right) + + # matching layer + if self.task_mode == "pairwise": + left_bow = self.bow_layer(left_soft) + right_bow = self.bow_layer(right_soft) + cos_sim_layer = layers.CosSimLayer() + pred = cos_sim_layer.ops(left_bow, right_bow) + return left_bow, pred + else: + concat_layer = layers.ConcatLayer(1) + concat = concat_layer.ops([left_soft, right_soft]) + concat_fc = self.bow_layer(concat) + pred = self.softmax_layer(concat_fc) + return left_soft, pred diff --git a/dygraph/similarity_net/nets/cnn.py b/dygraph/similarity_net/nets/cnn.py new file mode 100644 index 00000000..8e7951ac --- /dev/null +++ b/dygraph/similarity_net/nets/cnn.py @@ -0,0 +1,80 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +cnn class +""" + +import paddle_layers as layers +from paddle import fluid +from paddle.fluid.dygraph import Layer + +class CNN(Layer): + """ + CNN + """ + + def __init__(self, conf_dict): + """ + initialize + """ + super(CNN, self).__init__() + self.dict_size = conf_dict["dict_size"] + self.task_mode = conf_dict["task_mode"] + self.emb_dim = conf_dict["net"]["emb_dim"] + self.filter_size = conf_dict["net"]["filter_size"] + self.num_filters = conf_dict["net"]["num_filters"] + self.hidden_dim = conf_dict["net"]["hidden_dim"] + self.seq_len = 5 + self.channels = 1 + + # layers + self.emb_layer = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb").ops() + self.fc_layer = layers.FCLayer(self.hidden_dim, None, "fc").ops() + self.softmax_layer = layers.FCLayer(2, "softmax", "cos_sim").ops() + self.cnn_layer = layers.SimpleConvPool( + self.channels, + self.num_filters, + self.filter_size) + + + def forward(self, left, right): + """ + Forward network + """ + # embedding layer + + left_emb = self.emb_layer(left) + right_emb = self.emb_layer(right) + # Presentation context + + left_emb = fluid.layers.reshape( + left_emb, shape=[-1, self.channels, self.seq_len, self.hidden_dim]) + right_emb = fluid.layers.reshape( + right_emb, shape=[-1, self.channels, self.seq_len, self.hidden_dim]) + + left_cnn = self.cnn_layer(left_emb) + right_cnn = self.cnn_layer(right_emb) + # matching layer + if self.task_mode == "pairwise": + left_fc = self.fc_layer(left_cnn) + right_fc = self.fc_layer(right_cnn) + cos_sim_layer = layers.CosSimLayer() + pred = cos_sim_layer.ops(left_fc, right_fc) + return left_fc, pred + else: + concat_layer = layers.ConcatLayer(1) + concat = concat_layer.ops([left_cnn, right_cnn]) + concat_fc = self.fc_layer(concat) + pred = self.softmax_layer(concat_fc) + return left_cnn, pred diff --git a/dygraph/similarity_net/nets/copy.py b/dygraph/similarity_net/nets/copy.py new file mode 100644 index 00000000..17fe99ca --- /dev/null +++ b/dygraph/similarity_net/nets/copy.py @@ -0,0 +1,762 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.fluid import layers, unique_name +from paddle.fluid.dygraph import Layer +from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper +from paddle.fluid.layers.control_flow import StaticRNN + +__all__ = ['BasicGRUUnit', 'basic_gru', 'BasicLSTMUnit', 'basic_lstm'] + + +class BasicGRUUnit(Layer): + """ + **** + BasicGRUUnit class, using basic operators to build GRU + The algorithm can be described as the equations below. + + .. math:: + u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u) + + r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r) + + m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m) + + h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t) + + Args: + name_scope(string) : The name scope used to identify parameters and biases + hidden_size (integer): The hidden size used in the Unit. 
+ param_attr(ParamAttr|None): The parameter attribute for the learnable + weight matrix. Note: + If it is set to None or one attribute of ParamAttr, gru_unit will + create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr (ParamAttr|None): The parameter attribute for the bias + of GRU unit. + If it is set to None or one attribute of ParamAttr, gru_unit will + create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. Default: None. + gate_activation (function|None): The activation function for gates (actGate). + Default: 'fluid.layers.sigmoid' + activation (function|None): The activation function for cell (actNode). + Default: 'fluid.layers.tanh' + dtype(string): data type used in this unit + + Examples: + + .. code-block:: python + + import paddle.fluid.layers as layers + from paddle.fluid.contrib.layers import BasicGRUUnit + + input_size = 128 + hidden_size = 256 + input = layers.data( name = "input", shape = [-1, input_size], dtype='float32') + pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32') + + gru_unit = BasicGRUUnit( "gru_unit", hidden_size ) + + new_hidden = gru_unit( input, pre_hidden ) + + """ + + def __init__(self, + name_scope, + hidden_size, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + dtype='float32'): + super(BasicGRUUnit, self).__init__(name_scope, dtype) + # reserve old school _full_name and _helper for static graph save load + self._full_name = unique_name.generate(name_scope + "/" + + self.__class__.__name__) + self._helper = LayerObjectHelper(self._full_name) + + self._name = name_scope + self._hiden_size = hidden_size + self._param_attr = param_attr + self._bias_attr = bias_attr + self._gate_activation = gate_activation or layers.sigmoid + self._activation = activation or layers.tanh + self._dtype = dtype + + def _build_once(self, input, pre_hidden): + self._input_size = input.shape[-1] + assert (self._input_size > 0) + + self._gate_weight = self.create_parameter( + attr=self._param_attr, + shape=[self._input_size + self._hiden_size, 2 * self._hiden_size], + dtype=self._dtype) + + self._candidate_weight = self.create_parameter( + attr=self._param_attr, + shape=[self._input_size + self._hiden_size, self._hiden_size], + dtype=self._dtype) + + self._gate_bias = self.create_parameter( + attr=self._bias_attr, + shape=[2 * self._hiden_size], + dtype=self._dtype, + is_bias=True) + self._candidate_bias = self.create_parameter( + attr=self._bias_attr, + shape=[self._hiden_size], + dtype=self._dtype, + is_bias=True) + + def forward(self, input, pre_hidden): + concat_input_hidden = layers.concat([input, pre_hidden], 1) + + gate_input = layers.matmul(x=concat_input_hidden, y=self._gate_weight) + + gate_input = layers.elementwise_add(gate_input, self._gate_bias) + + gate_input = self._gate_activation(gate_input) + r, u = layers.split(gate_input, num_or_sections=2, dim=1) + + r_hidden = r * pre_hidden + + candidate = layers.matmul( + layers.concat([input, r_hidden], 1), self._candidate_weight) + candidate = layers.elementwise_add(candidate, self._candidate_bias) + + c = self._activation(candidate) + new_hidden = u * pre_hidden + (1 - u) * c + + return new_hidden + + +def basic_gru(input, + init_hidden, + hidden_size, + num_layers=1, + sequence_length=None, + dropout_prob=0.0, + bidirectional=False, + batch_first=True, + param_attr=None, + bias_attr=None, + 
gate_activation=None, + activation=None, + dtype='float32', + name='basic_gru'): + """ + GRU implementation using basic operator, supports multiple layers and bidirection gru. + + .. math:: + u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u) + + r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r) + + m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m) + + h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t) + + Args: + input (Variable): GRU input tensor, + if batch_first = False, shape should be ( seq_len x batch_size x input_size ) + if batch_first = True, shape should be ( batch_size x seq_len x hidden_size ) + init_hidden(Variable|None): The initial hidden state of the GRU + This is a tensor with shape ( num_layers x batch_size x hidden_size) + if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size) + and can be reshaped to tensor with ( num_layers x 2 x batch_size x hidden_size) to use. + If it's None, it will be set to all 0. + hidden_size (int): Hidden size of the GRU + num_layers (int): The total number of layers of the GRU + sequence_length (Variabe|None): A Tensor (shape [batch_size]) stores each real length of each instance, + This tensor will be convert to a mask to mask the padding ids + If it's None means NO padding ids + dropout_prob(float|0.0): Dropout prob, dropout ONLY works after rnn output of earch layers, + NOT between time steps + bidirectional (bool|False): If it is bidirectional + batch_first (bool|True): The shape format of the input and output tensors. If true, + the shape format should be :attr:`[batch_size, seq_len, hidden_size]`. If false, + the shape format should be :attr:`[seq_len, batch_size, hidden_size]`. By default + this function accepts input and emits output in batch-major form to be consistent + with most of data format, though a bit less efficient because of extra transposes. + param_attr(ParamAttr|None): The parameter attribute for the learnable + weight matrix. Note: + If it is set to None or one attribute of ParamAttr, gru_unit will + create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr (ParamAttr|None): The parameter attribute for the bias + of GRU unit. + If it is set to None or one attribute of ParamAttr, gru_unit will + create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. Default: None. + gate_activation (function|None): The activation function for gates (actGate). + Default: 'fluid.layers.sigmoid' + activation (function|None): The activation function for cell (actNode). + Default: 'fluid.layers.tanh' + dtype(string): data type used in this unit + name(string): name used to identify parameters and biases + + Returns: + rnn_out(Tensor),last_hidden(Tensor) + - rnn_out is result of GRU hidden, with shape (seq_len x batch_size x hidden_size) \ + if is_bidirec set to True, shape will be ( seq_len x batch_sze x hidden_size*2) + - last_hidden is the hidden state of the last step of GRU \ + shape is ( num_layers x batch_size x hidden_size ) \ + if is_bidirec set to True, shape will be ( num_layers*2 x batch_size x hidden_size), + can be reshaped to a tensor with shape( num_layers x 2 x batch_size x hidden_size) + + Examples: + .. 
code-block:: python + + import paddle.fluid.layers as layers + from paddle.fluid.contrib.layers import basic_gru + + batch_size = 20 + input_size = 128 + hidden_size = 256 + num_layers = 2 + dropout = 0.5 + bidirectional = True + batch_first = False + + input = layers.data( name = "input", shape = [-1, batch_size, input_size], dtype='float32') + pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32') + sequence_length = layers.data( name="sequence_length", shape=[-1], dtype='int32') + + + rnn_out, last_hidden = basic_gru( input, pre_hidden, hidden_size, num_layers = num_layers, \ + sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \ + batch_first = batch_first) + + """ + + fw_unit_list = [] + + for i in range(num_layers): + new_name = name + "_layers_" + str(i) + fw_unit_list.append( + BasicGRUUnit(new_name, hidden_size, param_attr, bias_attr, + gate_activation, activation, dtype)) + if bidirectional: + bw_unit_list = [] + + for i in range(num_layers): + new_name = name + "_reverse_layers_" + str(i) + bw_unit_list.append( + BasicGRUUnit(new_name, hidden_size, param_attr, bias_attr, + gate_activation, activation, dtype)) + + if batch_first: + input = layers.transpose(input, [1, 0, 2]) + + mask = None + if sequence_length: + max_seq_len = layers.shape(input)[0] + mask = layers.sequence_mask( + sequence_length, maxlen=max_seq_len, dtype='float32') + mask = layers.transpose(mask, [1, 0]) + + direc_num = 1 + if bidirectional: + direc_num = 2 + if init_hidden: + init_hidden = layers.reshape( + init_hidden, shape=[num_layers, direc_num, -1, hidden_size]) + + def get_single_direction_output(rnn_input, + unit_list, + mask=None, + direc_index=0): + rnn = StaticRNN() + with rnn.step(): + step_input = rnn.step_input(rnn_input) + + if mask: + step_mask = rnn.step_input(mask) + + for i in range(num_layers): + if init_hidden: + pre_hidden = rnn.memory(init=init_hidden[i, direc_index]) + else: + pre_hidden = rnn.memory( + batch_ref=rnn_input, + shape=[-1, hidden_size], + ref_batch_dim_idx=1) + + new_hidden = unit_list[i](step_input, pre_hidden) + + if mask: + new_hidden = layers.elementwise_mul( + new_hidden, step_mask, axis=0) - layers.elementwise_mul( + pre_hidden, (step_mask - 1), axis=0) + rnn.update_memory(pre_hidden, new_hidden) + + rnn.step_output(new_hidden) + + step_input = new_hidden + if dropout_prob != None and dropout_prob > 0.0: + step_input = layers.dropout( + step_input, + dropout_prob=dropout_prob, ) + + rnn.step_output(step_input) + + rnn_out = rnn() + + last_hidden_array = [] + rnn_output = rnn_out[-1] + for i in range(num_layers): + last_hidden = rnn_out[i] + last_hidden = last_hidden[-1] + last_hidden_array.append(last_hidden) + + last_hidden_output = layers.concat(last_hidden_array, axis=0) + last_hidden_output = layers.reshape( + last_hidden_output, shape=[num_layers, -1, hidden_size]) + + return rnn_output, last_hidden_output + # seq_len, batch_size, hidden_size + + fw_rnn_out, fw_last_hidden = get_single_direction_output( + input, fw_unit_list, mask, direc_index=0) + + if bidirectional: + bw_input = layers.reverse(input, axis=[0]) + bw_mask = None + if mask: + bw_mask = layers.reverse(mask, axis=[0]) + bw_rnn_out, bw_last_hidden = get_single_direction_output( + bw_input, bw_unit_list, bw_mask, direc_index=1) + + bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0]) + + rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2) + last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1) + + last_hidden = 
layers.reshape( + last_hidden, shape=[num_layers * direc_num, -1, hidden_size]) + + if batch_first: + rnn_out = layers.transpose(rnn_out, [1, 0, 2]) + return rnn_out, last_hidden + else: + + rnn_out = fw_rnn_out + last_hidden = fw_last_hidden + + if batch_first: + rnn_out = layers.transpose(rnn_out, [1, 0, 2]) + + return rnn_out, last_hidden + + +def basic_lstm(input, + init_hidden, + init_cell, + hidden_size, + num_layers=1, + sequence_length=None, + dropout_prob=0.0, + bidirectional=False, + batch_first=True, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + forget_bias=1.0, + dtype='float32', + name='basic_lstm'): + """ + LSTM implementation using basic operators, supports multiple layers and bidirection LSTM. + + .. math:: + i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i) + + f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget_bias ) + + o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o) + + \\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c) + + c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t} + + h_t &= o_t \odot tanh(c_t) + + Args: + input (Variable): lstm input tensor, + if batch_first = False, shape should be ( seq_len x batch_size x input_size ) + if batch_first = True, shape should be ( batch_size x seq_len x hidden_size ) + init_hidden(Variable|None): The initial hidden state of the LSTM + This is a tensor with shape ( num_layers x batch_size x hidden_size) + if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size) + and can be reshaped to a tensor with shape ( num_layers x 2 x batch_size x hidden_size) to use. + If it's None, it will be set to all 0. + init_cell(Variable|None): The initial hidden state of the LSTM + This is a tensor with shape ( num_layers x batch_size x hidden_size) + if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size) + and can be reshaped to a tensor with shape ( num_layers x 2 x batch_size x hidden_size) to use. + If it's None, it will be set to all 0. + hidden_size (int): Hidden size of the LSTM + num_layers (int): The total number of layers of the LSTM + sequence_length (Variabe|None): A tensor (shape [batch_size]) stores each real length of each instance, + This tensor will be convert to a mask to mask the padding ids + If it's None means NO padding ids + dropout_prob(float|0.0): Dropout prob, dropout ONLY work after rnn output of earch layers, + NOT between time steps + bidirectional (bool|False): If it is bidirectional + batch_first (bool|True): The shape format of the input and output tensors. If true, + the shape format should be :attr:`[batch_size, seq_len, hidden_size]`. If false, + the shape format should be :attr:`[seq_len, batch_size, hidden_size]`. By default + this function accepts input and emits output in batch-major form to be consistent + with most of data format, though a bit less efficient because of extra transposes. + param_attr(ParamAttr|None): The parameter attribute for the learnable + weight matrix. Note: + If it is set to None or one attribute of ParamAttr, lstm_unit will + create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr (ParamAttr|None): The parameter attribute for the bias + of LSTM unit. + If it is set to None or one attribute of ParamAttr, lstm_unit will + create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. Default: None. 
+ gate_activation (function|None): The activation function for gates (actGate). + Default: 'fluid.layers.sigmoid' + activation (function|None): The activation function for cell (actNode). + Default: 'fluid.layers.tanh' + forget_bias (float|1.0) : Forget bias used to compute the forget gate + dtype(string): Data type used in this unit + name(string): Name used to identify parameters and biases + + Returns: + rnn_out(Tensor), last_hidden(Tensor), last_cell(Tensor) + - rnn_out is the result of LSTM hidden, shape is (seq_len x batch_size x hidden_size) \ + if is_bidirec set to True, it's shape will be ( seq_len x batch_sze x hidden_size*2) + - last_hidden is the hidden state of the last step of LSTM \ + with shape ( num_layers x batch_size x hidden_size ) \ + if is_bidirec set to True, it's shape will be ( num_layers*2 x batch_size x hidden_size), + and can be reshaped to a tensor ( num_layers x 2 x batch_size x hidden_size) to use. + - last_cell is the hidden state of the last step of LSTM \ + with shape ( num_layers x batch_size x hidden_size ) \ + if is_bidirec set to True, it's shape will be ( num_layers*2 x batch_size x hidden_size), + and can be reshaped to a tensor ( num_layers x 2 x batch_size x hidden_size) to use. + + Examples: + .. code-block:: python + + import paddle.fluid.layers as layers + from paddle.fluid.contrib.layers import basic_lstm + + batch_size = 20 + input_size = 128 + hidden_size = 256 + num_layers = 2 + dropout = 0.5 + bidirectional = True + batch_first = False + + input = layers.data( name = "input", shape = [-1, batch_size, input_size], dtype='float32') + pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32') + pre_cell = layers.data( name = "pre_cell", shape=[-1, hidden_size], dtype='float32') + sequence_length = layers.data( name="sequence_length", shape=[-1], dtype='int32') + + rnn_out, last_hidden, last_cell = basic_lstm( input, pre_hidden, pre_cell, \ + hidden_size, num_layers = num_layers, \ + sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \ + batch_first = batch_first) + + """ + fw_unit_list = [] + + for i in range(num_layers): + new_name = name + "_layers_" + str(i) + fw_unit_list.append( + BasicLSTMUnit( + new_name, + hidden_size, + param_attr=param_attr, + bias_attr=bias_attr, + gate_activation=gate_activation, + activation=activation, + forget_bias=forget_bias, + dtype=dtype)) + if bidirectional: + bw_unit_list = [] + + for i in range(num_layers): + new_name = name + "_reverse_layers_" + str(i) + bw_unit_list.append( + BasicLSTMUnit( + new_name, + hidden_size, + param_attr=param_attr, + bias_attr=bias_attr, + gate_activation=gate_activation, + activation=activation, + forget_bias=forget_bias, + dtype=dtype)) + + if batch_first: + input = layers.transpose(input, [1, 0, 2]) + + mask = None + if sequence_length: + max_seq_len = layers.shape(input)[0] + mask = layers.sequence_mask( + sequence_length, maxlen=max_seq_len, dtype='float32') + + mask = layers.transpose(mask, [1, 0]) + + direc_num = 1 + if bidirectional: + direc_num = 2 + # convert to [num_layers, 2, batch_size, hidden_size] + if init_hidden: + init_hidden = layers.reshape( + init_hidden, shape=[num_layers, direc_num, -1, hidden_size]) + init_cell = layers.reshape( + init_cell, shape=[num_layers, direc_num, -1, hidden_size]) + + # forward direction + def get_single_direction_output(rnn_input, + unit_list, + mask=None, + direc_index=0): + rnn = StaticRNN() + with rnn.step(): + step_input = rnn.step_input(rnn_input) + + if 
mask: + step_mask = rnn.step_input(mask) + + for i in range(num_layers): + if init_hidden: + pre_hidden = rnn.memory(init=init_hidden[i, direc_index]) + pre_cell = rnn.memory(init=init_cell[i, direc_index]) + else: + pre_hidden = rnn.memory( + batch_ref=rnn_input, shape=[-1, hidden_size]) + pre_cell = rnn.memory( + batch_ref=rnn_input, shape=[-1, hidden_size]) + + new_hidden, new_cell = unit_list[i](step_input, pre_hidden, + pre_cell) + + if mask: + new_hidden = layers.elementwise_mul( + new_hidden, step_mask, axis=0) - layers.elementwise_mul( + pre_hidden, (step_mask - 1), axis=0) + new_cell = layers.elementwise_mul( + new_cell, step_mask, axis=0) - layers.elementwise_mul( + pre_cell, (step_mask - 1), axis=0) + + rnn.update_memory(pre_hidden, new_hidden) + rnn.update_memory(pre_cell, new_cell) + + rnn.step_output(new_hidden) + rnn.step_output(new_cell) + + step_input = new_hidden + if dropout_prob != None and dropout_prob > 0.0: + step_input = layers.dropout( + step_input, + dropout_prob=dropout_prob, + dropout_implementation='upscale_in_train') + + rnn.step_output(step_input) + + rnn_out = rnn() + + last_hidden_array = [] + last_cell_array = [] + rnn_output = rnn_out[-1] + for i in range(num_layers): + last_hidden = rnn_out[i * 2] + last_hidden = last_hidden[-1] + last_hidden_array.append(last_hidden) + last_cell = rnn_out[i * 2 + 1] + last_cell = last_cell[-1] + last_cell_array.append(last_cell) + + last_hidden_output = layers.concat(last_hidden_array, axis=0) + last_hidden_output = layers.reshape( + last_hidden_output, shape=[num_layers, -1, hidden_size]) + last_cell_output = layers.concat(last_cell_array, axis=0) + last_cell_output = layers.reshape( + last_cell_output, shape=[num_layers, -1, hidden_size]) + + return rnn_output, last_hidden_output, last_cell_output + # seq_len, batch_size, hidden_size + + fw_rnn_out, fw_last_hidden, fw_last_cell = get_single_direction_output( + input, fw_unit_list, mask, direc_index=0) + + if bidirectional: + bw_input = layers.reverse(input, axis=[0]) + bw_mask = None + if mask: + bw_mask = layers.reverse(mask, axis=[0]) + bw_rnn_out, bw_last_hidden, bw_last_cell = get_single_direction_output( + bw_input, bw_unit_list, bw_mask, direc_index=1) + + bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0]) + + rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2) + last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1) + last_hidden = layers.reshape( + last_hidden, shape=[num_layers * direc_num, -1, hidden_size]) + + last_cell = layers.concat([fw_last_cell, bw_last_cell], axis=1) + last_cell = layers.reshape( + last_cell, shape=[num_layers * direc_num, -1, hidden_size]) + + if batch_first: + rnn_out = layers.transpose(rnn_out, [1, 0, 2]) + return rnn_out, last_hidden, last_cell + else: + + rnn_out = fw_rnn_out + last_hidden = fw_last_hidden + last_cell = fw_last_cell + + if batch_first: + rnn_out = layers.transpose(rnn_out, [1, 0, 2]) + + return rnn_out, last_hidden, last_cell + + +class BasicLSTMUnit(Layer): + """ + **** + BasicLSTMUnit class, Using basic operator to build LSTM + The algorithm can be described as the code below. + + .. math:: + + i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i) + + f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget_bias ) + + o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o) + + \\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c) + + c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t} + + h_t &= o_t \odot tanh(c_t) + + - $W$ terms denote weight matrices (e.g. 
$W_{ix}$ is the matrix + of weights from the input gate to the input) + - The b terms denote bias vectors ($bx_i$ and $bh_i$ are the input gate bias vector). + - sigmoid is the logistic sigmoid function. + - $i, f, o$ and $c$ are the input gate, forget gate, output gate, + and cell activation vectors, respectively, all of which have the same size as + the cell output activation vector $h$. + - The :math:`\odot` is the element-wise product of the vectors. + - :math:`tanh` is the activation functions. + - :math:`\\tilde{c_t}` is also called candidate hidden state, + which is computed based on the current input and the previous hidden state. + + Args: + name_scope(string) : The name scope used to identify parameter and bias name + hidden_size (integer): The hidden size used in the Unit. + param_attr(ParamAttr|None): The parameter attribute for the learnable + weight matrix. Note: + If it is set to None or one attribute of ParamAttr, lstm_unit will + create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr (ParamAttr|None): The parameter attribute for the bias + of LSTM unit. + If it is set to None or one attribute of ParamAttr, lstm_unit will + create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized as zero. Default: None. + gate_activation (function|None): The activation function for gates (actGate). + Default: 'fluid.layers.sigmoid' + activation (function|None): The activation function for cells (actNode). + Default: 'fluid.layers.tanh' + forget_bias(float|1.0): forget bias used when computing forget gate + dtype(string): data type used in this unit + + Examples: + + .. code-block:: python + + import paddle.fluid.layers as layers + from paddle.fluid.contrib.layers import BasicLSTMUnit + + input_size = 128 + hidden_size = 256 + input = layers.data( name = "input", shape = [-1, input_size], dtype='float32') + pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32') + pre_cell = layers.data( name = "pre_cell", shape=[-1, hidden_size], dtype='float32') + + lstm_unit = BasicLSTMUnit( "gru_unit", hidden_size) + + new_hidden, new_cell = lstm_unit( input, pre_hidden, pre_cell ) + + """ + + def __init__(self, + name_scope, + hidden_size, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + forget_bias=1.0, + dtype='float32'): + super(BasicLSTMUnit, self).__init__(name_scope, dtype) + # reserve old school _full_name and _helper for static graph save load + self._full_name = unique_name.generate(name_scope + "/" + + self.__class__.__name__) + self._helper = LayerObjectHelper(self._full_name) + + self._name = name_scope + self._hiden_size = hidden_size + self._param_attr = param_attr + self._bias_attr = bias_attr + self._gate_activation = gate_activation or layers.sigmoid + self._activation = activation or layers.tanh + self._forget_bias = layers.fill_constant( + [1], dtype=dtype, value=forget_bias) + self._forget_bias.stop_gradient = False + self._dtype = dtype + + def _build_once(self, input, pre_hidden, pre_cell): + self._input_size = input.shape[-1] + assert (self._input_size > 0) + + self._weight = self.create_parameter( + attr=self._param_attr, + shape=[self._input_size + self._hiden_size, 4 * self._hiden_size], + dtype=self._dtype) + + self._bias = self.create_parameter( + attr=self._bias_attr, + shape=[4 * self._hiden_size], + dtype=self._dtype, + is_bias=True) + + def forward(self, input, 
pre_hidden, pre_cell): + concat_input_hidden = layers.concat([input, pre_hidden], 1) + gate_input = layers.matmul(x=concat_input_hidden, y=self._weight) + + gate_input = layers.elementwise_add(gate_input, self._bias) + i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1) + new_cell = layers.elementwise_add( + layers.elementwise_mul( + pre_cell, + layers.sigmoid(layers.elementwise_add(f, self._forget_bias))), + layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j))) + new_hidden = layers.tanh(new_cell) * layers.sigmoid(o) + + return new_hidden, new_cell diff --git a/dygraph/similarity_net/nets/gru.py b/dygraph/similarity_net/nets/gru.py new file mode 100644 index 00000000..eb7e1bd1 --- /dev/null +++ b/dygraph/similarity_net/nets/gru.py @@ -0,0 +1,86 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +gru class +""" + +import paddle_layers as layers +from paddle.fluid.dygraph.base import to_variable +from paddle.fluid.dygraph.nn import Linear +from paddle.fluid.dygraph import Layer +from paddle import fluid +import numpy as np + +class GRU(Layer): + """ + GRU + """ + + def __init__(self, conf_dict): + """ + initialize + """ + super(GRU, self).__init__() + self.dict_size = conf_dict["dict_size"] + self.task_mode = conf_dict["task_mode"] + self.emb_dim = conf_dict["net"]["emb_dim"] + self.gru_dim = conf_dict["net"]["gru_dim"] + self.hidden_dim = conf_dict["net"]["hidden_dim"] + self.emb_layer = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb").ops() + + self.gru_layer = layers.DynamicGRULayer(self.gru_dim, "gru").ops() + self.fc_layer = layers.FCLayer(self.hidden_dim, None, "fc").ops() + self.proj_layer = Linear(input_dim = self.hidden_dim, output_dim=self.gru_dim*3) + self.softmax_layer = layers.FCLayer(2, "softmax", "cos_sim").ops() + self.seq_len=5 + + def forward(self, left, right): + """ + Forward network + """ + # embedding layer + left_emb = self.emb_layer(left) + right_emb = self.emb_layer(right) + # Presentation context + left_emb = self.proj_layer(left_emb) + right_emb = self.proj_layer(right_emb) + + h_0 = np.zeros((left_emb.shape[0], self.hidden_dim), dtype="float32") + h_0 = to_variable(h_0) + left_gru = self.gru_layer(left_emb, h_0=h_0) + right_gru = self.gru_layer(right_emb, h_0=h_0) + left_emb = fluid.layers.reduce_max(left_gru, dim=1) + right_emb = fluid.layers.reduce_max(right_gru, dim=1) + left_emb = fluid.layers.reshape( + left_emb, shape=[-1, self.seq_len, self.hidden_dim]) + right_emb = fluid.layers.reshape( + right_emb, shape=[-1, self.seq_len, self.hidden_dim]) + left_emb = fluid.layers.reduce_sum(left_emb, dim=1) + right_emb = fluid.layers.reduce_sum(right_emb, dim=1) + + left_last = fluid.layers.tanh(left_emb) + right_last = fluid.layers.tanh(right_emb) + + if self.task_mode == "pairwise": + left_fc = self.fc_layer(left_last) + right_fc = self.fc_layer(right_last) + cos_sim_layer = layers.CosSimLayer() + pred = cos_sim_layer.ops(left_fc, right_fc) + return left_fc, pred + else: + 
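+            # pointwise mode: concatenate the two sentence vectors and score
+            # them with a two-way softmax classifier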
+            concat_layer = layers.ConcatLayer(1)
+            concat = concat_layer.ops([left_last, right_last])
+            concat_fc = self.fc_layer(concat)
+            pred = self.softmax_layer(concat_fc)
+            return left_last, pred
diff --git a/dygraph/similarity_net/nets/losses/__init__.py b/dygraph/similarity_net/nets/losses/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/dygraph/similarity_net/nets/losses/hinge_loss.py b/dygraph/similarity_net/nets/losses/hinge_loss.py
new file mode 100644
index 00000000..ee3c9ef4
--- /dev/null
+++ b/dygraph/similarity_net/nets/losses/hinge_loss.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+hinge loss
+"""
+
+import sys
+
+sys.path.append("../")
+import nets.paddle_layers as layers
+
+
+class HingeLoss(object):
+    """
+    Hinge loss calculation class
+    """
+
+    def __init__(self, conf_dict):
+        """
+        initialize
+        """
+        self.margin = conf_dict["loss"]["margin"]
+
+    def compute(self, pos, neg):
+        """
+        compute loss: mean(max(0, neg - pos + margin))
+        """
+        elementwise_max = layers.ElementwiseMaxLayer()
+        elementwise_add = layers.ElementwiseAddLayer()
+        elementwise_sub = layers.ElementwiseSubLayer()
+        constant = layers.ConstantLayer()
+        reduce_mean = layers.ReduceMeanLayer()
+        loss = reduce_mean.ops(
+            elementwise_max.ops(
+                constant.ops(neg, neg.shape, "float32", 0.0),
+                elementwise_add.ops(
+                    elementwise_sub.ops(neg, pos),
+                    constant.ops(neg, neg.shape, "float32", self.margin))))
+        return loss
diff --git a/dygraph/similarity_net/nets/losses/log_loss.py b/dygraph/similarity_net/nets/losses/log_loss.py
new file mode 100644
index 00000000..38c483d9
--- /dev/null
+++ b/dygraph/similarity_net/nets/losses/log_loss.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
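+# NOTE (illustrative): HingeLoss above computes, per positive/negative pair,
+#     loss = mean(max(0, neg - pos + margin))
+# e.g. with margin = 0.1:
+#     pos = 0.9,  neg = 0.2  ->  max(0, 0.2 - 0.9 + 0.1) = 0.0
+#     pos = 0.5,  neg = 0.45 ->  max(0, 0.45 - 0.5 + 0.1) = 0.05
+# so only pairs whose positive score fails to beat the negative score by at
+# least the margin contribute to the loss.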
+""" +log loss +""" + +import sys + +sys.path.append("../") +import nets.paddle_layers as layers + +class LogLoss(object): + """ + Log Loss Calculate + """ + + def __init__(self, conf_dict): + """ + initialize + """ + pass + + def compute(self, pos, neg): + """ + compute loss + """ + sigmoid = layers.SigmoidLayer() + reduce_mean = layers.ReduceMeanLayer() + loss = reduce_mean.ops(sigmoid.ops(neg - pos)) + return loss diff --git a/dygraph/similarity_net/nets/losses/softmax_cross_entropy_loss.py b/dygraph/similarity_net/nets/losses/softmax_cross_entropy_loss.py new file mode 100644 index 00000000..43dc8ca2 --- /dev/null +++ b/dygraph/similarity_net/nets/losses/softmax_cross_entropy_loss.py @@ -0,0 +1,42 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +softmax loss +""" + +import sys +import paddle.fluid as fluid + +sys.path.append("../") +import nets.paddle_layers as layers + +class SoftmaxCrossEntropyLoss(object): + """ + Softmax with Cross Entropy Loss Calculate + """ + + def __init__(self, conf_dict): + """ + initialize + """ + pass + + def compute(self, input, label): + """ + compute loss + """ + reduce_mean = layers.ReduceMeanLayer() + cost = fluid.layers.cross_entropy(input=input, label=label) + avg_cost = reduce_mean.ops(cost) + return avg_cost diff --git a/dygraph/similarity_net/nets/lstm.py b/dygraph/similarity_net/nets/lstm.py new file mode 100644 index 00000000..c099625d --- /dev/null +++ b/dygraph/similarity_net/nets/lstm.py @@ -0,0 +1,82 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +lstm class +""" +import paddle_layers as layers +from paddle.fluid.dygraph import Layer, Linear +from paddle import fluid + +class LSTM(Layer): + """ + LSTM + """ + + def __init__(self, conf_dict): + """ + initialize + """ + super(LSTM,self).__init__() + self.dict_size = conf_dict["dict_size"] + self.task_mode = conf_dict["task_mode"] + self.emb_dim = conf_dict["net"]["emb_dim"] + self.lstm_dim = conf_dict["net"]["lstm_dim"] + self.hidden_dim = conf_dict["net"]["hidden_dim"] + self.emb_layer = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb").ops() + self.lstm_layer = layers.DynamicLSTMLayer(self.lstm_dim, "lstm").ops() + self.fc_layer = layers.FCLayer(self.hidden_dim, None, "fc").ops() + self.softmax_layer = layers.FCLayer(2, "softmax", "cos_sim").ops() + self.proj_layer = Linear(input_dim = self.hidden_dim, output_dim=self.lstm_dim*4) + self.seq_len = 5 + + + def forward(self, left, right): + """ + Forward network + """ + # embedding layer + left_emb = self.emb_layer(left) + right_emb = self.emb_layer(right) + # Presentation context + left_proj = self.proj_layer(left_emb) + right_proj = self.proj_layer(right_emb) + left_lstm, _ = self.lstm_layer(left_proj) + right_lstm, _ = self.lstm_layer(right_proj) + + left_emb = fluid.layers.reduce_max(left_lstm, dim=1) + right_emb = fluid.layers.reduce_max(right_lstm, dim=1) + left_emb = fluid.layers.reshape( + left_emb, shape=[-1, self.seq_len, self.hidden_dim]) + right_emb = fluid.layers.reshape( + right_emb, shape=[-1, self.seq_len, self.hidden_dim]) + left_emb = fluid.layers.reduce_sum(left_emb, dim=1) + right_emb = fluid.layers.reduce_sum(right_emb, dim=1) + + left_last = fluid.layers.tanh(left_emb) + right_last = fluid.layers.tanh(right_emb) + + + # matching layer + if self.task_mode == "pairwise": + left_fc = self.fc_layer(left_last) + right_fc = self.fc_layer(right_last) + cos_sim_layer = layers.CosSimLayer() + pred = cos_sim_layer.ops(left_fc, right_fc) + return left_fc, pred + else: + concat_layer = layers.ConcatLayer(1) + concat = concat_layer.ops([left_last, right_last]) + concat_fc = self.fc_layer(concat) + pred = self.softmax_layer(concat_fc) + return left_last, pred diff --git a/dygraph/similarity_net/nets/mm_dnn.py b/dygraph/similarity_net/nets/mm_dnn.py new file mode 100644 index 00000000..ce1679a2 --- /dev/null +++ b/dygraph/similarity_net/nets/mm_dnn.py @@ -0,0 +1,169 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +MMDNN class +""" +import numpy as np +import paddle.fluid as fluid +import logging +from paddle.fluid.dygraph import Linear, to_variable, Layer, Pool2D, Conv2D +import paddle_layers as pd_layers +from paddle.fluid import layers + + +class MMDNN(Layer): + """ + MMDNN + """ + + def __init__(self, config): + """ + initialize + """ + super(MMDNN, self).__init__() + + self.vocab_size = int(config['dict_size']) + self.emb_size = int(config['net']['embedding_dim']) + self.lstm_dim = int(config['net']['lstm_dim']) + self.kernel_size = int(config['net']['num_filters']) + self.win_size1 = int(config['net']['window_size_left']) + self.win_size2 = int(config['net']['window_size_right']) + self.dpool_size1 = int(config['net']['dpool_size_left']) + self.dpool_size2 = int(config['net']['dpool_size_right']) + self.hidden_size = int(config['net']['hidden_size']) + + self.seq_len1 = 5 + #int(config['max_len_left']) + self.seq_len2 = 5 #int(config['max_len_right']) + self.task_mode = config['task_mode'] + self.zero_pad = True + self.scale = False + + if int(config['match_mask']) != 0: + self.match_mask = True + else: + self.match_mask = False + + if self.task_mode == "pointwise": + self.n_class = int(config['n_class']) + self.out_size = self.n_class + elif self.task_mode == "pairwise": + self.out_size = 1 + else: + logging.error("training mode not supported") + + # layers + self.emb_layer = pd_layers.EmbeddingLayer(self.vocab_size, self.emb_size, + name="word_embedding",padding_idx=(0 if self.zero_pad else None)).ops() + self.fw_in_proj = Linear( + input_dim=self.emb_size, + output_dim=4 * self.lstm_dim, + param_attr=fluid.ParamAttr(name="fw_fc.w"), + bias_attr=False) + self.lstm_layer = pd_layers.DynamicLSTMLayer(self.lstm_dim, "lstm").ops() + self.rv_in_proj = Linear( + input_dim=self.emb_size, + output_dim=4 * self.lstm_dim, + param_attr=fluid.ParamAttr(name="rv_fc.w"), + bias_attr=False) + self.reverse_layer = pd_layers.DynamicLSTMLayer( + self.lstm_dim, + is_reverse=True).ops() + + self.conv = Conv2D( + num_channels=1, + num_filters=self.kernel_size, + stride=1, + padding=(int(self.seq_len1 / 2), int(self.seq_len2 // 2)), + filter_size=(self.seq_len1, self.seq_len2), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(0.1))) + + self.pool_layer = Pool2D( + pool_size=[ + int(self.seq_len1 / self.dpool_size1), + int(self.seq_len2 / self.dpool_size2) + ], + pool_stride=[ + int(self.seq_len1 / self.dpool_size1), + int(self.seq_len2 / self.dpool_size2) + ], + pool_type="max" ) + self.fc_layer = pd_layers.FCLayer(self.hidden_size, "tanh", "fc").ops() + self.fc1_layer = pd_layers.FCLayer(self.out_size, "softmax", "fc1").ops() + + + + def forward(self, left, right): + """ + Forward network + """ + left_emb = self.emb_layer(left) + right_emb = self.emb_layer(right) + if self.scale: + left_emb = left_emb * (self.emb_size**0.5) + right_emb = right_emb * (self.emb_size**0.5) + + # bi_listm + left_proj = self.fw_in_proj(left_emb) + right_proj = self.fw_in_proj(right_emb) + + left_lstm, _ = self.lstm_layer(left_proj) + right_lstm, _ = self.lstm_layer(right_proj) + left_rv_proj = self.rv_in_proj(left_lstm) + right_rv_proj = self.rv_in_proj(right_lstm) + left_reverse,_ = self.reverse_layer(left_rv_proj) + right_reverse,_ = self.reverse_layer(right_rv_proj) + + left_seq_encoder = fluid.layers.concat([left_lstm, left_reverse], axis=1) + right_seq_encoder = fluid.layers.concat([right_lstm, right_reverse], axis=1) + + pad_value = fluid.layers.assign(input=np.array([0]).astype("float32")) + + 
+        # use integer division here: `/` yields a float under Python 3 and
+        # would break the reshape
+        left_seq_encoder = fluid.layers.reshape(
+            left_seq_encoder, shape=[left_seq_encoder.shape[0] // 5, 5, -1])
+        right_seq_encoder = fluid.layers.reshape(
+            right_seq_encoder, shape=[right_seq_encoder.shape[0] // 5, 5, -1])
+        cross = fluid.layers.matmul(
+            left_seq_encoder, right_seq_encoder, transpose_y=True)
+
+        # the demo data uses a fixed length of 5 on both sides
+        left_lens = to_variable(np.array([5]))
+        right_lens = to_variable(np.array([5]))
+
+        if self.match_mask:
+            mask1 = fluid.layers.sequence_mask(
+                x=left_lens, dtype='float32', maxlen=self.seq_len1 + 1)
+            mask2 = fluid.layers.sequence_mask(
+                x=right_lens, dtype='float32', maxlen=self.seq_len2 + 1)
+
+            mask1 = fluid.layers.transpose(x=mask1, perm=[1, 0])
+            mask = fluid.layers.matmul(x=mask1, y=mask2)
+        else:
+            mask = None
+
+        # conv_pool_relu
+        emb_expand = fluid.layers.unsqueeze(input=cross, axes=[1])
+
+        conv = self.conv(emb_expand)
+        if mask is not None:
+            # NOTE: the similarity matrix itself is stacked across the conv
+            # channels and used as a soft mask here; the `mask` tensor computed
+            # above is effectively unused
+            cross_mask = fluid.layers.stack(x=[cross] * conv.shape[1], axis=1)
+            conv = cross_mask * conv + (1 - cross_mask) * (-2**5 + 1)
+        pool = self.pool_layer(conv)
+        conv_pool_relu = fluid.layers.relu(pool)
+
+        relu_hid1 = self.fc_layer(conv_pool_relu)
+        relu_hid1 = fluid.layers.tanh(relu_hid1)
+
+        pred = self.fc1_layer(relu_hid1)
+        pred = fluid.layers.softmax(pred)
+        return left_seq_encoder, pred
diff --git a/dygraph/similarity_net/nets/paddle_layers.1.py b/dygraph/similarity_net/nets/paddle_layers.1.py
new file mode 100644
index 00000000..f01f64f5
--- /dev/null
+++ b/dygraph/similarity_net/nets/paddle_layers.1.py
@@ -0,0 +1,457 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
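+# NOTE (illustrative shape walk for MMDNN.forward above, assuming batch N,
+# seq_len1 == seq_len2 == 5 and num_filters == k):
+#     cross [N, 5, 5] --unsqueeze--> [N, 1, 5, 5]
+#     conv (k filters of 5x5, padding 2, stride 1) -> [N, k, 5, 5]
+#     dynamic pooling (window 5 // dpool_size)     -> [N, k, dpool_size1, dpool_size2]
+#     fc ("tanh", hidden_size) -> fc1 ("softmax", out_size)
+# The pooling grid, not the kernel, fixes the output resolution -- the
+# dynamic-pooling idea used by MM-DNN / MatchPyramid-style matching models.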
+""" +network layers +""" + +import paddle.fluid as fluid +import paddle.fluid.param_attr as attr + + + +class EmbeddingLayer(object): + """ + Embedding Layer class + """ + + def __init__(self, dict_size, emb_dim, name="emb"): + """ + initialize + """ + self.dict_size = dict_size + self.emb_dim = emb_dim + self.name = name + + def ops(self, input): + """ + operation + """ + emb = fluid.dygraph.Embedding( + input=input, + size=[self.dict_size, self.emb_dim], + is_sparse=True, + param_attr=attr.ParamAttr(name=self.name)) + return emb + + +class SequencePoolLayer(object): + """ + Sequence Pool Layer class + """ + + def __init__(self, pool_type): + """ + initialize + """ + self.pool_type = pool_type + + def ops(self, input): + """ + operation + """ + pool = fluid.dygraph.Pool2D(input=input, pool_type=self.pool_type) + return pool + + +class FCLayer(object): + """ + Fully Connect Layer class + """ + + def __init__(self, fc_dim, act, name="fc"): + """ + initialize + """ + self.fc_dim = fc_dim + self.act = act + self.name = name + + def ops(self, input): + """ + operation + """ + fc = fluid.dygraph.FC(input=input, + size=self.fc_dim, + param_attr=attr.ParamAttr(name="%s.w" % self.name), + bias_attr=attr.ParamAttr(name="%s.b" % self.name), + act=self.act, + name=self.name) + return fc + + +class DynamicGRULayer(object): + """ + Dynamic GRU Layer class + """ + + def __init__(self, gru_dim, name="dyn_gru"): + """ + initialize + """ + self.gru_dim = gru_dim + self.name = name + + def ops(self, input): + """ + operation + """ + proj = fluid.dygraph.FC( + input=input, + size=self.gru_dim * 3, + param_attr=attr.ParamAttr(name="%s_fc.w" % self.name), + bias_attr=attr.ParamAttr(name="%s_fc.b" % self.name)) + gru = fluid.layers.dynamic_gru( + input=proj, + size=self.gru_dim, + param_attr=attr.ParamAttr(name="%s.w" % self.name), + bias_attr=attr.ParamAttr(name="%s.b" % self.name)) + return gru + + +class DynamicLSTMLayer(object): + """ + Dynamic LSTM Layer class + """ + + def __init__(self, lstm_dim, name="dyn_lstm"): + """ + initialize + """ + self.lstm_dim = lstm_dim + self.name = name + + def ops(self, input): + """ + operation + """ + proj = fluid.dygraph.FC( + input=input, + size=self.lstm_dim * 4, + param_attr=attr.ParamAttr(name="%s_fc.w" % self.name), + bias_attr=attr.ParamAttr(name="%s_fc.b" % self.name)) + lstm, _ = fluid.layers.dynamic_lstm( + input=proj, + size=self.lstm_dim * 4, + param_attr=attr.ParamAttr(name="%s.w" % self.name), + bias_attr=attr.ParamAttr(name="%s.b" % self.name)) + return lstm + + +class SequenceLastStepLayer(object): + """ + Get Last Step Sequence Layer class + """ + + def __init__(self): + """ + initialize + """ + pass + + def ops(self, input): + """ + operation + """ + last = fluid.layers.sequence_last_step(input) + return last + + +class SequenceConvPoolLayer(object): + """ + Sequence convolution and pooling Layer class + """ + + def __init__(self, filter_size, num_filters, name): + """ + initialize + Args: + filter_size:Convolution kernel size + num_filters:Convolution kernel number + """ + self.filter_size = filter_size + self.num_filters = num_filters + self.name = name + + def ops(self, input): + """ + operation + """ + conv = fluid.nets.sequence_conv_pool( + input=input, + filter_size=self.filter_size, + num_filters=self.num_filters, + param_attr=attr.ParamAttr(name=self.name), + act="relu") + return conv + + +class DataLayer(object): + """ + Data Layer class + """ + + def __init__(self): + """ + initialize + """ + pass + + def ops(self, name, shape, dtype, 
lod_level=0):
+        """
+        operation
+        """
+        data = fluid.layers.data(  # no change needed for dygraph
+            name=name, shape=shape, dtype=dtype, lod_level=lod_level)
+        return data
+
+
+class ConcatLayer(object):
+    """
+    Concatenation Layer class
+    """
+
+    def __init__(self, axis):
+        """
+        initialize
+        """
+        self.axis = axis
+
+    def ops(self, inputs):
+        """
+        operation
+        """
+        concat = fluid.layers.concat(inputs, axis=self.axis)
+        return concat
+
+
+class ReduceMeanLayer(object):
+    """
+    Reduce Mean Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input):
+        """
+        operation
+        """
+        mean = fluid.layers.reduce_mean(input)
+        return mean
+
+
+class CrossEntropyLayer(object):
+    """
+    Cross Entropy Calculate Layer
+    """
+
+    def __init__(self, name="cross_entropy"):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input, label):
+        """
+        operation
+        """
+        loss = fluid.layers.cross_entropy(input=input, label=label)  # no change needed
+        return loss
+
+
+class SoftmaxWithCrossEntropyLayer(object):
+    """
+    Softmax with Cross Entropy Calculate Layer
+    """
+
+    def __init__(self, name="softmax_with_cross_entropy"):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input, label):
+        """
+        operation
+        """
+        loss = fluid.layers.softmax_with_cross_entropy(  # no change needed
+            logits=input, label=label)
+        return loss
+
+
+class CosSimLayer(object):
+    """
+    Cosine Similarity Calculate Layer
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, x, y):
+        """
+        operation
+        """
+        sim = fluid.layers.cos_sim(x, y)
+        return sim
+
+
+class ElementwiseMaxLayer(object):
+    """
+    Elementwise Max Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, x, y):
+        """
+        operation
+        """
+        max = fluid.layers.elementwise_max(x, y)
+        return max
+
+
+class ElementwiseAddLayer(object):
+    """
+    Elementwise Add Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, x, y):
+        """
+        operation
+        """
+        add = fluid.layers.elementwise_add(x, y)
+        return add
+
+
+class ElementwiseSubLayer(object):
+    """
+    Elementwise Sub Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, x, y):
+        """
+        operation
+        """
+        sub = fluid.layers.elementwise_sub(x, y)
+        return sub
+
+
+class ConstantLayer(object):
+    """
+    Generate A Constant Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input, shape, dtype, value):
+        """
+        operation
+        """
+        constant = fluid.layers.fill_constant_batch_size_like(input, shape,
+                                                              dtype, value)
+        return constant
+
+
+class SigmoidLayer(object):
+    """
+    Sigmoid Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input):
+        """
+        operation
+        """
+        sigmoid = fluid.layers.sigmoid(input)
+        return sigmoid
+
+
+class SoftsignLayer(object):
+    """
+    Softsign Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input):
+        """
+        operation
+        """
+        softsign = fluid.layers.softsign(input)
+        return softsign
+
+
+# class MatmulLayer(object):
+#     def __init__(self, transpose_x, transpose_y):
+#         self.transpose_x = transpose_x
+#         self.transpose_y = transpose_y
+
+#     def ops(self, x, y):
+#         matmul = fluid.layers.matmul(x, y, self.transpose_x, self.transpose_y)
+#         return matmul
+
+# class Conv2dLayer(object):
+#     def __init__(self, num_filters, filter_size, act, name):
+#         self.num_filters = num_filters
+#         self.filter_size = filter_size
+#         self.act = act
+#         self.name = name
+
+#     def ops(self, input):
+#         conv = fluid.layers.conv2d(
+#             input,
+#             self.num_filters,
+#             self.filter_size,
+#             param_attr=attr.ParamAttr(name="%s.w" % self.name),
+#             bias_attr=attr.ParamAttr(name="%s.b" % self.name),
+#             act=self.act)
+#         return conv
+
+# class Pool2dLayer(object):
+#     def __init__(self, pool_size, pool_type):
+#         self.pool_size = pool_size
+#         self.pool_type = pool_type
+
+#     def ops(self, input):
+#         pool = fluid.layers.pool2d(input, self.pool_size, self.pool_type)
+#         return pool
diff --git a/dygraph/similarity_net/nets/paddle_layers.py b/dygraph/similarity_net/nets/paddle_layers.py
new file mode 100644
index 00000000..cf9f2e39
--- /dev/null
+++ b/dygraph/similarity_net/nets/paddle_layers.py
@@ -0,0 +1,1083 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+network layers
+"""
+import collections
+import contextlib
+import inspect
+import six
+import sys
+# `reduce` is not a builtin under Python 3; FC._build_once and
+# RNNUnit.get_initial_states below rely on it
+from functools import partial, reduce
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid import layers
+import paddle.fluid.param_attr as attr
+import paddle.fluid.layers.utils as utils
+from paddle.fluid.dygraph import Embedding, Pool2D, Linear, Conv2D, GRUUnit, Layer, to_variable
+from paddle.fluid.layers.utils import map_structure, flatten, pack_sequence_as
+
+
+class EmbeddingLayer(object):
+    """
+    Embedding Layer class
+    """
+
+    def __init__(self, dict_size, emb_dim, name="emb", padding_idx=None):
+        """
+        initialize
+        """
+        self.dict_size = dict_size
+        self.emb_dim = emb_dim
+        self.name = name
+        self.padding_idx = padding_idx
+
+    def ops(self):
+        """
+        operation
+        """
+        emb = Embedding(
+            size=[self.dict_size, self.emb_dim],
+            is_sparse=True,
+            padding_idx=self.padding_idx,
+            param_attr=attr.ParamAttr(
+                name=self.name, initializer=fluid.initializer.Xavier()))
+        return emb
+
+
+class FCLayer(object):
+    """
+    Fully Connect Layer class
+    """
+
+    def __init__(self, fc_dim, act, name="fc"):
+        """
+        initialize
+        """
+        self.fc_dim = fc_dim
+        self.act = act
+        self.name = name
+
+    def ops(self):
+        """
+        operation
+        """
+        fc = FC(size=self.fc_dim,
+                param_attr=attr.ParamAttr(name="%s.w" % self.name),
+                bias_attr=attr.ParamAttr(name="%s.b" % self.name),
+                act=self.act)
+        return fc
+
+
+class DynamicGRULayer(object):
+    """
+    Dynamic GRU Layer class
+    """
+
+    def __init__(self, gru_dim, name="dyn_gru"):
+        """
+        initialize
+        """
+        self.gru_dim = gru_dim
+        self.name = name
+
+    def ops(self):
+        """
+        operation
+        """
+        gru = DynamicGRU(
+            size=self.gru_dim,
+            param_attr=attr.ParamAttr(name="%s.w" % self.name),
+            bias_attr=attr.ParamAttr(name="%s.b" % self.name))
+        return gru
+
+
+class DynamicLSTMLayer(object):
+    """
+    Dynamic LSTM Layer class
+    """
+
+    def __init__(self, lstm_dim, name="dyn_lstm", is_reverse=False):
+        """
+        initialize
+        """
+        self.lstm_dim = lstm_dim
+        self.name = name
+        self.is_reverse = is_reverse
+
+    def ops(self):
+        """
+        operation
+        """
+        lstm_cell = BasicLSTMUnit(
+            hidden_size=self.lstm_dim, input_size=self.lstm_dim * 4)
+        lstm = RNN(cell=lstm_cell,
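+                   # RNN and BasicLSTMUnit here are the dygraph
+                   # re-implementations defined later in this module (their
+                   # static-graph counterparts live in nets/copy.py)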
+                   time_major=True,
+                   is_reverse=self.is_reverse)
+        return lstm
+
+
+class DataLayer(object):
+    """
+    Data Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, name, shape, dtype, lod_level=0):
+        """
+        operation
+        """
+        data = fluid.layers.data(
+            name=name, shape=shape, dtype=dtype, lod_level=lod_level)
+        return data
+
+
+class ConcatLayer(object):
+    """
+    Concatenation Layer class
+    """
+
+    def __init__(self, axis):
+        """
+        initialize
+        """
+        self.axis = axis
+
+    def ops(self, inputs):
+        """
+        operation
+        """
+        concat = fluid.layers.concat(inputs, axis=self.axis)
+        return concat
+
+
+class ReduceMeanLayer(object):
+    """
+    Reduce Mean Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input):
+        """
+        operation
+        """
+        mean = fluid.layers.reduce_mean(input)
+        return mean
+
+
+class CrossEntropyLayer(object):
+    """
+    Cross Entropy Calculate Layer
+    """
+
+    def __init__(self, name="cross_entropy"):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input, label):
+        """
+        operation
+        """
+        loss = fluid.layers.cross_entropy(input=input, label=label)  # no change needed
+        return loss
+
+
+class SoftmaxWithCrossEntropyLayer(object):
+    """
+    Softmax with Cross Entropy Calculate Layer
+    """
+
+    def __init__(self, name="softmax_with_cross_entropy"):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input, label):
+        """
+        operation
+        """
+        loss = fluid.layers.softmax_with_cross_entropy(  # no change needed
+            logits=input, label=label)
+        return loss
+
+
+class CosSimLayer(object):
+    """
+    Cosine Similarity Calculate Layer
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, x, y):
+        """
+        operation
+        """
+        sim = fluid.layers.cos_sim(x, y)
+        return sim
+
+
+class ElementwiseMaxLayer(object):
+    """
+    Elementwise Max Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, x, y):
+        """
+        operation
+        """
+        max = fluid.layers.elementwise_max(x, y)
+        return max
+
+
+class ElementwiseAddLayer(object):
+    """
+    Elementwise Add Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, x, y):
+        """
+        operation
+        """
+        add = fluid.layers.elementwise_add(x, y)
+        return add
+
+
+class ElementwiseSubLayer(object):
+    """
+    Elementwise Sub Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, x, y):
+        """
+        operation
+        """
+        sub = fluid.layers.elementwise_sub(x, y)
+        return sub
+
+
+class ConstantLayer(object):
+    """
+    Generate A Constant Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input, shape, dtype, value):
+        """
+        operation
+        """
+        constant = fluid.layers.fill_constant_batch_size_like(input, shape,
+                                                              dtype, value)
+        return constant
+
+
+class SigmoidLayer(object):
+    """
+    Sigmoid Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input):
+        """
+        operation
+        """
+        sigmoid = fluid.layers.sigmoid(input)
+        return sigmoid
+
+
+class SoftsignLayer(object):
+    """
+    Softsign Layer class
+    """
+
+    def __init__(self):
+        """
+        initialize
+        """
+        pass
+
+    def ops(self, input):
+        """
+        operation
+        """
+        softsign = fluid.layers.softsign(input)
+        return softsign
+
+
+# dygraph layers: the classes below re-implement the static-graph building
+# blocks used above on top of fluid.dygraph
+class SimpleConvPool(fluid.dygraph.Layer):
+    def __init__(self,
+                 num_channels,
+                 num_filters,
+                 filter_size,
+                 use_cudnn=False):
+        super(SimpleConvPool, self).__init__()
+        self._conv2d = Conv2D(num_channels=num_channels,
+                              num_filters=num_filters,
+                              filter_size=filter_size,
padding=[1, 1], + use_cudnn=use_cudnn, + act='relu') + + def forward(self, inputs): + x = self._conv2d(inputs) + x = fluid.layers.reduce_max(x, dim=-1) + x = fluid.layers.reshape(x, shape=[x.shape[0], -1]) + return x + +class FC(Layer): + """ + This interface is used to construct a callable object of the ``FC`` class. + For more details, refer to code examples. + It creates a fully connected layer in the network. It can take + one or multiple ``Tensor`` as its inputs. It creates a Variable called weights for each input tensor, + which represents a fully connected weight matrix from each input unit to + each output unit. The fully connected layer multiplies each input tensor + with its corresponding weight to produce an output Tensor with shape [N, `size`], + where N is batch size. If multiple input tensors are given, the results of + multiple output tensors with shape [N, `size`] will be summed up. If ``bias_attr`` + is not None, a bias variable will be created and added to the output. + Finally, if ``act`` is not None, it will be applied to the output as well. + When the input is single ``Tensor`` : + .. math:: + Out = Act({XW + b}) + When the input are multiple ``Tensor`` : + .. math:: + Out = Act({\sum_{i=0}^{N-1}X_iW_i + b}) + In the above equation: + * :math:`N`: Number of the input. N equals to len(input) if input is list of ``Tensor`` . + * :math:`X_i`: The i-th input ``Tensor`` . + * :math:`W_i`: The i-th weights matrix corresponding i-th input tensor. + * :math:`b`: The bias parameter created by this layer (if needed). + * :math:`Act`: The activation function. + * :math:`Out`: The output ``Tensor`` . + See below for an example. + .. code-block:: text + Given: + data_1.data = [[[0.1, 0.2]]] + data_1.shape = (1, 1, 2) # 1 is batch_size + data_2.data = [[[0.1, 0.2, 0.3]]] + data_2.shape = (1, 1, 3) # 1 is batch_size + fc = FC("fc", 2, num_flatten_dims=2) + out = fc(input=[data_1, data_2]) + Then: + out.data = [[[0.182996 -0.474117]]] + out.shape = (1, 1, 2) + Parameters: + + size(int): The number of output units in this layer. + num_flatten_dims (int, optional): The fc layer can accept an input tensor with more than + two dimensions. If this happens, the multi-dimension tensor will first be flattened + into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input + tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1) + dimensions will be flatten to form the first dimension of the final matrix (height of + the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to + form the second dimension of the final matrix (width of the matrix). For example, suppose + `X` is a 5-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3. + Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1 + param_attr (ParamAttr or list of ParamAttr, optional): The parameter attribute for learnable + weights(Parameter) of this layer. Default: None. + bias_attr (ParamAttr or list of ParamAttr, optional): The attribute for the bias + of this layer. If it is set to False, no bias will be added to the output units. + If it is set to None, the bias is initialized zero. Default: None. + act (str, optional): Activation to be applied to the output of this layer. Default: None. + is_test(bool, optional): A flag indicating whether execution is in test phase. Default: False. + dtype(str, optional): Dtype used for weight, it can be "float32" or "float64". Default: "float32". 
+ Attribute: + **weight** (list of Parameter): the learnable weights of this layer. + **bias** (Parameter or None): the learnable bias of this layer. + Returns: + None + + Examples: + .. code-block:: python + from paddle.fluid.dygraph.base import to_variable + import paddle.fluid as fluid + from paddle.fluid.dygraph import FC + import numpy as np + data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32') + with fluid.dygraph.guard(): + fc = FC("fc", 64, num_flatten_dims=2) + data = to_variable(data) + conv = fc(data) + """ + + def __init__(self, + size, + num_flatten_dims=1, + param_attr=None, + bias_attr=None, + act=None, + is_test=False, + dtype="float32"): + super(FC, self).__init__(dtype) + + self._size = size + self._num_flatten_dims = num_flatten_dims + self._dtype = dtype + self._param_attr = param_attr + self._bias_attr = bias_attr + self._act = act + self.__w = list() + + def _build_once(self, input): + i = 0 + for inp, param in self._helper.iter_inputs_and_params(input, + self._param_attr): + input_shape = inp.shape + + param_shape = [ + reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], + 1) + ] + [self._size] + self.__w.append( + self.add_parameter( + '_w%d' % i, + self.create_parameter( + attr=param, + shape=param_shape, + dtype=self._dtype, + is_bias=False))) + i += 1 + + size = list([self._size]) + self._b = self.create_parameter( + attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True) + + # TODO(songyouwei): We should remove _w property + @property + def _w(self, i=0): + return self.__w[i] + + @_w.setter + def _w(self, value, i=0): + assert isinstance(self.__w[i], Variable) + self.__w[i].set_value(value) + + @property + def weight(self): + if len(self.__w) > 1: + return self.__w + else: + return self.__w[0] + + @weight.setter + def weight(self, value): + if len(self.__w) == 1: + self.__w[0] = value + + @property + def bias(self): + return self._b + + @bias.setter + def bias(self, value): + self._b = value + + def forward(self, input): + mul_results = list() + i = 0 + for inp, param in self._helper.iter_inputs_and_params(input, + self._param_attr): + tmp = self._helper.create_variable_for_type_inference(self._dtype) + self._helper.append_op( + type="mul", + inputs={"X": inp, + "Y": self.__w[i]}, + outputs={"Out": tmp}, + attrs={ + "x_num_col_dims": self._num_flatten_dims, + "y_num_col_dims": 1 + }) + i += 1 + mul_results.append(tmp) + + if len(mul_results) == 1: + pre_bias = mul_results[0] + else: + pre_bias = self._helper.create_variable_for_type_inference( + self._dtype) + self._helper.append_op( + type="sum", + inputs={"X": mul_results}, + outputs={"Out": pre_bias}, + attrs={"use_mkldnn": False}) + + if self._b: + pre_activation = self._helper.create_variable_for_type_inference( + dtype=self._dtype) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], + 'Y': [self._b]}, + outputs={'Out': [pre_activation]}, + attrs={'axis': self._num_flatten_dims}) + else: + pre_activation = pre_bias + # Currently, we don't support inplace in dygraph mode + return self._helper.append_activation(pre_activation, act=self._act) + +class DynamicGRU(Layer): + def __init__(self, + size, + param_attr=None, + bias_attr=None, + is_reverse=False, + gate_activation='sigmoid', + candidate_activation='tanh', + origin_mode=False, + init_size = None): + super(DynamicGRU, self).__init__() + self.gru_unit = GRUUnit( + size * 3, + param_attr=param_attr, + bias_attr=bias_attr, + activation=candidate_activation, + gate_activation=gate_activation, + 
origin_mode=origin_mode)
+        self.size = size
+        self.is_reverse = is_reverse
+
+    def forward(self, inputs, h_0):
+        hidden = h_0
+        res = []
+        for i in range(inputs.shape[1]):
+            if self.is_reverse:
+                i = inputs.shape[1] - 1 - i
+            input_ = inputs[:, i:i + 1, :]
+            input_ = fluid.layers.reshape(
+                input_, [-1, input_.shape[2]], inplace=False)
+            hidden, reset, gate = self.gru_unit(input_, hidden)
+            hidden_ = fluid.layers.reshape(
+                hidden, [-1, 1, hidden.shape[1]], inplace=False)
+            res.append(hidden_)
+        if self.is_reverse:
+            res = res[::-1]
+        res = fluid.layers.concat(res, axis=1)
+        return res
+
+
+class RNNUnit(Layer):
+    def get_initial_states(self,
+                           batch_ref,
+                           shape=None,
+                           dtype=None,
+                           init_value=0,
+                           batch_dim_idx=0):
+        """
+        Generate initialized states according to the provided shape, data type
+        and value.
+
+        Parameters:
+            batch_ref: A (possibly nested structure of) tensor variable[s].
+                The first dimension of the tensor will be used as the batch
+                size to initialize states.
+            shape: A (possibly nested structure of) shape[s], where a shape is
+                represented as a list/tuple of integers. -1 (for batch size)
+                will be automatically inserted if a shape does not start with
+                it. If None, property `state_shape` will be used. The default
+                value is None.
+            dtype: A (possibly nested structure of) data type[s]. The structure
+                must be the same as that of `shape`, except when all tensors in
+                the states have the same data type, in which case a single data
+                type can be used. If None and property `cell.state_dtype` is
+                not available, float32 will be used as the data type. The
+                default value is None.
+            init_value: A float value used to initialize states.
+            batch_dim_idx: An integer indicating which dimension of `batch_ref`
+                represents the batch size. The default value is 0.
+
+        Returns:
+            Variable: tensor variable[s] packed in the same structure provided \
+                by shape, representing the initialized states.
+        """
+        # TODO: use inputs and batch_size
+        batch_ref = flatten(batch_ref)[0]
+
+        def _is_shape_sequence(seq):
+            if sys.version_info < (3, ):
+                integer_types = (
+                    int,
+                    long, )
+            else:
+                integer_types = (int, )
+            """For shape, a list/tuple of integers is the finest-grained object"""
+            if (isinstance(seq, list) or isinstance(seq, tuple)):
+                if reduce(lambda flag, x: isinstance(x, integer_types) and flag,
+                          seq, True):
+                    return False
+            # TODO: Add check for the illegal
+            if isinstance(seq, dict):
+                return True
+            return (isinstance(seq, collections.Sequence) and
+                    not isinstance(seq, six.string_types))
+
+        class Shape(object):
+            def __init__(self, shape):
+                self.shape = shape if shape[0] == -1 else ([-1] + list(shape))
+
+        # nested structure of shapes
+        states_shapes = self.state_shape if shape is None else shape
+        is_sequence_ori = utils.is_sequence
+        utils.is_sequence = _is_shape_sequence
+        states_shapes = map_structure(lambda shape: Shape(shape), states_shapes)
+        utils.is_sequence = is_sequence_ori
+
+        # nested structure of dtypes
+        try:
+            states_dtypes = self.state_dtype if dtype is None else dtype
+        except NotImplementedError:  # use fp32 as default
+            states_dtypes = "float32"
+        if len(flatten(states_dtypes)) == 1:
+            dtype = flatten(states_dtypes)[0]
+            states_dtypes = map_structure(lambda shape: dtype, states_shapes)
+
+        init_states = map_structure(
+            lambda shape, dtype: fluid.layers.fill_constant_batch_size_like(
+                input=batch_ref,
+                shape=shape.shape,
+                dtype=dtype,
+                value=init_value,
+                input_dim_idx=batch_dim_idx), states_shapes, states_dtypes)
+        return init_states
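+
+    # A minimal sketch of what `get_initial_states` yields, assuming a cell
+    # whose `state_shape` is [[hidden_size], [hidden_size]] (an LSTM-style
+    # hidden/cell pair) and a batch reference of shape [batch, seq_len, dim];
+    # the names here are illustrative, not repo APIs:
+    #
+    #     h0, c0 = cell.get_initial_states(batch_ref=x, batch_dim_idx=0)
+    #     # h0 and c0 both have shape [batch, hidden_size], filled with init_value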
+    @property
+    def state_shape(self):
+        """
+        Abstract method (property).
+        Used to initialize states.
+        A (possibly nested structure of) shape[s], where a shape is represented
+        as a list/tuple of integers (-1 for batch size would be automatically
+        inserted into a shape if the shape does not start with it).
+        Not necessary to be implemented if states are not initialized by
+        `get_initial_states` or the `shape` argument is provided when using
+        `get_initial_states`.
+        """
+        raise NotImplementedError(
+            "Please add implementation for `state_shape` in the used cell.")
+
+    @property
+    def state_dtype(self):
+        """
+        Abstract method (property).
+        Used to initialize states.
+        A (possibly nested structure of) data type[s]. The structure must be
+        the same as that of `shape`, except when all tensors in the states have
+        the same data type, in which case a single data type can be used.
+        Not necessary to be implemented if states are not initialized by
+        `get_initial_states` or the `dtype` argument is provided when using
+        `get_initial_states`.
+        """
+        raise NotImplementedError(
+            "Please add implementation for `state_dtype` in the used cell.")
+
+
+class BasicLSTMUnit(RNNUnit):
+    """
+    ****
+    BasicLSTMUnit class, using basic operators to build an LSTM.
+    The algorithm can be described by the equations below.
+    .. math::
+        i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i)
+        f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget\_bias)
+        o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o)
+        \\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c)
+        c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t}
+        h_t &= o_t \odot tanh(c_t)
+    - $W$ terms denote weight matrices (e.g. $W_{ix}$ is the matrix
+      of weights from the input gate to the input)
+    - The $b$ terms denote bias vectors (e.g. $b_i$ is the input gate bias).
+    - sigmoid is the logistic sigmoid function.
+    - $i, f, o$ and $c$ are the input gate, forget gate, output gate,
+      and cell activation vectors, respectively, all of which have the same
+      size as the cell output activation vector $h$.
+    - :math:`\odot` is the element-wise product of the vectors.
+    - :math:`tanh` is the activation function.
+    - :math:`\\tilde{c_t}` is also called the candidate hidden state,
+      which is computed based on the current input and the previous hidden state.
+    Args:
+        hidden_size (integer): The hidden size used in the Unit.
+        input_size (integer): The input size used in the Unit.
+        param_attr(ParamAttr|None): The parameter attribute for the learnable
+            weight matrix. Note:
+            If it is set to None or one attribute of ParamAttr, lstm_unit will
+            create ParamAttr as param_attr. If the Initializer of the param_attr
+            is not set, the parameter is initialized with Xavier. Default: None.
+        bias_attr (ParamAttr|None): The parameter attribute for the bias
+            of the LSTM unit.
+            If it is set to None or one attribute of ParamAttr, lstm_unit will
+            create ParamAttr as bias_attr. If the Initializer of the bias_attr
+            is not set, the bias is initialized as zero. Default: None.
+        gate_activation (function|None): The activation function for gates (actGate).
+            Default: 'fluid.layers.sigmoid'
+        activation (function|None): The activation function for cells (actNode).
+ Default: 'fluid.layers.tanh' + forget_bias(float|1.0): forget bias used when computing forget gate + dtype(string): data type used in this unit + """ + + def __init__(self, + hidden_size, + input_size, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + forget_bias=1.0, + dtype='float32'): + super(BasicLSTMUnit, self).__init__(dtype) + + self._hidden_size = hidden_size + self._param_attr = param_attr + self._bias_attr = bias_attr + self._gate_activation = gate_activation or layers.sigmoid + self._activation = activation or layers.tanh + self._forget_bias = layers.fill_constant( + [1], dtype=dtype, value=forget_bias) + self._forget_bias.stop_gradient = False + self._dtype = dtype + self._input_size = input_size + + self._weight = self.create_parameter( + attr=self._param_attr, + shape=[self._input_size + self._hidden_size, 4 * self._hidden_size], + dtype=self._dtype) + + self._bias = self.create_parameter(attr=self._bias_attr, + shape=[4 * self._hidden_size], + dtype=self._dtype, + is_bias=True) + + def forward(self, input, state): + pre_hidden, pre_cell = state + concat_input_hidden = layers.concat([input, pre_hidden], axis=1) + + gate_input = layers.matmul(x=concat_input_hidden, y=self._weight) + + gate_input = layers.elementwise_add(gate_input, self._bias) + i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1) + new_cell = layers.elementwise_add( + layers.elementwise_mul( + pre_cell, + layers.sigmoid(layers.elementwise_add(f, self._forget_bias))), + layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j))) + new_hidden = layers.tanh(new_cell) * layers.sigmoid(o) + + return new_hidden, [new_hidden, new_cell] + + @property + def state_shape(self): + return [[self._hidden_size], [self._hidden_size]] + + +class RNN(Layer): + def __init__(self, + cell, + is_reverse=False, + time_major=False, + **kwargs): + super(RNN, self).__init__() + self.cell = cell + if not hasattr(self.cell, "call"): + self.cell.call = self.cell.forward + self.is_reverse = is_reverse + self.time_major = time_major + self.batch_index, self.time_step_index = (1, 0) if time_major else (0, + 1) + + def forward(self, inputs, initial_states=None, sequence_length=None, **kwargs): + if fluid.in_dygraph_mode(): + + class OutputArray(object): + def __init__(self, x): + self.array = [x] + def append(self, x): + self.array.append(x) + + def _maybe_copy(state, new_state, step_mask): + # TODO: use where_op + new_state = fluid.layers.elementwise_mul( + new_state, step_mask, + axis=0) - fluid.layers.elementwise_mul(state, + (step_mask - 1), + axis=0) + return new_state + + flat_inputs = flatten(inputs) + batch_size, time_steps = ( + flat_inputs[0].shape[self.batch_index], + flat_inputs[0].shape[self.time_step_index]) + + if initial_states is None: + initial_states = self.cell.get_initial_states( + batch_ref=inputs, batch_dim_idx=self.batch_index) + + if not self.time_major: + inputs = map_structure( + lambda x: fluid.layers.transpose(x, [1, 0] + list( + range(2, len(x.shape)))), inputs) + + if sequence_length: + mask = fluid.layers.sequence_mask( + sequence_length, + maxlen=time_steps, + dtype=flatten(initial_states)[0].dtype) + mask = fluid.layers.transpose(mask, [1, 0]) + + if self.is_reverse: + inputs = map_structure(lambda x: fluid.layers.reverse(x, axis=[0]), inputs) + mask = fluid.layers.reverse(mask, axis=[0]) if sequence_length else None + + states = initial_states + outputs = [] + for i in range(time_steps): + step_inputs = map_structure(lambda x:x[i], inputs) + step_outputs, 
new_states = self.cell(step_inputs, states, **kwargs)
+                if sequence_length:
+                    new_states = map_structure(
+                        partial(_maybe_copy, step_mask=mask[i]), states,
+                        new_states)
+                states = new_states
+                if i == 0:
+                    outputs = map_structure(lambda x: OutputArray(x),
+                                            step_outputs)
+                else:
+                    map_structure(lambda x, x_array: x_array.append(x),
+                                  step_outputs, outputs)
+
+            final_outputs = map_structure(
+                lambda x: fluid.layers.stack(x.array,
+                                             axis=self.time_step_index),
+                outputs)
+
+            if self.is_reverse:
+                final_outputs = map_structure(
+                    lambda x: fluid.layers.reverse(x,
+                                                   axis=self.time_step_index),
+                    final_outputs)
+
+            final_states = new_states
+        else:
+            final_outputs, final_states = fluid.layers.rnn(
+                self.cell,
+                inputs,
+                initial_states=initial_states,
+                sequence_length=sequence_length,
+                time_major=self.time_major,
+                is_reverse=self.is_reverse,
+                **kwargs)
+        return final_outputs, final_states
+
+
+from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, Layer, to_variable
+
+
+class EncoderCell(RNNUnit):
+    def __init__(self, num_layers, input_size, hidden_size, dropout_prob=0.):
+        super(EncoderCell, self).__init__()
+        self.num_layers = num_layers
+        self.dropout_prob = dropout_prob
+
+        self.lstm_cells = list()
+        for i in range(self.num_layers):
+            self.lstm_cells.append(
+                self.add_sublayer(
+                    "layer_%d" % i,
+                    BasicLSTMUnit(input_size if i == 0 else hidden_size,
+                                  hidden_size)))
+
+    def forward(self, step_input, states):
+        new_states = []
+        for i in range(self.num_layers):
+            out, new_state = self.lstm_cells[i](step_input, states[i])
+            step_input = layers.dropout(
+                out, self.dropout_prob) if self.dropout_prob > 0 else out
+            new_states.append(new_state)
+        return step_input, new_states
+
+    @property
+    def state_shape(self):
+        return [cell.state_shape for cell in self.lstm_cells]
+
+
+class BasicGRUUnit(Layer):
+    """
+    ****
+    BasicGRUUnit class, using basic operators to build a GRU.
+    The algorithm can be described by the equations below.
+    .. math::
+        u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u)
+        r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r)
+        m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m)
+        h_t & = dot(u_t, h_{t-1}) + dot((1 - u_t), m_t)
+    Args:
+        hidden_size (integer): The hidden size used in the Unit.
+        input_size (integer): The input size used in the Unit.
+        param_attr(ParamAttr|None): The parameter attribute for the learnable
+            weight matrix. Note:
+            If it is set to None or one attribute of ParamAttr, gru_unit will
+            create ParamAttr as param_attr. If the Initializer of the param_attr
+            is not set, the parameter is initialized with Xavier. Default: None.
+        bias_attr (ParamAttr|None): The parameter attribute for the bias
+            of the GRU unit.
+            If it is set to None or one attribute of ParamAttr, gru_unit will
+            create ParamAttr as bias_attr. If the Initializer of the bias_attr
+            is not set, the bias is initialized as zero. Default: None.
+        gate_activation (function|None): The activation function for gates (actGate).
+            Default: 'fluid.layers.sigmoid'
+        activation (function|None): The activation function for the cell (actNode).
+            Default: 'fluid.layers.tanh'
+        dtype(string): data type used in this unit
+    Examples:
+        .. code-block:: python
+            import paddle.fluid.layers as layers
+            from paddle.fluid.contrib.layers import BasicGRUUnit
+            input_size = 128
+            hidden_size = 256
+            input = layers.data(name="input", shape=[-1, input_size], dtype='float32')
+            pre_hidden = layers.data(name="pre_hidden", shape=[-1, hidden_size], dtype='float32')
+            gru_unit = BasicGRUUnit("gru_unit", hidden_size)
+            new_hidden = gru_unit(input, pre_hidden)
+    """
+
+    def __init__(self,
+                 hidden_size,
+                 input_size,
+                 param_attr=None,
+                 bias_attr=None,
+                 gate_activation=None,
+                 activation=None,
+                 dtype='float32'):
+        super(BasicGRUUnit, self).__init__(dtype)
+
+        self._hidden_size = hidden_size
+        self._input_size = input_size
+        self._param_attr = param_attr
+        self._bias_attr = bias_attr
+        self._gate_activation = gate_activation or layers.sigmoid
+        self._activation = activation or layers.tanh
+        self._dtype = dtype
+
+        self._gate_weight = self.create_parameter(
+            attr=self._param_attr,
+            shape=[self._input_size + self._hidden_size, 2 * self._hidden_size],
+            dtype=self._dtype)
+
+        self._candidate_weight = self.create_parameter(
+            attr=self._param_attr,
+            shape=[self._input_size + self._hidden_size, self._hidden_size],
+            dtype=self._dtype)
+
+        self._gate_bias = self.create_parameter(
+            attr=self._bias_attr,
+            shape=[2 * self._hidden_size],
+            dtype=self._dtype,
+            is_bias=True)
+        self._candidate_bias = self.create_parameter(
+            attr=self._bias_attr,
+            shape=[self._hidden_size],
+            dtype=self._dtype,
+            is_bias=True)
+
+    def forward(self, input, state):
+        pre_hidden = state
+        concat_input_hidden = fluid.layers.concat([input, pre_hidden], axis=1)
+
+        gate_input = layers.matmul(x=concat_input_hidden, y=self._gate_weight)
+
+        gate_input = layers.elementwise_add(gate_input, self._gate_bias)
+
+        gate_input = self._gate_activation(gate_input)
+        r, u = layers.split(gate_input, num_or_sections=2, dim=1)
+
+        r_hidden = r * pre_hidden
+
+        candidate = layers.matmul(
+            layers.concat([input, r_hidden], 1), self._candidate_weight)
+        candidate = layers.elementwise_add(candidate, self._candidate_bias)
+
+        c = self._activation(candidate)
+        new_hidden = u * pre_hidden + (1 - u) * c
+
+        return new_hidden
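+
+
+# A minimal smoke test for the units above, kept as comments so importing this
+# module stays side-effect free; the shapes are illustrative assumptions:
+#
+#     import numpy as np
+#     with fluid.dygraph.guard():
+#         lstm_cell = BasicLSTMUnit(hidden_size=8, input_size=8)
+#         lstm = RNN(cell=lstm_cell, time_major=True)
+#         x = to_variable(np.random.rand(8, 2, 8).astype("float32"))  # [T, N, D]
+#         out, final_states = lstm(x)
+#         # out.shape == [8, 2, 8]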
diff --git a/dygraph/similarity_net/reader.py b/dygraph/similarity_net/reader.py
new file mode 100644
index 00000000..c38c8c6d
--- /dev/null
+++ b/dygraph/similarity_net/reader.py
@@ -0,0 +1,262 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+SimNet reader
+"""
+
+import logging
+import numpy as np
+import io
+
+
+class SimNetProcessor(object):
+    def __init__(self, args, vocab):
+        self.args = args
+        # load vocab
+        self.vocab = vocab
+        self.valid_label = np.array([])
+        self.test_label = np.array([])
+
+    def get_reader(self, mode, epoch=0):
+        """
+        Get Reader
+        """
+
+        def reader_with_pairwise():
+            """
+            Reader with Pairwise
+            """
+            if mode == "valid":
+                with io.open(self.args.valid_data_dir, "r",
+                             encoding="utf8") as file:
+                    for line in file:
+                        query, title, label = line.strip().split("\t")
+                        if len(query) == 0 or len(title) == 0 or len(
+                                label) == 0 or not label.isdigit() or int(
+                                    label) not in [0, 1]:
+                            logging.warning(
+                                "line does not match format in valid file")
+                            continue
+                        query = [
+                            self.vocab[word] for word in query.split(" ")
+                            if word in self.vocab
+                        ]
+                        title = [
+                            self.vocab[word] for word in title.split(" ")
+                            if word in self.vocab
+                        ]
+                        if len(query) == 0:
+                            query = [0]
+                        if len(title) == 0:
+                            title = [0]
+                        yield [query, title]
+            elif mode == "test":
+                with io.open(self.args.test_data_dir, "r",
+                             encoding="utf8") as file:
+                    for line in file:
+                        query, title, label = line.strip().split("\t")
+                        if len(query) == 0 or len(title) == 0 or len(
+                                label) == 0 or not label.isdigit() or int(
+                                    label) not in [0, 1]:
+                            logging.warning(
+                                "line does not match format in test file")
+                            continue
+                        query = [
+                            self.vocab[word] for word in query.split(" ")
+                            if word in self.vocab
+                        ]
+                        title = [
+                            self.vocab[word] for word in title.split(" ")
+                            if word in self.vocab
+                        ]
+                        if len(query) == 0:
+                            query = [0]
+                        if len(title) == 0:
+                            title = [0]
+                        yield [query, title]
+            else:
+                for idx in range(epoch):
+                    with io.open(self.args.train_data_dir, "r",
+                                 encoding="utf8") as file:
+                        for line in file:
+                            query, pos_title, neg_title = line.strip().split(
+                                "\t")
+                            if len(query) == 0 or len(pos_title) == 0 or len(
+                                    neg_title) == 0:
+                                logging.warning(
+                                    "line does not match format in train file")
+                                continue
+                            query = [
+                                self.vocab[word] for word in query.split(" ")
+                                if word in self.vocab
+                            ]
+                            pos_title = [
+                                self.vocab[word]
+                                for word in pos_title.split(" ")
+                                if word in self.vocab
+                            ]
+                            neg_title = [
+                                self.vocab[word]
+                                for word in neg_title.split(" ")
+                                if word in self.vocab
+                            ]
+                            if len(query) == 0:
+                                query = [0]
+                            if len(pos_title) == 0:
+                                pos_title = [0]
+                            if len(neg_title) == 0:
+                                neg_title = [0]
+                            yield [query, pos_title, neg_title]
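+
+        # Expected tab-separated input formats, inferred from the parsing above:
+        #   pairwise train:      query \t pos_title \t neg_title
+        #   pairwise valid/test: query \t title \t label  (label in {0, 1})
+        #   pointwise (all):     query \t title \t label  (label in {0, 1})
+        # Each field holds space-separated words that are looked up in the
+        # vocab; out-of-vocabulary words are dropped, and a field that becomes
+        # empty falls back to the single id [0].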
+
+        def reader_with_pointwise():
+            """
+            Reader with Pointwise
+            """
+            if mode == "valid":
+                with io.open(self.args.valid_data_dir, "r",
+                             encoding="utf8") as file:
+                    for line in file:
+                        query, title, label = line.strip().split("\t")
+                        if len(query) == 0 or len(title) == 0 or len(
+                                label) == 0 or not label.isdigit() or int(
+                                    label) not in [0, 1]:
+                            logging.warning(
+                                "line does not match format in valid file")
+                            continue
+                        query = [
+                            self.vocab[word] for word in query.split(" ")
+                            if word in self.vocab
+                        ]
+                        title = [
+                            self.vocab[word] for word in title.split(" ")
+                            if word in self.vocab
+                        ]
+                        if len(query) == 0:
+                            query = [0]
+                        if len(title) == 0:
+                            title = [0]
+                        yield [query, title]
+            elif mode == "test":
+                with io.open(self.args.test_data_dir, "r",
+                             encoding="utf8") as file:
+                    for line in file:
+                        query, title, label = line.strip().split("\t")
+                        if len(query) == 0 or len(title) == 0 or len(
+                                label) == 0 or not label.isdigit() or int(
+                                    label) not in [0, 1]:
+                            logging.warning(
+                                "line does not match format in test file")
+                            continue
+                        query = [
+                            self.vocab[word] for word in query.split(" ")
+                            if word in self.vocab
+                        ]
+                        title = [
+                            self.vocab[word] for word in title.split(" ")
+                            if word in self.vocab
+                        ]
+                        if len(query) == 0:
+                            query = [0]
+                        if len(title) == 0:
+                            title = [0]
+                        yield [query, title]
+            else:
+                for idx in range(epoch):
+                    with io.open(self.args.train_data_dir, "r",
+                                 encoding="utf8") as file:
+                        for line in file:
+                            query, title, label = line.strip().split("\t")
+                            if len(query) == 0 or len(title) == 0 or len(
+                                    label) == 0 or not label.isdigit() or int(
+                                        label) not in [0, 1]:
+                                logging.warning(
+                                    "line does not match format in train file")
+                                continue
+                            query = [
+                                self.vocab[word] for word in query.split(" ")
+                                if word in self.vocab
+                            ]
+                            title = [
+                                self.vocab[word] for word in title.split(" ")
+                                if word in self.vocab
+                            ]
+                            label = int(label)
+                            if len(query) == 0:
+                                query = [0]
+                            if len(title) == 0:
+                                title = [0]
+                            yield [query, title, label]
+
+        if self.args.task_mode == "pairwise":
+            return reader_with_pairwise
+        else:
+            return reader_with_pointwise
+
+    def get_infer_reader(self):
+        """
+        get infer reader
+        """
+        with io.open(self.args.infer_data_dir, "r", encoding="utf8") as file:
+            for line in file:
+                query, title = line.strip().split("\t")
+                if len(query) == 0 or len(title) == 0:
+                    logging.warning(
+                        "line does not match format in infer file")
+                    continue
+                query = [
+                    self.vocab[word] for word in query.split(" ")
+                    if word in self.vocab
+                ]
+                title = [
+                    self.vocab[word] for word in title.split(" ")
+                    if word in self.vocab
+                ]
+                if len(query) == 0:
+                    query = [0]
+                if len(title) == 0:
+                    title = [0]
+                yield [query, title]
+
+    def get_infer_data(self):
+        """
+        get infer data
+        """
+        with io.open(self.args.infer_data_dir, "r", encoding="utf8") as file:
+            for line in file:
+                query, title = line.strip().split("\t")
+                if len(query) == 0 or len(title) == 0:
+                    logging.warning(
+                        "line does not match format in infer file")
+                    continue
+                yield line.strip()
+
+    def get_valid_label(self):
+        """
+        get valid data label
+        """
+        if self.valid_label.size == 0:
+            labels = []
+            with io.open(self.args.valid_data_dir, "r", encoding="utf8") as f:
+                for line in f:
+                    labels.append([int(line.strip().split("\t")[-1])])
+            self.valid_label = np.array(labels)
+        return self.valid_label
+
+    def get_test_label(self):
+        """
+        get test data label
+        """
+        if self.test_label.size == 0:
+            labels = []
+            with io.open(self.args.test_data_dir, "r", encoding="utf8") as f:
+                for line in f:
+                    labels.append([int(line.strip().split("\t")[-1])])
+            self.test_label = np.array(labels)
+        return self.test_label
diff --git a/dygraph/similarity_net/run.sh b/dygraph/similarity_net/run.sh
new file mode 100644
index 00000000..318f012e
--- /dev/null
+++ b/dygraph/similarity_net/run.sh
@@ -0,0 +1,100 @@
+#!/usr/bin/env bash
+export FLAGS_enable_parallel_graph=1
+export FLAGS_sync_nccl_allreduce=1
+export CUDA_VISIBLE_DEVICES=3
+export FLAGS_fraction_of_gpu_memory_to_use=0.95
+TASK_NAME='simnet'
+TRAIN_DATA_PATH=./data/train_pairwise_data
+VALID_DATA_PATH=./data/test_pairwise_data
+TEST_DATA_PATH=./data/test_pairwise_data
+INFER_DATA_PATH=./data/infer_data
+VOCAB_PATH=./data/term2id.dict
+CKPT_PATH=./model_files
+TEST_RESULT_PATH=./test_result
+INFER_RESULT_PATH=./infer_result
+TASK_MODE='pairwise'
+CONFIG_PATH=./config/bow_pairwise.json
+INIT_CHECKPOINT=./model_files/simnet_bow_pairwise_pretrained_model/
+
+
+# run_train
+train() {
+    python run_classifier.py \
+        --task_name ${TASK_NAME} \
+        --use_cuda False \
+        --do_train True \
+        --do_valid True \
+        --do_test True \
+        --do_infer False \
+        --batch_size 128 \
+        --train_data_dir ${TRAIN_DATA_PATH} \
+        --valid_data_dir ${VALID_DATA_PATH} \
+        --test_data_dir ${TEST_DATA_PATH} \
+        --infer_data_dir ${INFER_DATA_PATH} \
+        --output_dir ${CKPT_PATH} \
+        --config_path ${CONFIG_PATH} \
+        --vocab_path ${VOCAB_PATH} \
+        --epoch 40 \
+        --save_steps 2000 \
+        --validation_steps 200 \
+        --compute_accuracy False \
+        --lamda 0.958 \
+        --task_mode ${TASK_MODE} \
+        --init_checkpoint ""
+}
+
+# run_evaluate
+evaluate() {
+    python run_classifier.py \
+        --task_name ${TASK_NAME} \
+        --use_cuda False \
+        --do_test True \
+        --verbose_result True \
+        --batch_size 128 \
+        --test_data_dir ${TEST_DATA_PATH} \
+        --test_result_path ${TEST_RESULT_PATH} \
+        --config_path ${CONFIG_PATH} \
+        --vocab_path ${VOCAB_PATH} \
+        --task_mode ${TASK_MODE} \
+        --compute_accuracy False \
+        --lamda 0.958 \
+        --init_checkpoint ${INIT_CHECKPOINT}
+}
+
+# run_infer
+infer() {
+    python run_classifier.py \
+        --task_name ${TASK_NAME} \
+        --use_cuda False \
+        --do_infer True \
+        --batch_size 128 \
+        --infer_data_dir ${INFER_DATA_PATH} \
+        --infer_result_path ${INFER_RESULT_PATH} \
+        --config_path ${CONFIG_PATH} \
+        --vocab_path ${VOCAB_PATH} \
+        --task_mode ${TASK_MODE} \
+        --init_checkpoint ${INIT_CHECKPOINT}
+}
+
+main() {
+    local cmd=${1:-help}
+    case "${cmd}" in
+        train)
+            train "$@";
+            ;;
+        eval)
+            evaluate "$@";
+            ;;
+        infer)
+            infer "$@";
+            ;;
+        help)
+            echo "Usage: ${BASH_SOURCE} {train|eval|infer}";
+            return 0;
+            ;;
+        *)
+            echo "Unsupported command [${cmd}]";
+            echo "Usage: ${BASH_SOURCE} {train|eval|infer}";
+            return 1;
+            ;;
+    esac
+}
+main "$@"
diff --git a/dygraph/similarity_net/run_classifier.py b/dygraph/similarity_net/run_classifier.py
new file mode 100644
index 00000000..6da678f7
--- /dev/null
+++ b/dygraph/similarity_net/run_classifier.py
@@ -0,0 +1,446 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" +SimNet Task +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time +import argparse +import multiprocessing +import sys + +defaultencoding = 'utf-8' +if sys.getdefaultencoding() != defaultencoding: + reload(sys) + sys.setdefaultencoding(defaultencoding) + +sys.path.append("..") + +import paddle +import paddle.fluid as fluid +import numpy as np +import config +import utils +import reader +import nets.paddle_layers as layers +import io +import logging + +from utils import ArgConfig +from model_check import check_version +from model_check import check_cuda + + +def train(conf_dict, args): + """ + train process + """ + + # Get device + if args.use_cuda: + place = fluid.CUDAPlace(0) + else: + place = fluid.CPUPlace() + + # run train + logging.info("start train process ...") + + def valid_and_test(pred_list, process, mode): + """ + return auc and acc + """ + pred_list = np.vstack(pred_list) + if mode == "test": + label_list = process.get_test_label() + elif mode == "valid": + label_list = process.get_valid_label() + if args.task_mode == "pairwise": + pred_list = (pred_list + 1) / 2 + pred_list = np.hstack( + (np.ones_like(pred_list) - pred_list, pred_list)) + metric.reset() + metric.update(pred_list, label_list) + auc = metric.eval() + if args.compute_accuracy: + acc = utils.get_accuracy(pred_list, label_list, args.task_mode, + args.lamda) + return auc, acc + else: + return auc + + + with fluid.dygraph.guard(place): + # used for continuous evaluation + if args.enable_ce: + SEED = 102 + fluid.default_startup_program().random_seed = SEED + fluid.default_main_program().random_seed = SEED + + # loading vocabulary + vocab = utils.load_vocab(args.vocab_path) + # get vocab size + conf_dict['dict_size'] = len(vocab) + # Load network structure dynamically + net = utils.import_class("./nets", + conf_dict["net"]["module_name"], + conf_dict["net"]["class_name"])(conf_dict) + if args.init_checkpoint is not "": + model, _ = fluid.dygraph.load_dygraph(args.init_checkpoint) + net.set_dict(model) + # Load loss function dynamically + loss = utils.import_class("./nets/losses", + conf_dict["loss"]["module_name"], + conf_dict["loss"]["class_name"])(conf_dict) + # Load Optimization method + learning_rate = conf_dict["optimizer"]["learning_rate"] + optimizer_name = conf_dict["optimizer"]["class_name"] + if optimizer_name=='SGDOptimizer': + optimizer = fluid.optimizer.SGDOptimizer(learning_rate,parameter_list=net.parameters()) + elif optimizer_name=='AdamOptimizer': + beta1 = conf_dict["optimizer"]["beta1"] + beta2 = conf_dict["optimizer"]["beta2"] + epsilon = conf_dict["optimizer"]["epsilon"] + optimizer = fluid.optimizer.AdamOptimizer( + learning_rate, + beta1=beta1, + beta2=beta2, + epsilon=epsilon, + parameter_list=net.parameters()) + + # load auc method + metric = fluid.metrics.Auc(name="auc") + simnet_process = reader.SimNetProcessor(args, vocab) + + # set global step + global_step = 0 + ce_info = [] + losses = [] + start_time = time.time() + + train_pyreader = fluid.io.PyReader(capacity=16, return_list=True, use_double_buffer=False) + get_train_examples = simnet_process.get_reader("train",epoch=args.epoch) + train_pyreader.decorate_sample_list_generator( + paddle.batch(get_train_examples, batch_size=args.batch_size), + place) + if args.do_valid: + valid_pyreader = fluid.io.PyReader(capacity=16, return_list=True, use_double_buffer=False) + get_valid_examples = simnet_process.get_reader("valid") + 
+            valid_pyreader.decorate_sample_list_generator(
+                paddle.batch(get_valid_examples, batch_size=args.batch_size),
+                place)
+
+        if args.task_mode == "pairwise":
+            for left, pos_right, neg_right in train_pyreader():
+                left = fluid.layers.reshape(left, shape=[-1, 1])
+                pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])
+                neg_right = fluid.layers.reshape(neg_right, shape=[-1, 1])
+                net.train()
+                global_step += 1
+                left_feat, pos_score = net(left, pos_right)
+                pred = pos_score
+                _, neg_score = net(left, neg_right)
+                avg_cost = loss.compute(pos_score, neg_score)
+                losses.append(np.mean(avg_cost.numpy()))
+                avg_cost.backward()
+                optimizer.minimize(avg_cost)
+                net.clear_gradients()
+
+                if args.do_valid and global_step % args.validation_steps == 0:
+                    # reset predictions for each validation pass so their
+                    # length matches the fixed-length validation labels
+                    pred_list = []
+                    for left, pos_right in valid_pyreader():
+                        left = fluid.layers.reshape(left, shape=[-1, 1])
+                        pos_right = fluid.layers.reshape(
+                            pos_right, shape=[-1, 1])
+                        net.eval()
+                        left_feat, pos_score = net(left, pos_right)
+                        pred = pos_score
+                        pred_list += list(pred.numpy())
+                    valid_result = valid_and_test(pred_list, simnet_process,
+                                                  "valid")
+                    if args.compute_accuracy:
+                        valid_auc, valid_acc = valid_result
+                        logging.info(
+                            "global_steps: %d, valid_auc: %f, valid_acc: %f, valid_loss: %f"
+                            % (global_step, valid_auc, valid_acc,
+                               np.mean(losses)))
+                    else:
+                        valid_auc = valid_result
+                        logging.info(
+                            "global_steps: %d, valid_auc: %f, valid_loss: %f" %
+                            (global_step, valid_auc, np.mean(losses)))
+
+                if global_step % args.save_steps == 0:
+                    model_save_dir = os.path.join(args.output_dir,
+                                                  conf_dict["model_path"])
+                    model_path = os.path.join(model_save_dir, str(global_step))
+
+                    if not os.path.exists(model_save_dir):
+                        os.makedirs(model_save_dir)
+                    fluid.dygraph.save_dygraph(net.state_dict(), model_path)
+
+                    logging.info("saving infer model in %s" % model_path)
+        else:
+            for left, right, label in train_pyreader():
+                left = fluid.layers.reshape(left, shape=[-1, 1])
+                right = fluid.layers.reshape(right, shape=[-1, 1])
+                label = fluid.layers.reshape(label, shape=[-1, 1])
+                net.train()
+                global_step += 1
+                left_feat, pred = net(left, right)
+                avg_cost = loss.compute(pred, label)
+                losses.append(np.mean(avg_cost.numpy()))
+                avg_cost.backward()
+                optimizer.minimize(avg_cost)
+                net.clear_gradients()
+
+                if args.do_valid and global_step % args.validation_steps == 0:
+                    pred_list = []
+                    for left, right in valid_pyreader():
+                        left = fluid.layers.reshape(left, shape=[-1, 1])
+                        right = fluid.layers.reshape(right, shape=[-1, 1])
+                        net.eval()
+                        left_feat, pred = net(left, right)
+                        pred_list += list(pred.numpy())
+                    valid_result = valid_and_test(pred_list, simnet_process,
+                                                  "valid")
+                    if args.compute_accuracy:
+                        valid_auc, valid_acc = valid_result
+                        logging.info(
+                            "global_steps: %d, valid_auc: %f, valid_acc: %f, valid_loss: %f"
+                            % (global_step, valid_auc, valid_acc,
+                               np.mean(losses)))
+                    else:
+                        valid_auc = valid_result
+                        logging.info(
+                            "global_steps: %d, valid_auc: %f, valid_loss: %f" %
+                            (global_step, valid_auc, np.mean(losses)))
+
+                if global_step % args.save_steps == 0:
+                    model_save_dir = os.path.join(args.output_dir,
+                                                  conf_dict["model_path"])
+                    model_path = os.path.join(model_save_dir, str(global_step))
+
+                    if not os.path.exists(model_save_dir):
+                        os.makedirs(model_save_dir)
+                    fluid.dygraph.save_dygraph(net.state_dict(), model_path)
+
+                    logging.info("saving infer model in %s" % model_path)
+
+        end_time = time.time()
+        ce_info.append([np.mean(losses), end_time - start_time])
+        # final save
+        logging.info("the final step is %s" % global_step)
+        model_save_dir = os.path.join(args.output_dir,
+                                      conf_dict["model_path"])
+        model_path = os.path.join(model_save_dir, str(global_step))
+
+        if not os.path.exists(model_save_dir):
+            os.makedirs(model_save_dir)
+        fluid.dygraph.save_dygraph(net.state_dict(), model_path)
+        logging.info("saving infer model in %s" % model_path)
+        # used for continuous evaluation
+        if args.enable_ce:
+            card_num = get_cards()
+            ce_loss = 0
+            ce_time = 0
+            try:
+                ce_loss = ce_info[-1][0]
+                ce_time = ce_info[-1][1]
+            except:
+                logging.info("ce info err!")
+            print("kpis\teach_step_duration_%s_card%s\t%s" %
+                  (args.task_name, card_num, ce_time))
+            print("kpis\ttrain_loss_%s_card%s\t%f" %
+                  (args.task_name, card_num, ce_loss))
+
+        if args.do_test:
+            # Get Feeder and Reader
+            test_pyreader = fluid.io.PyReader(
+                capacity=16, return_list=True, use_double_buffer=False)
+            get_test_examples = simnet_process.get_reader("test")
+            test_pyreader.decorate_sample_list_generator(
+                paddle.batch(get_test_examples, batch_size=args.batch_size),
+                place)
+            pred_list = []
+            for left, pos_right in test_pyreader():
+                net.eval()
+                left = fluid.layers.reshape(left, shape=[-1, 1])
+                pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])
+                left_feat, pos_score = net(left, pos_right)
+                pred = pos_score
+                pred_list += list(pred.numpy())
+            test_result = valid_and_test(pred_list, simnet_process, "test")
+            if args.compute_accuracy:
+                test_auc, test_acc = test_result
+                logging.info("AUC of test is %f, Accuracy of test is %f" %
+                             (test_auc, test_acc))
+            else:
+                test_auc = test_result
+                logging.info("AUC of test is %f" % test_auc)
+
+
+def test(conf_dict, args):
+    """
+    Evaluation Function
+    """
+    logging.info("start test process ...")
+    if args.use_cuda:
+        place = fluid.CUDAPlace(0)
+    else:
+        place = fluid.CPUPlace()
+    with fluid.dygraph.guard(place):
+        vocab = utils.load_vocab(args.vocab_path)
+        simnet_process = reader.SimNetProcessor(args, vocab)
+        test_pyreader = fluid.io.PyReader(
+            capacity=16, return_list=True, use_double_buffer=False)
+        get_test_examples = simnet_process.get_reader("test")
+        test_pyreader.decorate_sample_list_generator(
+            paddle.batch(get_test_examples, batch_size=args.batch_size),
+            place)
+
+        conf_dict['dict_size'] = len(vocab)
+
+        net = utils.import_class("./nets",
+                                 conf_dict["net"]["module_name"],
+                                 conf_dict["net"]["class_name"])(conf_dict)
+
+        model, _ = fluid.dygraph.load_dygraph(args.init_checkpoint)
+        net.set_dict(model)
+        metric = fluid.metrics.Auc(name="auc")
+        pred_list = []
+        with io.open("predictions.txt", "w",
+                     encoding="utf8") as predictions_file:
+            if args.task_mode == "pairwise":
+                for left, pos_right in test_pyreader():
+                    left = fluid.layers.reshape(left, shape=[-1, 1])
+                    pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])
+
+                    left_feat, pos_score = net(left, pos_right)
+                    pred = pos_score
+                    pred_list += list(
+                        map(lambda item: float(item[0]), pred.numpy()[0]))
+                    predictions_file.write(u"\n".join(
+                        map(lambda item: str((item[0] + 1) / 2),
+                            pred.numpy()[0])) + "\n")
+            else:
+                for left, right in test_pyreader():
+                    left = fluid.layers.reshape(left, shape=[-1, 1])
+                    right = fluid.layers.reshape(right, shape=[-1, 1])
+                    left_feat, pred = net(left, right)
+                    pred_list += list(
+                        map(lambda item: float(item[0]), pred.numpy()[0]))
+                    predictions_file.write(u"\n".join(
+                        map(lambda item: str(np.argmax(item)),
+                            pred.numpy()[0])) + "\n")
+
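+        # cos_sim scores lie in [-1, 1]; (s + 1) / 2 maps them into [0, 1] so a
+        # score can be read as P(positive), e.g. s = 0.5 -> 0.75. hstack then
+        # builds the two-column [P(negative), P(positive)] matrix that
+        # fluid.metrics.Auc expects.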
args.task_mode == "pairwise": + pred_list = np.array(pred_list).reshape((-1, 1)) + pred_list = (pred_list + 1) / 2 + pred_list = np.hstack( + (np.ones_like(pred_list) - pred_list, pred_list)) + else: + pred_list = np.array(pred_list) + labels = simnet_process.get_test_label() + + metric.update(pred_list, labels) + if args.compute_accuracy: + acc = utils.get_accuracy(pred_list, labels, args.task_mode, + args.lamda) + logging.info("AUC of test is %f, Accuracy of test is %f" % + (metric.eval(), acc)) + else: + logging.info("AUC of test is %f" % metric.eval()) + + if args.verbose_result: + utils.get_result_file(args) + logging.info("test result saved in %s" % + os.path.join(os.getcwd(), args.test_result_path)) + + +def infer(conf_dict, args): + """ + run predict + """ + logging.info("start test process ...") + if args.use_cuda: + place = fluid.CUDAPlace(0) + else: + place = fluid.CPUPlace() + + + vocab = utils.load_vocab(args.vocab_path) + simnet_process = reader.SimNetProcessor(args, vocab) + get_infer_examples = simnet_process.get_infer_reader + infer_pyreader = fluid.io.PyReader(capacity=16, return_list=True, use_double_buffer=False) + infer_pyreader.decorate_sample_list_generator( + paddle.batch(get_infer_examples, batch_size=args.batch_size), + place) + + conf_dict['dict_size'] = len(vocab) + + net = utils.import_class("./nets", + conf_dict["net"]["module_name"], + conf_dict["net"]["class_name"])(conf_dict) + model, _ = fluid.dygraph.load_dygraph(args.init_checkpoint) + net.set_dict(model) + pred_list = [] + if args.task_mode == "pairwise": + for left, pos_right in infer_pyreader(): + left = fluid.layers.reshape(left, shape=[-1, 1]) + pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1]) + + left_feat, pos_score = net(left, pos_right) + pred = pos_score + preds_list += list( + map(lambda item: str((item[0] + 1) / 2), pred.numpy()[0])) + + else: + for left, right in infer_pyreader(): + left = fluid.layers.reshape(left, shape=[-1, 1]) + pos_right = fluid.layers.reshape(right, shape=[-1, 1]) + left_feat, pred = net(left, right) + preds_list += map(lambda item: str(np.argmax(item)), pred.numpy()[0]) + + + with io.open(args.infer_result_path, "w", encoding="utf8") as infer_file: + for _data, _pred in zip(simnet_process.get_infer_data(), preds_list): + infer_file.write(_data + "\t" + _pred + "\n") + logging.info("infer result saved in %s" % + os.path.join(os.getcwd(), args.infer_result_path)) + + +def get_cards(): + num = 0 + cards = os.environ.get('CUDA_VISIBLE_DEVICES', '') + if cards != '': + num = len(cards.split(",")) + return num + +if __name__ == "__main__": + + args = ArgConfig() + args = args.build_conf() + + utils.print_arguments(args) + check_cuda(args.use_cuda) + check_version() + utils.init_log("./log/TextSimilarityNet") + conf_dict = config.SimNetConfig(args) + if args.do_train: + train(conf_dict, args) + elif args.do_test: + test(conf_dict, args) + elif args.do_infer: + infer(conf_dict, args) + else: + raise ValueError( + "one of do_train and do_test and do_infer must be True") diff --git a/dygraph/similarity_net/struct.jpg b/dygraph/similarity_net/struct.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46987779af2ff59aaa4a7bb0a8943f86524acbe6 GIT binary patch literal 38115 zcmeFYRa6^L*Dcx>3-0bvw75GZxVvj`ch?qoDK5p`DGtTmy|}x(+YR6O{yXmT9pjv* z^FkOTAvX&^f>_l4gQP8yw&edpGZDQ ziU_H=XPo?o_10CbfA`y8;g+1?keu&EppUX&?$P{Kx7HA_@2xiS?h|{wP2Isc69e&qwkV@PG12&Vc9p!yX}6KDISx zicHfyadaq#>KL0#kpvS$pP@k5-60zal6psZCO<@@OS+-d6%rHai%BlLh)~7tY%uQx 
z(ihy?Tzgs^EW?R%zUZ9)#wL5IWa~~wrTERLU!MC#L~=9&H<2CtHD>R!ESf!$GgkJ9ewE@3@U!l@0MeM0~uG?FiTMs zZsNA*U(lU2n;&|B3E8E-KyX*9YbhKKh$l=kEIBc{Dnui&z&4m?lG<&L_wyA8{Dj{Z zyJhRgPqgxVzNh%JOYDC|P|1mlQo~PL*573uvaGvBq=-!)5Fdbbdr%)4^@F@Qs+K-{ zPiSB;Kl@fg&=E6E0xIUfKjf{Yopl!&7Il@CiS*Z^F`ZH6+H#|$Kv%~qT3(VL@c?5b zJ)@f4`|&}pkB2t-g;AUe3g~CUG+SI7R&rf%cPZCYag2frsH#*<14LtM2$}^FMNtDn z=po*3tEPMmGH=3b3HF?U{2j5~y*w(E6D*h+2kUqBNPav}F#AAVieB;pty(LbLbPjb zAnrh`%HeJyI=<}0SU*=ri94!EbMo)!Fy|R`V@MyW(HEf4{$Ji4+d<{MRjW3*g2vV9 zs^!l1;VaK6XX6se40C6goxXhB0GT_sx#emBydzeYSSB1WoP4@a1|~CK%gzQMJo5&Z zcaEcP>rgfBdJn;=NSVz(RhZW)m@nw zd@f+jBfS5R;4gwB-jR9ksN-Xzqjlq*)GM$hI*!Z>VR6>FKa@+k{AA$ViEpkB(4DV7 z*dnldLz}>@YPee(VhGsHrsKt!^5$jPhow)f^=dWJ*CxVH#TOoz05X{McXOBRl}iF< zln#<^w?h+Jgo9scXtfeVRKXwrw zvG!!f>e&szGGj|~)U%oD*G%K%8}SaVYHWD~?BnhFCPw=r<)kyLKiZNWz)Wq(f52# z@rxDtmn`4du-iQhHE^=jJ%lYEy{%Hx$Tl!@SAR8Y2H>ZMuc<&vCzK8hV`~lCcgj$5 zckSd_1=wa^TXdx}sNy1g719QdQws%7p(rXW-Df8S!Qx>#@NrjaZf*?An<8Qk_YYp-nw_>X7fm>t%88-2P%=3{VOS-cD6gBpFB8{ zm?f(4Pu+1`DVe1MAeZceex2V(YVJ>EIJ}hi7le2Io-)R_#iB=D|BzQxR>#3%Pw~@P zn@ln85sdd!tITo|Y?WY?tOSZtyXN?NL3P2CH&&I`@$L?}G4Ebl6bByAN(i?;yV1fP zTRq$FZtNw^3Cqk}%xQ$gy2NXU2Ys>Gy=y4+H_@2Ge}X#=z5YP`KLu^eJ@s z38Qtx1EKL2V>dN8Hbhw*Sof{WH(kCZHTrhY&!{_k5o9wF1 zyh7SuYm7KYD^uFNP8obi50o>CxzWIO`(?18<0K@7rpfT~WE9(U>l8n(V?i}38%1#B zF5wzT|Fu-t>5g&#K(tpWAm-M^kf-Cq3+Z)`I~zMQA zHy=|ct_#6ylhZj9ckHSWyOW~YY5j>gBp{`N(fUQHf-e-9Wk@mZpS|e);*gSiWcx1G zmwBI20(Xj#C?>RGUy&xVd)7b3L~8dI;7sR^|KhLI-Ai9z|MFcx{>_mN`oO3btGF$f z5U*-L=E9dWS)ojIHjOdI$@5$KLIC%h6CI|J=`VdzP^_=93~Vgt*d$pDhp7bcK6?#X@6p?r5&$|T3~ z93TJ~@m)Tyd&I=|%Yv0Njr=f+&R`H`H(K;$(cojIoT9`8lk^N_0?o|Owsgj-w9K@V z^P?*!x!T&RDKOs#nZHCLpD>g$jcjgaP_(d(fmQ-js-NZ#3=4XM(LSk%F`16__5dcx z)EBi`xZbYxMOdZSpU>cc{%6{>h58S;1d07 z13n=CBA5i%Tq2X>!bmOxQ0A1+B#l}#_{Kugfa@0m?y4zqF|k13$R+`Uac!rfxKZK_ z2?piZ`~w}y&3u%OtIHzA_Mgy2!P1yea*dDClv~ zhu$f`7@H6cNSrWXD}gd~P&?QJ-3m0(eaoZ7AJhe6iZnq*YFEyRl7O(=F=Ce9pWrBN zod`g~^fMs!&3WA$u(umd3!x8a^2#>`+1&c7XpR3FaZXbl-?F8X&ytb0RT`D$5GD8LBpWPT4avaY^(@?aLN8RVLjYxp+=z49v3bKj-j zNKtuA8m3D72odLFiz-7Kx;UJz8A_@a1C?;S6p2Ij{3zNJ+-pAL|0F2dz`OW`t($H@ z@H?_s+I#;WXEB#l#KC-e7{BfGjI1$4d$+z+cfXKDu?yHs0tIW5tYyovJqXxNC(pcs zqUD3p_H=d=a7IeRN(X)Vesdg8cZo$E{k7*Gc z9)e4REFNCD_OGC$R-G1CdW4vvz#>=!8=tfjz^S650vlEf2>Qqrvb?MZKlSS3Sxbn5 zG9ivGG}{qpTdp=iK5ac>-}%B68Yv8T6D|1uKiM(ESj6!xHG`DB2$?+5#eNNfJ*J&h zeaG%pHho-ll=yMWi_Ksh)BY&JU_;OzXMQaBd#&99JN)^sOF$MB8!ChGpMK-lyEQ8# zO#(0pdZW{ZV^Fp~4j7@ic=)IM;x{smHID6fC?cOLLZa=<$#TQv`@&u0j8}BA{k0aO zs4ebMP;xf-;peX91$`O>1`{SNQ;^&vO#wkF?ChirL})f9$kW}D)KMn{UF*pg{^$T8 zGB`Fmu0{Q)9$BXKa}Fn6^L|y!TnDHOBMr> z&GS-kHVdHOG=Qi$w9YRhCSb&~(!O-3?N_R^`ZPOf8Oj6g?CRg)>Ku7x1H;ED#@L33vOIsv*-w&!lxC<7Q zI5`Bs#w|_F-MKV1^{ugyUrSpX+cSX>%=}^9d*@v=l?0{PKs1+h?Y6QgLI{#r$hA;c zP3{@M_Ora4XF^UvVfYJ??g1mVje?WKKOm+_Lu2FVk*~dz)8OuKk_$pcYnub1+1ZH* z-f3!WMF_K)b3)3Q6u-}`uG$BswF6F;>d@+}CI{~K^SYb{GO6|Gy5n~ZnL@wc>JSiH zSXlgJRO@`?B8|y3!UU@)fB(_mz6#%s>^FUU{*yb?BkKmjK(u10r!sRCcX}y+Lafv* zp9#^K6ua_cL!tlOJ%tG)S^{x*O?IgBUqaN3)Bk_y|FsI#H+AlfMV&Yy`Aj3~6XI+^ zw(9w2K1P#lsy(;o=bmJNtC<5M{6ugyA*2g(vJ;#q)b2yBh@4y1-xG*Vlq_xTjjr>N zvQLgd)<~XY9&fQNs3BsG>8;%?O0o`wX`>9Pf{(}dk#+w- zfiZ`i)$w}Q>bBDgbI^|*hWKK_01pKsW24-@4iS5qdbOR1!CJUZJ`t6MkW_rt*(^S7 zbATfjJPv^)<54X;H?Tu#D4hx2vD&hN@KGbL>8gw`vf7d7~0$m=#<*uZWVIT zS*y41dX5NhT!CF-d$15#Fwsu*IW&;8H?L40BZlv337||9vHIa$01bQrU1j5@2kTD_ z))Ir;r^~!O!f?b-?<27i*`19IP2$ub)@#yeEI(C4p;!t*b5P5bUgIXJ5VIP^I$lDH zA=(6~Fost4Ei6xUY*wgL`7${H%sLKAUK8QKaG+!V?|(|7_owP(GEGeSzv<1FQL&$h zF(6Ik!o2J|)cZ#g=YKXT5AXi(MrE$%<9}P0qK%J(QvdB?k^>O4m?YgE(h2tFbq&IK zca?O5NX90Df`^f<)dyw`#gl`9yo+xHNfR1WgC#M~N-<1{yT0#}Qq0W+|1+(J_ikCm 
zLLN20)unpK8~w;;RjH}=sj>bo1;d>^vQR9@1_tWfrTl5#P}@Hm;XcnCuVRv>Dge`9 zzKGLCFvZ-4ieVr6PSK|6xtrouSM-O&*Oec>50xja%&! zD)=e1xJaGnYKT$#K90}=)&+#3+*pvC5BE@Py=K!zND#_X+#S3jS%3RK+KULD718No zyFTQA*tOoj+Z^Gd8RNj*oQSc37yJ?VNB_gYXb<5+Prm%di<#vCFDfirP5o1Im`S~# z{N|68%_6V&HcimB{VF%U9wJg5MqdT1l%=80%OO;eD&b zwF+cJo!L@{)RP>->Xew9tY$swdZt^Rw$n)tl-Qo-WCZ8 zwJI59*<>A?mSK#eS>|)IpE<&zOu=zVqm}6TrOX)6My}wZIa=MUA9WKE07F5RpdFSV zovg+fZI9FOW6bGqpoV{onVjsU7(I3#=U%qck|KciDH0&C*+g+jkl9kF8Aq(v^#z|T zk#haW*tXZ!1B6PB_~Kp^b3p4pN?|fbqH)ns{yq{hBv(S)p%cdKum6kIXi=k^9rTk^ zT#!F{%>g?sCH!Jk7n|KUhYN!G=HVX(L0`IqFKwQ|e5`Iss_9lKC(DAxwCA^-TZ%JS z2c!Iw_auZF#g%2&?hXhK{o^Rwh+Y!rT2)Gq7|b%qd?VU-8>L{EWnT*y^a2u@cC8~5 zbzNL5Fp{O<)%R9VqXs0lDsppsn#R6Fle3sR=p|!o6XP%v#1=@6);)wC939uW9809i z+AW1Dyhp|@zq1*xX`d=-zQ@S=-n)L7N1FMoO@bhINlfI{=q#`p?GuUj++Z4pz^hjT zz6vsuS~bMfg}!J9c>dmcGWqjf6#dwCg@;krg z>lPFi>oJLR{J}iy69oN}$M)P08Jkx-`0a8Qp?P-1aFx5^TP;UZM8=P7U@Hrg}ew zaQ1LR55MPRd`WtFt_(^SAqN0-i+!WX^4po*jiL@xpg=;{RnbdBv8aA)m!ABuDA|*~ z47#4Dq%hBMeZU>Z?O-MW3@lg;R>uq9gnoYSjhIhX5_x+IjB`l)b1q&v8sv3vcVLsQ zbvDQwQK`oL5H;P3KEyqB5aVTwn#FrSqR1b`wv71Xx)_DTl1-^}sm-iUn4t{_yDU>jf}Vk|w5TsxF@Hbr*g=h$1Fhz0#lY zN76rAk2mkv6_5VxdBv-BFxz{Pjakc`1H;$Z`uGEDkRF(yVJfyPhSExhQNvKabGDf1 zmx188B0x+4Ft`J)3|u@I6g#@z!EZF~plniU=CP{gh4i{B9G1enA+Z>O``>b5`VG&F zSf7Ab{dZ2AsK2u;7EnXCeJ-(|u|$T8d2=f+Yu4LN1!V(@oH(NL2#S4m63yM&#T}Ue zED)?2k_rI;PrTIX#QGvLwDJ02Oy{N@F+ROg1H-TzN}_(W%%+j65tt1bN5I-4fg*{8 za@3otT#s>syXYgem-o9`TL$JI0gctbezRYlB_%9T+`Q=;=HzC{c>mTGTnN$k4sAjbGsQABo%*a-S&aw(H>z0fAr;d2)e20b1@&Y|I2v4_faYC>0*xfxv2Z3 z6p5YehgJcflca-PxU1?M9hd}o+GzcmQ(#=F*n92PS0tdmmn&n(b`nt=cx;KQQFXFm zQ)AX!^oNr|JG9XlPD!ZIa zvl~7i{M=BTbW>&9P7_5o5!Df2?aA>IW2;=VXK^N8lAbk?Ud*nF>5C<#o(BVV-A$`sN;fuMvT`pZ%87V;_M0c`aDCOjJE-ct_XN|2kWu;ky>WolR4 zXGL-Mu5z%PYdlK%plFrom{+Y&H;q^crO ztpQ|M)f=#&$m!P89I9>#&%BaTzjN+Y&L2tp#e{(!2lmsAy-mY8=njb}o2sv6n4+=! 
zJQSrPlIURQ*dBQq<`$aVNrg>B@nf(02O3_y;UyWk`<&UnZD3vnth zOD|LL<5@p6T>?p`gAHQON_*2NmWQAXum&_B>nZV!Ba z-)}9P6sJ{4HaG4SeyFh<#chs<9~&Ax(7Df3dLIbv!NLMXQ{eaa;8D*1SxJ}7*J}ep z{!aka4Jz_KH8@y6z54em3!{fcaOczzo}x3+Of71m)->C0ud7>1fb`LcQH+TIH@uNS z3$^H>+6+Fqs0bI)v3ZrRc?Xz!8qAHaNKc8(ocqlte|av~+h2W9lgE6qE zV=<7haxdGJ#V41#Xid5ZM}qkJ+!(eur17ISvS_6#+%qnW%`Gk--bury=Y1i=h*lcL zk~$XrYzgwKaqHjEduPH0V_;(%9|wypDb(La#P>X2SVwN6gZ&-}5jo7Qp1;A(QLn9n!aXzVQn-`?i**n=v^ET9-kF2yg_q){8nm@+ic0s1#OJ>Q!h#bfJ|_|)30 zDc2NgQ8AC&miBu(ibDRrOM8OqR175QSPT&UuAlVa=fyFr$U-?IfJ#37VNnZpZNL^f z6=GojG1|h6sxZDl$75_+5V4?#Fh3WjNfz~D&Eix@hV*G;BKXR*2wtbrn(<7xW2%+e z&x00!+om*5;)SoA6T`NwiyzfFC<=Bs7cNgWp*Ud5x-R8lIU@lX_K>8_^Cm>hv5#y> z;{Fy7NnUe)Wf0$;8^aO`(6yT~*pRkNr8=0^_!LDMc+4h+G|q2Oi>4O&i&hP%22}9h z+LVV4du(iIbMe737Uqoz<0?ia6GV#wc`ah2O)cuFM>4NP=Q1ez!juSJs!!v4TeE2Q zx@wU`F#oeYg*Oc=!B<$0x@;1^+~JsQ4LZw)>{~Rm)PkL{@j`+QgXNhua*A1|3rB~M zP1v}ej?XT>RuK>4l9_SL4!ZdKt0^pJU=+yZTl6*5qVzw9vJ)}%d}D%Lu^btXv_HPRFsGP9)ueH+f!~uh95e9R2V;H z10Uyc-&&Qz>h%6Q2ukn5j&>K9|q6|r-p%q`Y_ys@7i$kfqVHvHGFOrp^W;-&?0Tr)m|KqoB{t(#a9!boaRBcr4pGQ?r| zH*nST7*4OSaMSD5x{N>UA-x*Acn_zJjNr$-{_wDWU3Dr39(61R5Lv_irqxM&e0d5t zY|P-PJ&sv$s){YNOA9EL}N=J=qhTdaPnZ@`+_BBq-MFno~}G@oj5 z&G>6oEP!v!j^mUP3pdhfS<>d2s6@nRbF+>AUY(+&0(^ab9A_}P=qXcMQIw&9*%-|6 za03I3PrR1Gcd14HYI49fnOYPu$=edhh!#D!BV)Aa)_Mjyj{n+PYEc`tsI$I!x&K`` zJ&xbckD-S8@ohQ{vPF8hm!wGvUY0P@`uMUWtXL4&Gtl|f9N8nuGC3NOOL5`o5MtD_ ze^D#l+;u)ODT*&n3ghXG)JT>~hHquaSAv{d)~4`R256^@kKj8K!p5;sIU0C$C}e1d@uk&{S+MFoBQ4W|Ae((j@51@x zqGpqzBEImQrS16HF2^iq+n2{E*L=PqgNqk?_```sIBQe{4{yt2dD6vuMu)MWEPxF= z83|+=F_ebzz)5jTFAA9JUO6|8&(5=q;}S4};hA#FRJB+jh@Z`6B(WrkU0L;76=kR* z`qGrsA}=6XR1Suj0quXd3d-!Yle@n znHj@pCWcKu#+;F1JXak-IOzAriXM54YGjy4dNnSa6T{0*HtyW3uEjv3j>Q0zU-H(; zt@jWPVBa>t`Q#>T=m#LOmnYFpY&vy#UBpY>^cBgQYX z)y9wMRD=$4N|hM<3yc=dd8Q5Xi-AAxbx_3+;+>2ThS893-eQ{`mNt4XOIkft2VGth z=BFj?XNT`(gs_9<*vm4t^7jkIMsRjj7@w`l;`(j&0n4B$%HYwP7VSf3neoLRpHPfX zjtwJ~^k;UQFfNAgP6-<=dSr7J?Y6&w)+5`gMf5AuY|MYY+v&BZqBBwB@ESmWG{k0jK9Cop}#latT*r;`UEs`G8 zpKS28=uq<-Ct&} zL^IUmS!UxlT6M_WvHy*3JmA2Etj{>W6)cK8d;v^#!JgcyRX zS|`H{X7LwKJofxD--jnT(hSjf{w8CwV23d}xf*hy>7)5$W%x#?b{#$>YCL6l5Ko+1 zGT22qY{(dU&BM*6J}HMEvDY&Gh+AN^h|fRC|H{#vx3WATv*5loI- zW=fl)C_{_111u~H;@x8k+p&;!2|hA0jPWtmB1IXB$f~T1$^*D?)c%7Z`#BclN&@&V z+4U|SK$~ol>*+YBq6{Wte`3U9kBcgT`2PGjN(+1_M3`0@#LaVK#<^(Asun5AU?V<$ zUW-nz4B`ga__BwDul*Lt%JW=5@GSq`l5~+#i&m6DLAH{f!vCE;B4m0L=TsbB z69b>znZ<2evUtAH##U-kR<%e`1_jxiZYuxx_8MP{rkC}sMZG!}LJKW;Q=5yu{5N~p zimE6|Uoc=tWyHd?k^rJd>7qYPjBGX~T(fbO?n0v|gF~G1GhDQ&G+gp; P00000NkvXXu0mjfafPR7 literal 0 HcmV?d00001 diff --git a/dygraph/similarity_net/utils.py b/dygraph/similarity_net/utils.py new file mode 100644 index 00000000..76fcc363 --- /dev/null +++ b/dygraph/similarity_net/utils.py @@ -0,0 +1,364 @@ +# -*- encoding:utf-8 -*- +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +SimNet utilities. 
+
+"""
+import argparse
+import time
+import sys
+import re
+import os
+import six
+import numpy as np
+import logging
+import logging.handlers
+import paddle.fluid as fluid
+import io
+"""
+******functions for file processing******
+"""
+
+
+def load_vocab(file_path):
+    """
+    Load the given vocabulary, mapping each term to its id.
+    """
+    vocab = {}
+    f = io.open(file_path, "r", encoding="utf8")
+    for line in f:
+        items = line.strip("\n").split("\t")
+        if items[0] not in vocab:
+            vocab[items[0]] = int(items[1])
+    vocab["<unk>"] = 0
+    return vocab
+
+
+def get_result_file(args):
+    """
+    Merge each test sample with its prediction into one result file.
+    Args:
+        args: config holding test_data_dir (path of the real test data)
+            and test_result_path (path of the merged result)
+    Returns:
+        None; the merged result is written to args.test_result_path
+    """
+    with io.open(args.test_data_dir, "r", encoding="utf8") as test_file:
+        with io.open("predictions.txt", "r", encoding="utf8") as predictions_file:
+            with io.open(args.test_result_path, "w", encoding="utf8") as test_result_file:
+                test_datas = [line.strip("\n") for line in test_file]
+                predictions = [line.strip("\n") for line in predictions_file]
+                for test_data, prediction in zip(test_datas, predictions):
+                    test_result_file.write(test_data + "\t" + prediction + "\n")
+    os.remove("predictions.txt")
+
+
+"""
+******functions for string processing******
+"""
+
+
+def pattern_match(pattern, line):
+    """
+    Check whether a string matches a pattern
+    Args:
+        pattern: matching pattern
+        line: input string
+    Returns:
+        True/False
+    """
+    return bool(re.match(pattern, line))
+
+
+"""
+******functions for parameter processing******
+"""
+
+
+def print_progress(task_name, percentage, style=0):
+    """
+    Print a progress bar
+    Args:
+        task_name: The name of the current task
+        percentage: Current progress
+        style: Progress bar style
+    """
+    styles = ['#', '█']
+    mark = styles[style] * percentage
+    mark += ' ' * (100 - percentage)
+    status = '%d%%' % percentage if percentage < 100 else 'Finished'
+    sys.stdout.write('%+20s [%s] %s\r' % (task_name, mark, status))
+    sys.stdout.flush()
+    time.sleep(0.002)
+
+
+def display_args(name, args):
+    """
+    Log parameter information
+    Args:
+        name: logger instance name
+        args: input argument namespace
+    """
+    logger = logging.getLogger(name)
+    logger.info("The arguments passed from the command line are:")
+    for k, v in sorted(vars(args).items()):
+        logger.info("{}:\t{}".format(k, v))
+
+
+def import_class(module_path, module_name, class_name):
+    """
+    Load a class dynamically
+    Args:
+        module_path: The directory that contains the module
+        module_name: The module name
+        class_name: The name of the class in the imported module
+    Return:
+        The class object named class_name in the imported module
+    """
+    if module_path:
+        sys.path.append(module_path)
+    module = __import__(module_name)
+    return getattr(module, class_name)
+
+
+def str2bool(v):
+    """
+    String to Boolean
+    """
+    # argparse does not parse strings such as "True"/"False" into Python
+    # booleans directly, so convert them here
+    return v.lower() in ("true", "t", "1")
+
+
+class ArgumentGroup(object):
+    """
+    Wrapper around an argparse argument group
+    """
+
+    def __init__(self, parser, title, des):
+        self._group = parser.add_argument_group(title=title, description=des)
+
+    def add_arg(self, name, type, default, help, **kwargs):
+        """
+        Add an argument to the group
+        """
+        type = str2bool if type == bool else type
+        self._group.add_argument(
+            "--" + name,
+            default=default,
+            type=type,
+            help=help + ' Default: %(default)s.',
+            **kwargs)
+
+class ArgConfig(object):
+    def __init__(self):
+        parser = argparse.ArgumentParser()
+
+        model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
+        model_g.add_arg("config_path", str, None, "Path to the json file for SimNet model config.")
+        model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
+        model_g.add_arg("output_dir", str, None, "Directory path to save checkpoints.")
+        model_g.add_arg("task_mode", str, None, "Task mode: pairwise or pointwise.")
+
+        train_g = ArgumentGroup(parser, "training", "training options.")
+        train_g.add_arg("epoch", int, 10, "Number of epochs for training.")
+        train_g.add_arg("save_steps", int, 200, "The steps interval to save checkpoints.")
+        train_g.add_arg("validation_steps", int, 100, "The steps interval to evaluate model performance.")
+
+        log_g = ArgumentGroup(parser, "logging", "logging related.")
+        log_g.add_arg("skip_steps", int, 10, "The steps interval to print loss.")
+        log_g.add_arg("verbose_result", bool, True, "Whether to output verbose result.")
+        log_g.add_arg("test_result_path", str, "test_result", "Directory path to test result.")
+        log_g.add_arg("infer_result_path", str, "infer_result", "Directory path to infer result.")
+
+        data_g = ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options.")
+        data_g.add_arg("train_data_dir", str, None, "Directory path to training data.")
+        data_g.add_arg("valid_data_dir", str, None, "Directory path to validation data.")
+        data_g.add_arg("test_data_dir", str, None, "Directory path to testing data.")
+        data_g.add_arg("infer_data_dir", str, None, "Directory path to infer data.")
+        data_g.add_arg("vocab_path", str, None, "Vocabulary path.")
+        data_g.add_arg("batch_size", int, 32, "Total number of examples in one training batch.")
+
+        run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
+        run_type_g.add_arg("use_cuda", bool, False, "If set, use GPU for training.")
+        run_type_g.add_arg("task_name", str, None, "The name of the task to perform.")
+        run_type_g.add_arg("do_train", bool, False, "Whether to perform training.")
+        run_type_g.add_arg("do_valid", bool, False, "Whether to perform validation.")
+        run_type_g.add_arg("do_test", bool, False, "Whether to perform testing.")
+        run_type_g.add_arg("do_infer", bool, False, "Whether to perform inference.")
+        run_type_g.add_arg("compute_accuracy", bool, False, "Whether to compute accuracy.")
+        run_type_g.add_arg("lamda", float, 0.91, "When task_mode is pairwise, lamda is the threshold for calculating the accuracy.")
+
+        custom_g = ArgumentGroup(parser, "customize", "customized options.")
+        self.custom_g = custom_g
+
+        parser.add_argument(
+            '--enable_ce',
+            action='store_true',
+            help='If set, run the task with continuous evaluation logs.')
+
+        self.parser = parser
+
+    def add_arg(self, name, dtype, default, descrip):
+        self.custom_g.add_arg(name, dtype, default, descrip)
+
+    def build_conf(self):
+        return self.parser.parse_args()
+
+
+def print_arguments(args):
+    """
+    Print Arguments
+    """
+    print('----------- Configuration Arguments -----------')
+    for arg, value in sorted(six.iteritems(vars(args))):
+        print('%s: %s' % (arg, value))
+    print('------------------------------------------------')
+
+
+def init_log(
+        log_path,
+        level=logging.INFO,
+        when="D",
+        backup=7,
+        format="%(levelname)s: %(asctime)s - %(filename)s:%(lineno)d * %(thread)d %(message)s",
+        datefmt=None):
+    """
+    init_log - initialize the logging module
+
+    Args:
+        log_path - Log file path prefix.
+            Log data will go to two files: log_path.log and log_path.log.wf
+            Any non-existent parent directories will be created automatically
+        level - messages above this level will be displayed
+            DEBUG < INFO < WARNING < ERROR < CRITICAL
+            the default value is logging.INFO
+        when - how to split the log file by time interval
+            'S' : Seconds
+            'M' : Minutes
+            'H' : Hours
+            'D' : Days
+            'W' : Week day
+            default value: 'D'
+        format - format of the log
+            default format:
+            %(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s
+            INFO: 12-09 18:02:42: log.py:40 * 139814749787872 HELLO WORLD
+        backup - how many backup files to keep
+            default value: 7
+
+    Raises:
+        OSError: fail to create log directories
+        IOError: fail to open log file
+    """
+    formatter = logging.Formatter(format, datefmt)
+    logger = logging.getLogger()
+    logger.setLevel(level)
+
+    # console handler
+    consoleHandler = logging.StreamHandler()
+    consoleHandler.setLevel(logging.DEBUG)
+    logger.addHandler(consoleHandler)
+
+    dir = os.path.dirname(log_path)
+    if not os.path.isdir(dir):
+        os.makedirs(dir)
+
+    # normal log file, rotated by the time interval `when`
+    handler = logging.handlers.TimedRotatingFileHandler(
+        log_path + ".log", when=when, backupCount=backup)
+    handler.setLevel(level)
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+
+    # warning-and-above log file (".log.wf")
+    handler = logging.handlers.TimedRotatingFileHandler(
+        log_path + ".log.wf", when=when, backupCount=backup)
+    handler.setLevel(logging.WARNING)
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+
+
+def set_level(level):
+    """
+    Set the log level in real time
+    """
+    logger = logging.getLogger()
+    logger.setLevel(level)
+    logging.info('log level is set to : %d' % level)
+
+
+def get_level():
+    """
+    Get the current log level
+    """
+    logger = logging.getLogger()
+    return logger.level
+
+
+def get_accuracy(preds, labels, mode, lamda=0.958):
+    """
+    Compute accuracy
+    """
+    if mode == "pairwise":
+        # a pair is predicted positive when its positive score reaches lamda
+        preds = np.array(list(map(lambda x: 1 if x[1] >= lamda else 0, preds)))
+    else:
+        preds = np.array(list(map(lambda x: np.argmax(x), preds)))
+    labels = np.squeeze(labels)
+    return np.mean(preds == labels)
+
+
+def get_softmax(preds):
+    """
+    Compute softmax
+    """
+    _exp = np.exp(preds)
+    return _exp / np.sum(_exp, axis=1, keepdims=True)
+
+
+def get_sigmoid(preds):
+    """
+    Compute sigmoid
+    """
+    return 1 / (1 + np.exp(-preds))
+
+
+def deal_preds_of_mmdnn(conf_dict, preds):
+    """
+    Post-process MM-DNN predictions
+    """
+    if conf_dict['task_mode'] == 'pairwise':
+        return get_sigmoid(preds)
+    else:
+        return get_softmax(preds)
+
+
+def init_checkpoint(exe, init_checkpoint_path, main_program):
+    """
+    Init checkpoint
+    """
+    assert os.path.exists(
+        init_checkpoint_path), "[%s] cannot be found." % init_checkpoint_path
+
+    def existed_persistables(var):
+        if not fluid.io.is_persistable(var):
+            return False
+        return os.path.exists(os.path.join(init_checkpoint_path, var.name))
+
+    fluid.io.load_vars(
+        exe,
+        init_checkpoint_path,
+        main_program=main_program,
+        predicate=existed_persistables)
+    print("Load model from {}".format(init_checkpoint_path))
--
GitLab
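For reference, below is a minimal usage sketch of the utilities added in utils.py. It is not part of the patch: the file name demo_utils.py is hypothetical, and it assumes the patch has been applied, PaddlePaddle is installed (utils.py imports paddle.fluid), and the script is run from dygraph/similarity_net/ so that the utils module is importable.

# demo_utils.py -- hypothetical sketch exercising utils.py; run from
# dygraph/similarity_net/ after applying this patch.
import numpy as np

import utils

# Parse the flags declared in ArgConfig (defaults apply for omitted flags),
# then echo the resulting configuration, e.g.:
#   python demo_utils.py --task_mode pairwise --batch_size 64
args = utils.ArgConfig().build_conf()
utils.print_arguments(args)

# Send logs to ./logs/demo.log (INFO and up) and ./logs/demo.log.wf
# (WARNING and up), rotated daily, keeping 7 backups.
utils.init_log("./logs/demo")

# In "pairwise" mode, get_accuracy counts a prediction as positive when its
# second column reaches the threshold lamda. Toy scores, not model output:
preds = np.array([[0.10, 0.97], [0.80, 0.20]])
labels = np.array([[1], [0]])
print(utils.get_accuracy(preds, labels, mode="pairwise", lamda=0.958))  # 1.0

Because init_log attaches its handlers to the root logger, any later logging.info(...) call in the same process is written to both rotated files as well as the console.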