From 8f13d8ca689beed53f2b5a7ab38dae717873c49b Mon Sep 17 00:00:00 2001 From: KP <109694228@qq.com> Date: Mon, 15 Mar 2021 10:21:45 +0800 Subject: [PATCH] Add dataset, module task, and demo of text-matching (#1307) * update Transformer module that can do text-matching task --- demo/text_matching/predict.py | 2 +- demo/text_matching/train.py | 2 +- .../language_model/bert-base-cased/README.md | 4 ++ .../language_model/bert-base-cased/module.py | 61 ++++++++++++++++++- .../bert-base-chinese/README.md | 4 ++ .../bert-base-chinese/module.py | 61 ++++++++++++++++++- .../bert-base-multilingual-cased/README.md | 4 ++ .../bert-base-multilingual-cased/module.py | 61 ++++++++++++++++++- .../bert-base-multilingual-uncased/README.md | 4 ++ .../bert-base-multilingual-uncased/module.py | 61 ++++++++++++++++++- .../bert-base-uncased/README.md | 4 ++ .../bert-base-uncased/module.py | 61 ++++++++++++++++++- .../language_model/bert-large-cased/README.md | 4 ++ .../language_model/bert-large-cased/module.py | 61 ++++++++++++++++++- .../bert-large-uncased/README.md | 4 ++ .../bert-large-uncased/module.py | 61 ++++++++++++++++++- .../language_model/chinese_bert_wwm/README.md | 4 ++ .../language_model/chinese_bert_wwm/module.py | 61 ++++++++++++++++++- .../chinese_bert_wwm_ext/README.md | 4 ++ .../chinese_bert_wwm_ext/module.py | 61 ++++++++++++++++++- .../chinese_electra_base/README.md | 4 ++ .../chinese_electra_base/module.py | 61 ++++++++++++++++++- .../chinese_electra_small/README.md | 4 ++ .../chinese_electra_small/module.py | 61 ++++++++++++++++++- .../language_model/electra_base/README.md | 4 ++ .../language_model/electra_base/module.py | 61 ++++++++++++++++++- .../language_model/electra_large/README.md | 4 ++ .../language_model/electra_large/module.py | 61 ++++++++++++++++++- .../language_model/electra_small/README.md | 4 ++ .../language_model/electra_small/module.py | 61 ++++++++++++++++++- modules/text/language_model/ernie/README.md | 4 ++ modules/text/language_model/ernie/module.py | 61 ++++++++++++++++++- .../text/language_model/ernie_tiny/README.md | 4 ++ .../ernie_v2_eng_base/README.md | 4 ++ .../ernie_v2_eng_base/module.py | 61 ++++++++++++++++++- .../ernie_v2_eng_large/README.md | 4 ++ .../ernie_v2_eng_large/module.py | 61 ++++++++++++++++++- modules/text/language_model/rbt3/README.md | 4 ++ modules/text/language_model/rbt3/module.py | 61 ++++++++++++++++++- modules/text/language_model/rbtl3/README.md | 4 ++ modules/text/language_model/rbtl3/module.py | 61 ++++++++++++++++++- .../roberta-wwm-ext-large/README.md | 4 ++ .../roberta-wwm-ext-large/module.py | 61 ++++++++++++++++++- .../language_model/roberta-wwm-ext/README.md | 4 ++ .../language_model/roberta-wwm-ext/module.py | 61 ++++++++++++++++++- 45 files changed, 1308 insertions(+), 65 deletions(-) diff --git a/demo/text_matching/predict.py b/demo/text_matching/predict.py index 6fe6a42a..f4d17155 100644 --- a/demo/text_matching/predict.py +++ b/demo/text_matching/predict.py @@ -25,7 +25,7 @@ if __name__ == '__main__': model = hub.Module( name='ernie_tiny', - version='2.0.1', + version='2.0.2', task='text-matching', load_checkpoint='./checkpoint/best_model/model.pdparams', label_map=label_map) diff --git a/demo/text_matching/train.py b/demo/text_matching/train.py index 7770b3c0..ddbebb32 100644 --- a/demo/text_matching/train.py +++ b/demo/text_matching/train.py @@ -31,7 +31,7 @@ parser.add_argument("--save_interval", type=int, default=2, help="Save checkpoin args = parser.parse_args() if __name__ == '__main__': - model = 
hub.Module(name='ernie_tiny', version='2.0.1', task='text-matching') + model = hub.Module(name='ernie_tiny', version='2.0.2', task='text-matching') tokenizer = model.get_tokenizer() train_dataset = LCQMC(tokenizer=tokenizer, max_seq_len=args.max_seq_len, mode='train') diff --git a/modules/text/language_model/bert-base-cased/README.md b/modules/text/language_model/bert-base-cased/README.md index de01d968..3f8c657d 100644 --- a/modules/text/language_model/bert-base-cased/README.md +++ b/modules/text/language_model/bert-base-cased/README.md @@ -164,3 +164,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/bert-base-cased/module.py b/modules/text/language_model/bert-base-cased/module.py index 8b7b75d5..af74ab5f 100644 --- a/modules/text/language_model/bert-base-cased/module.py +++ b/modules/text/language_model/bert-base-cased/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="bert-base-cased", - version="2.0.1", + version="2.0.2", summary= "bert_cased_L-12_H-768_A-12, 12-layer, 768-hidden, 12-heads, 110M parameters. The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -72,6 +72,12 @@ class Bert(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-cased', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-cased', **kwargs) else: @@ -85,8 +91,28 @@ class Bert(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -109,6 +135,35 @@ class Bert(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = 
paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/bert-base-chinese/README.md b/modules/text/language_model/bert-base-chinese/README.md index 41a8e0c1..52fb33a6 100644 --- a/modules/text/language_model/bert-base-chinese/README.md +++ b/modules/text/language_model/bert-base-chinese/README.md @@ -163,3 +163,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/bert-base-chinese/module.py b/modules/text/language_model/bert-base-chinese/module.py index bb8cca19..443e56b1 100644 --- a/modules/text/language_model/bert-base-chinese/module.py +++ b/modules/text/language_model/bert-base-chinese/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="bert-base-chinese", - version="2.0.1", + version="2.0.2", summary= "bert_chinese_L-12_H-768_A-12, 12-layer, 768-hidden, 12-heads, 110M parameters. 
The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -80,6 +80,12 @@ class Bert(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-chinese', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-chinese', **kwargs) else: @@ -93,8 +99,28 @@ class Bert(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class Bert(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git 
a/modules/text/language_model/bert-base-multilingual-cased/README.md b/modules/text/language_model/bert-base-multilingual-cased/README.md index 14a2def0..bd3355a8 100644 --- a/modules/text/language_model/bert-base-multilingual-cased/README.md +++ b/modules/text/language_model/bert-base-multilingual-cased/README.md @@ -163,3 +163,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/bert-base-multilingual-cased/module.py b/modules/text/language_model/bert-base-multilingual-cased/module.py index 124a0ce4..e1c79e94 100644 --- a/modules/text/language_model/bert-base-multilingual-cased/module.py +++ b/modules/text/language_model/bert-base-multilingual-cased/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="bert-base-multilingual-cased", - version="2.0.1", + version="2.0.2", summary= "bert_multi_cased_L-12_H-768_A-12, 12-layer, 768-hidden, 12-heads, 110M parameters. The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -80,6 +80,12 @@ class Bert(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-multilingual-cased', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-multilingual-cased', **kwargs) else: @@ -93,8 +99,28 @@ class Bert(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class Bert(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / 
query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/bert-base-multilingual-uncased/README.md b/modules/text/language_model/bert-base-multilingual-uncased/README.md index 3d07c213..8ff20880 100644 --- a/modules/text/language_model/bert-base-multilingual-uncased/README.md +++ b/modules/text/language_model/bert-base-multilingual-uncased/README.md @@ -163,3 +163,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/bert-base-multilingual-uncased/module.py b/modules/text/language_model/bert-base-multilingual-uncased/module.py index c957d7e3..82fc6a4b 100644 --- a/modules/text/language_model/bert-base-multilingual-uncased/module.py +++ b/modules/text/language_model/bert-base-multilingual-uncased/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="bert-base-multilingual-uncased", - version="2.0.1", + version="2.0.2", summary= "bert_multi_uncased_L-12_H-768_A-12, 12-layer, 768-hidden, 12-heads, 110M parameters. 
The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -80,6 +80,12 @@ class Bert(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-multilingual-uncased', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-multilingual-uncased', **kwargs) else: @@ -93,8 +99,28 @@ class Bert(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class Bert(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git 
a/modules/text/language_model/bert-base-uncased/README.md b/modules/text/language_model/bert-base-uncased/README.md index 84867e57..0d42f276 100644 --- a/modules/text/language_model/bert-base-uncased/README.md +++ b/modules/text/language_model/bert-base-uncased/README.md @@ -163,3 +163,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/bert-base-uncased/module.py b/modules/text/language_model/bert-base-uncased/module.py index 8c06ad34..876d1a52 100644 --- a/modules/text/language_model/bert-base-uncased/module.py +++ b/modules/text/language_model/bert-base-uncased/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="bert-base-uncased", - version="2.0.1", + version="2.0.2", summary= "bert_uncased_L-12_H-768_A-12, 12-layer, 768-hidden, 12-heads, 110M parameters. The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -72,6 +72,12 @@ class Bert(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-uncased', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-uncased', **kwargs) else: @@ -85,8 +91,28 @@ class Bert(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -109,6 +135,35 @@ class Bert(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + 
title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/bert-large-cased/README.md b/modules/text/language_model/bert-large-cased/README.md index 1cd6285f..376e9fbc 100644 --- a/modules/text/language_model/bert-large-cased/README.md +++ b/modules/text/language_model/bert-large-cased/README.md @@ -163,3 +163,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/bert-large-cased/module.py b/modules/text/language_model/bert-large-cased/module.py index d456b78f..b60eae26 100644 --- a/modules/text/language_model/bert-large-cased/module.py +++ b/modules/text/language_model/bert-large-cased/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="bert-large-cased", - version="2.0.1", + version="2.0.2", summary= "bert_cased_L-24_H-1024_A-16, 24-layer, 1024-hidden, 16-heads, 340M parameters. 
The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -72,6 +72,12 @@ class Bert(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-large-cased', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-large-cased', **kwargs) else: @@ -85,8 +91,28 @@ class Bert(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -109,6 +135,35 @@ class Bert(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git 
a/modules/text/language_model/bert-large-uncased/README.md b/modules/text/language_model/bert-large-uncased/README.md index 1a40ce6a..072bfcba 100644 --- a/modules/text/language_model/bert-large-uncased/README.md +++ b/modules/text/language_model/bert-large-uncased/README.md @@ -163,3 +163,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/bert-large-uncased/module.py b/modules/text/language_model/bert-large-uncased/module.py index cedcba1d..026d8188 100644 --- a/modules/text/language_model/bert-large-uncased/module.py +++ b/modules/text/language_model/bert-large-uncased/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="bert-large-uncased", - version="2.0.1", + version="2.0.2", summary= "bert_uncased_L-24_H-1024_A-16, 24-layer, 1024-hidden, 16-heads, 340M parameters. The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -72,6 +72,12 @@ class Bert(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-large-uncased', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-large-uncased', **kwargs) else: @@ -85,8 +91,28 @@ class Bert(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -109,6 +135,35 @@ class Bert(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = 
self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/chinese_bert_wwm/README.md b/modules/text/language_model/chinese_bert_wwm/README.md index 61eabad7..734a203e 100644 --- a/modules/text/language_model/chinese_bert_wwm/README.md +++ b/modules/text/language_model/chinese_bert_wwm/README.md @@ -156,3 +156,7 @@ paddlehub >= 2.0.0 * 2.0.0 全面升级动态图,接口有所变化。任务名称调整,增加序列标注任务`token-cls` + +* 2.0.1 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/chinese_bert_wwm/module.py b/modules/text/language_model/chinese_bert_wwm/module.py index b225bb4e..259f8b42 100644 --- a/modules/text/language_model/chinese_bert_wwm/module.py +++ b/modules/text/language_model/chinese_bert_wwm/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="chinese-bert-wwm", - version="2.0.0", + version="2.0.1", summary= "chinese-bert-wwm, 12-layer, 768-hidden, 12-heads, 110M parameters. 
The module is executed as paddle.dygraph.", author="ymcui", @@ -81,6 +81,12 @@ class BertWwm(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-wwm-chinese', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-wwm-chinese', **kwargs) else: @@ -94,8 +100,28 @@ class BertWwm(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -118,6 +144,35 @@ class BertWwm(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git 
a/modules/text/language_model/chinese_bert_wwm_ext/README.md b/modules/text/language_model/chinese_bert_wwm_ext/README.md index 7a287a30..5aac1aee 100644 --- a/modules/text/language_model/chinese_bert_wwm_ext/README.md +++ b/modules/text/language_model/chinese_bert_wwm_ext/README.md @@ -156,3 +156,7 @@ paddlehub >= 2.0.0 * 2.0.0 全面升级动态图,接口有所变化。任务名称调整,增加序列标注任务`token-cls` + +* 2.0.1 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/chinese_bert_wwm_ext/module.py b/modules/text/language_model/chinese_bert_wwm_ext/module.py index 2a4e8256..8e6e8c6f 100644 --- a/modules/text/language_model/chinese_bert_wwm_ext/module.py +++ b/modules/text/language_model/chinese_bert_wwm_ext/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="chinese-bert-wwm-ext", - version="2.0.0", + version="2.0.1", summary= "chinese-bert-wwm-ext, 12-layer, 768-hidden, 12-heads, 110M parameters. The module is executed as paddle.dygraph.", author="ymcui", @@ -81,6 +81,12 @@ class BertWwm(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-wwm-ext-chinese', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-wwm-ext-chinese', **kwargs) else: @@ -94,8 +100,28 @@ class BertWwm(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -118,6 +144,35 @@ class BertWwm(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = 
self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/chinese_electra_base/README.md b/modules/text/language_model/chinese_electra_base/README.md index b3dfff0c..ea96ff39 100644 --- a/modules/text/language_model/chinese_electra_base/README.md +++ b/modules/text/language_model/chinese_electra_base/README.md @@ -155,3 +155,7 @@ paddlehub >= 2.0.0 * 2.0.0 全面升级动态图,接口有所变化。任务名称调整,增加序列标注任务`token-cls` + +* 2.0.1 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/chinese_electra_base/module.py b/modules/text/language_model/chinese_electra_base/module.py index 338c6605..84e32039 100644 --- a/modules/text/language_model/chinese_electra_base/module.py +++ b/modules/text/language_model/chinese_electra_base/module.py @@ -28,7 +28,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="chinese-electra-base", - version="2.0.0", + version="2.0.1", summary= "chinese-electra-base, 12-layer, 768-hidden, 12-heads, 102M parameters. 
The module is executed as paddle.dygraph.", author="ymcui", @@ -80,6 +80,12 @@ class Electra(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = ElectraModel.from_pretrained(pretrained_model_name_or_path='chinese-electra-base', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = ElectraModel.from_pretrained(pretrained_model_name_or_path='chinese-electra-base', **kwargs) else: @@ -93,8 +99,28 @@ class Electra(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class Electra(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(query_token_embedding.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(title_token_embedding.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git 
a/modules/text/language_model/chinese_electra_small/README.md b/modules/text/language_model/chinese_electra_small/README.md index 4a5f0e99..620ae041 100644 --- a/modules/text/language_model/chinese_electra_small/README.md +++ b/modules/text/language_model/chinese_electra_small/README.md @@ -155,3 +155,7 @@ paddlehub >= 2.0.0 * 2.0.0 全面升级动态图,接口有所变化。任务名称调整,增加序列标注任务`token-cls` + +* 2.0.1 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/chinese_electra_small/module.py b/modules/text/language_model/chinese_electra_small/module.py index 763f7d4f..03401f01 100644 --- a/modules/text/language_model/chinese_electra_small/module.py +++ b/modules/text/language_model/chinese_electra_small/module.py @@ -28,7 +28,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="chinese-electra-small", - version="2.0.0", + version="2.0.1", summary= "chinese-electra-small, 12-layer, 256-hidden, 4-heads, 12M parameters. The module is executed as paddle.dygraph.", author="ymcui", @@ -80,6 +80,12 @@ class Electra(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = ElectraModel.from_pretrained(pretrained_model_name_or_path='chinese-electra-small', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = ElectraModel.from_pretrained(pretrained_model_name_or_path='chinese-electra-small', **kwargs) else: @@ -93,8 +99,28 @@ class Electra(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class Electra(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(query_token_embedding.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding = title_result + title_token_embedding = 
self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(title_token_embedding.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/electra_base/README.md b/modules/text/language_model/electra_base/README.md index df076cc0..cee97e1b 100644 --- a/modules/text/language_model/electra_base/README.md +++ b/modules/text/language_model/electra_base/README.md @@ -151,3 +151,7 @@ paddlehub >= 2.0.0 * 1.0.0 初始发布,动态图版本模型,支持文本分类`seq-cls`和序列标注`token-cls`任务的fine-tune + +* 1.0.1 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/electra_base/module.py b/modules/text/language_model/electra_base/module.py index 1cfd62ff..6ef01fbb 100644 --- a/modules/text/language_model/electra_base/module.py +++ b/modules/text/language_model/electra_base/module.py @@ -28,7 +28,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="electra-base", - version="1.0.0", + version="1.0.1", summary= "electra-base, 12-layer, 768-hidden, 12-heads, 110M parameters. 
The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -80,6 +80,12 @@ class Electra(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = ElectraModel.from_pretrained(pretrained_model_name_or_path='electra-base', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = ElectraModel.from_pretrained(pretrained_model_name_or_path='electra-base', **kwargs) else: @@ -93,8 +99,28 @@ class Electra(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class Electra(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(query_token_embedding.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(title_token_embedding.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/electra_large/README.md 
b/modules/text/language_model/electra_large/README.md index 81f931d8..4619d132 100644 --- a/modules/text/language_model/electra_large/README.md +++ b/modules/text/language_model/electra_large/README.md @@ -151,3 +151,7 @@ paddlehub >= 2.0.0 * 1.0.0 初始发布,动态图版本模型,支持文本分类`seq-cls`和序列标注`token-cls`任务的fine-tune + +* 1.0.1 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/electra_large/module.py b/modules/text/language_model/electra_large/module.py index ae11788d..31efe69c 100644 --- a/modules/text/language_model/electra_large/module.py +++ b/modules/text/language_model/electra_large/module.py @@ -28,7 +28,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="electra-large", - version="1.0.0", + version="1.0.1", summary= "electra-large, 24-layer, 1024-hidden, 16-heads, 335M parameters. The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -80,6 +80,12 @@ class Electra(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = ElectraModel.from_pretrained(pretrained_model_name_or_path='electra-large', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = ElectraModel.from_pretrained(pretrained_model_name_or_path='electra-large', **kwargs) else: @@ -93,8 +99,28 @@ class Electra(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class Electra(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(query_token_embedding.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != 
self.model.pad_token_id).astype(title_token_embedding.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/electra_small/README.md b/modules/text/language_model/electra_small/README.md index 65ec7548..6d90d9d9 100644 --- a/modules/text/language_model/electra_small/README.md +++ b/modules/text/language_model/electra_small/README.md @@ -151,3 +151,7 @@ paddlehub >= 2.0.0 * 1.0.0 初始发布,动态图版本模型,支持文本分类`seq-cls`和序列标注`token-cls`任务的fine-tune + +* 1.0.1 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/electra_small/module.py b/modules/text/language_model/electra_small/module.py index ad60dd88..697411f3 100644 --- a/modules/text/language_model/electra_small/module.py +++ b/modules/text/language_model/electra_small/module.py @@ -28,7 +28,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="electra-small", - version="1.0.0", + version="1.0.1", summary= "electra-small, 12-layer, 256-hidden, 4-heads, 14M parameters. The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -80,6 +80,12 @@ class Electra(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = ElectraModel.from_pretrained(pretrained_model_name_or_path='electra-small', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = ElectraModel.from_pretrained(pretrained_model_name_or_path='electra-small', **kwargs) else: @@ -93,8 +99,28 @@ class Electra(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class 
Electra(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(query_token_embedding.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(title_token_embedding.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/ernie/README.md b/modules/text/language_model/ernie/README.md index 1a42cc99..cd2ad5ff 100644 --- a/modules/text/language_model/ernie/README.md +++ b/modules/text/language_model/ernie/README.md @@ -184,3 +184,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/ernie/module.py b/modules/text/language_model/ernie/module.py index 37a99500..ecb2e40e 100644 --- a/modules/text/language_model/ernie/module.py +++ b/modules/text/language_model/ernie/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="ernie", - version="2.0.1", + version="2.0.2", summary= "Baidu's ERNIE, Enhanced Representation through kNowledge IntEgration, max_seq_len=512 when predtrained. 
The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -72,6 +72,12 @@ class Ernie(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = ErnieModel.from_pretrained(pretrained_model_name_or_path='ernie-1.0', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = ErnieModel.from_pretrained(pretrained_model_name_or_path='ernie-1.0', **kwargs) else: @@ -85,8 +91,28 @@ class Ernie(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -109,6 +135,35 @@ class Ernie(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/ernie_tiny/README.md 
b/modules/text/language_model/ernie_tiny/README.md index 04ace50c..03fc0acd 100644 --- a/modules/text/language_model/ernie_tiny/README.md +++ b/modules/text/language_model/ernie_tiny/README.md @@ -176,3 +176,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/ernie_v2_eng_base/README.md b/modules/text/language_model/ernie_v2_eng_base/README.md index 3f747302..818471c2 100644 --- a/modules/text/language_model/ernie_v2_eng_base/README.md +++ b/modules/text/language_model/ernie_v2_eng_base/README.md @@ -172,3 +172,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/ernie_v2_eng_base/module.py b/modules/text/language_model/ernie_v2_eng_base/module.py index 59ea31b7..fd5a6033 100644 --- a/modules/text/language_model/ernie_v2_eng_base/module.py +++ b/modules/text/language_model/ernie_v2_eng_base/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="ernie_v2_eng_base", - version="2.0.1", + version="2.0.2", summary= "Baidu's ERNIE 2.0, Enhanced Representation through kNowledge IntEgration, max_seq_len=512 when predtrained. The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -80,6 +80,12 @@ class ErnieV2(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = ErnieModel.from_pretrained(pretrained_model_name_or_path='ernie-2.0-en', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = ErnieModel.from_pretrained(pretrained_model_name_or_path='ernie-2.0-en', **kwargs) else: @@ -93,8 +99,28 @@ class ErnieV2(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class ErnieV2(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != 
self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/ernie_v2_eng_large/README.md b/modules/text/language_model/ernie_v2_eng_large/README.md index 08c100f1..d11edff1 100644 --- a/modules/text/language_model/ernie_v2_eng_large/README.md +++ b/modules/text/language_model/ernie_v2_eng_large/README.md @@ -171,3 +171,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/ernie_v2_eng_large/module.py b/modules/text/language_model/ernie_v2_eng_large/module.py index 0d54a670..5be8965d 100644 --- a/modules/text/language_model/ernie_v2_eng_large/module.py +++ b/modules/text/language_model/ernie_v2_eng_large/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="ernie_v2_eng_large", - version="2.0.1", + version="2.0.2", summary= "Baidu's ERNIE 2.0, Enhanced Representation through kNowledge IntEgration, max_seq_len=512 when predtrained. 
The module is executed as paddle.dygraph.", author="paddlepaddle", @@ -80,6 +80,12 @@ class ErnieV2(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = ErnieModel.from_pretrained(pretrained_model_name_or_path='ernie-2.0-large-en', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = ErnieModel.from_pretrained(pretrained_model_name_or_path='ernie-2.0-large-en', **kwargs) else: @@ -93,8 +99,28 @@ class ErnieV2(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class ErnieV2(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git 
a/modules/text/language_model/rbt3/README.md b/modules/text/language_model/rbt3/README.md index 89d69289..ccc76744 100644 --- a/modules/text/language_model/rbt3/README.md +++ b/modules/text/language_model/rbt3/README.md @@ -156,3 +156,7 @@ paddlehub >= 2.0.0 * 2.0.0 全面升级动态图,接口有所变化。任务名称调整,增加序列标注任务`token-cls` + +* 2.0.1 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/rbt3/module.py b/modules/text/language_model/rbt3/module.py index 63d2b5db..8c8fe789 100644 --- a/modules/text/language_model/rbt3/module.py +++ b/modules/text/language_model/rbt3/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="rbt3", - version="2.0.0", + version="2.0.1", summary="rbt3, 3-layer, 768-hidden, 12-heads, 38M parameters ", author="ymcui", author_email="ymcui@ir.hit.edu.cn", @@ -80,6 +80,12 @@ class Roberta(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = RobertaModel.from_pretrained(pretrained_model_name_or_path='rbt3', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = RobertaModel.from_pretrained(pretrained_model_name_or_path='rbt3', **kwargs) else: @@ -93,8 +99,28 @@ class Roberta(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class Roberta(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + 
title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/rbtl3/README.md b/modules/text/language_model/rbtl3/README.md index 80b1c67e..de1e475a 100644 --- a/modules/text/language_model/rbtl3/README.md +++ b/modules/text/language_model/rbtl3/README.md @@ -156,3 +156,7 @@ paddlehub >= 2.0.0 * 2.0.0 全面升级动态图,接口有所变化。任务名称调整,增加序列标注任务`token-cls` + +* 2.0.1 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/rbtl3/module.py b/modules/text/language_model/rbtl3/module.py index ac00a9a5..e2d35795 100644 --- a/modules/text/language_model/rbtl3/module.py +++ b/modules/text/language_model/rbtl3/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="rbtl3", - version="2.0.0", + version="2.0.1", summary="rbtl3, 3-layer, 1024-hidden, 16-heads, 61M parameters ", author="ymcui", author_email="ymcui@ir.hit.edu.cn", @@ -80,6 +80,12 @@ class Roberta(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = RobertaModel.from_pretrained(pretrained_model_name_or_path='rbtl3', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = RobertaModel.from_pretrained(pretrained_model_name_or_path='rbtl3', **kwargs) else: @@ -93,8 +99,28 @@ class Roberta(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -117,6 +143,35 @@ class Roberta(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + 
query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git a/modules/text/language_model/roberta-wwm-ext-large/README.md b/modules/text/language_model/roberta-wwm-ext-large/README.md index 0b5f46ca..74629300 100644 --- a/modules/text/language_model/roberta-wwm-ext-large/README.md +++ b/modules/text/language_model/roberta-wwm-ext-large/README.md @@ -158,3 +158,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/roberta-wwm-ext-large/module.py b/modules/text/language_model/roberta-wwm-ext-large/module.py index aa45811d..74a4c471 100644 --- a/modules/text/language_model/roberta-wwm-ext-large/module.py +++ b/modules/text/language_model/roberta-wwm-ext-large/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="roberta-wwm-ext-large", - version="2.0.1", + version="2.0.2", summary= "chinese-roberta-wwm-ext-large, 24-layer, 1024-hidden, 16-heads, 340M parameters. 
The module is executed as paddle.dygraph.", author="ymcui", @@ -81,6 +81,12 @@ class Roberta(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = RobertaModel.from_pretrained(pretrained_model_name_or_path='roberta-wwm-ext-large', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = RobertaModel.from_pretrained(pretrained_model_name_or_path='roberta-wwm-ext-large', **kwargs) else: @@ -94,8 +100,28 @@ class Roberta(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -118,6 +144,35 @@ class Roberta(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output diff --git 
a/modules/text/language_model/roberta-wwm-ext/README.md b/modules/text/language_model/roberta-wwm-ext/README.md index 2eedd7d3..19db19d8 100644 --- a/modules/text/language_model/roberta-wwm-ext/README.md +++ b/modules/text/language_model/roberta-wwm-ext/README.md @@ -158,3 +158,7 @@ paddlehub >= 2.0.0 * 2.0.1 任务名称调整,增加序列标注任务`token-cls` + +* 2.0.2 + + 增加文本匹配任务`text-matching` \ No newline at end of file diff --git a/modules/text/language_model/roberta-wwm-ext/module.py b/modules/text/language_model/roberta-wwm-ext/module.py index 8fa2bbe7..844ff23d 100644 --- a/modules/text/language_model/roberta-wwm-ext/module.py +++ b/modules/text/language_model/roberta-wwm-ext/module.py @@ -29,7 +29,7 @@ from paddlehub.utils.log import logger @moduleinfo( name="roberta-wwm-ext", - version="2.0.1", + version="2.0.2", summary= "chinese-roberta-wwm-ext, 12-layer, 768-hidden, 12-heads, 110M parameters. The module is executed as paddle.dygraph.", author="ymcui", @@ -81,6 +81,12 @@ class Roberta(nn.Layer): self.metric = ChunkEvaluator( label_list=[self.label_map[i] for i in sorted(self.label_map.keys())] ) + elif task == 'text-matching': + self.model = RobertaModel.from_pretrained(pretrained_model_name_or_path='roberta-wwm-ext', **kwargs) + self.dropout = paddle.nn.Dropout(0.1) + self.classifier = paddle.nn.Linear(self.model.config['hidden_size']*3, 2) + self.criterion = paddle.nn.loss.CrossEntropyLoss() + self.metric = paddle.metric.Accuracy() elif task is None: self.model = RobertaModel.from_pretrained(pretrained_model_name_or_path='roberta-wwm-ext', **kwargs) else: @@ -94,8 +100,28 @@ class Roberta(nn.Layer): self.set_state_dict(state_dict) logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint)) - def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, seq_lengths=None, labels=None): - result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + attention_mask=None, + query_input_ids=None, + query_token_type_ids=None, + query_position_ids=None, + query_attention_mask=None, + title_input_ids=None, + title_token_type_ids=None, + title_position_ids=None, + title_attention_mask=None, + seq_lengths=None, + labels=None): + + if self.task != 'text-matching': + result = self.model(input_ids, token_type_ids, position_ids, attention_mask) + else: + query_result = self.model(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) + title_result = self.model(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask) + if self.task == 'seq-cls': logits = result probs = F.softmax(logits, axis=1) @@ -118,6 +144,35 @@ class Roberta(nn.Layer): _, _, f1_score = map(float, self.metric.accumulate()) return token_level_probs, loss, {'f1_score': f1_score} return token_level_probs + elif self.task == 'text-matching': + query_token_embedding, _ = query_result + query_token_embedding = self.dropout(query_token_embedding) + query_attention_mask = paddle.unsqueeze( + (query_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + query_token_embedding = query_token_embedding * query_attention_mask + query_sum_embedding = paddle.sum(query_token_embedding, axis=1) + query_sum_mask = paddle.sum(query_attention_mask, axis=1) + query_mean = query_sum_embedding / query_sum_mask + + title_token_embedding, _ = title_result + title_token_embedding = self.dropout(title_token_embedding) + title_attention_mask = 
paddle.unsqueeze( + (title_input_ids != self.model.pad_token_id).astype(self.model.pooler.dense.weight.dtype), axis=2) + title_token_embedding = title_token_embedding * title_attention_mask + title_sum_embedding = paddle.sum(title_token_embedding, axis=1) + title_sum_mask = paddle.sum(title_attention_mask, axis=1) + title_mean = title_sum_embedding / title_sum_mask + + sub = paddle.abs(paddle.subtract(query_mean, title_mean)) + projection = paddle.concat([query_mean, title_mean, sub], axis=-1) + logits = self.classifier(projection) + probs = F.softmax(logits) + if labels is not None: + loss = self.criterion(logits, labels) + correct = self.metric.compute(probs, labels) + acc = self.metric.update(correct) + return probs, loss, {'acc': acc} + return probs else: sequence_output, pooled_output = result return sequence_output, pooled_output -- GitLab
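The text-matching head added across all of these modules is identical: query and title are encoded by the same backbone, the token embeddings are mean-pooled over non-padding positions, and a linear layer classifies the concatenation [query_mean, title_mean, |query_mean - title_mean|]. Below is a minimal standalone sketch of that computation, assuming only paddle is installed; the random tensors stand in for the encoder output and the shapes are illustrative, not part of the patch.

import paddle
import paddle.nn.functional as F

paddle.seed(2021)

batch_size, seq_len, hidden_size, pad_token_id = 2, 8, 16, 0

# Stand-ins for the tokenized pair and the shared encoder's token embeddings
# (in the modules these come from the backbone, e.g. BertModel/ErnieModel).
query_input_ids = paddle.randint(low=1, high=100, shape=[batch_size, seq_len])
title_input_ids = paddle.randint(low=1, high=100, shape=[batch_size, seq_len])
query_token_embedding = paddle.randn([batch_size, seq_len, hidden_size])
title_token_embedding = paddle.randn([batch_size, seq_len, hidden_size])

classifier = paddle.nn.Linear(hidden_size * 3, 2)

def mean_pool(token_embedding, input_ids):
    # Zero out padding positions, then average the remaining token vectors.
    mask = paddle.unsqueeze((input_ids != pad_token_id).astype(token_embedding.dtype), axis=2)
    summed = paddle.sum(token_embedding * mask, axis=1)
    return summed / paddle.sum(mask, axis=1)

query_mean = mean_pool(query_token_embedding, query_input_ids)
title_mean = mean_pool(title_token_embedding, title_input_ids)

# Feature vector [query, title, |query - title|] fed to a 2-way classifier.
sub = paddle.abs(paddle.subtract(query_mean, title_mean))
projection = paddle.concat([query_mean, title_mean, sub], axis=-1)
probs = F.softmax(classifier(projection), axis=-1)
print(probs.shape)  # [2, 2]: match / no-match probabilities per pair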
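With the bumped versions, any of the updated modules can also be constructed directly with task='text-matching' and called on a tokenized pair through the new forward signature. A hedged sketch follows; the module name, token ids, and shapes are placeholders, and real inputs would come from the module's tokenizer rather than the hard-coded ids used here.

import paddle
import paddlehub as hub

# One of the modules bumped in this patch; the other updated modules behave the same way.
model = hub.Module(name='roberta-wwm-ext', version='2.0.2', task='text-matching')

# Dummy token ids standing in for a tokenized (query, title) pair.
query_input_ids = paddle.to_tensor([[101, 2769, 102]])
title_input_ids = paddle.to_tensor([[101, 2644, 102]])

probs = model(query_input_ids=query_input_ids, title_input_ids=title_input_ids)
print(probs)  # shape [1, 2]: softmax scores for the pair

When labels are passed as well, the same call returns (probs, loss, {'acc': ...}), mirroring the existing seq-cls and token-cls branches.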