# -*- coding: UTF-8 -*-
#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
from paddle.fluid import layers
from paddlepalm.head.base_head import Head
import numpy as np
import os
import json


def computeHingeLoss(pos, neg, margin):
    """Per-example pairwise hinge loss: max(0, margin - pos + neg)."""
    loss_part1 = fluid.layers.elementwise_sub(
        fluid.layers.fill_constant_batch_size_like(
            input=pos, shape=[-1, 1], value=margin, dtype='float32'), pos)
    loss_part2 = fluid.layers.elementwise_add(loss_part1, neg)
    loss_part3 = fluid.layers.elementwise_max(
        fluid.layers.fill_constant_batch_size_like(
            input=loss_part2, shape=[-1, 1], value=0.0, dtype='float32'), loss_part2)
    return loss_part3
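
# A worked numeric example of the hinge loss above (illustrative values only):
# with margin=0.5, pos=0.9, neg=0.2 the loss is max(0, 0.5 - 0.9 + 0.2) = 0.0,
# while pos=0.4, neg=0.3 gives max(0, 0.5 - 0.4 + 0.3) = 0.4, i.e. the model is
# penalized until the positive score beats the negative score by at least `margin`.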


class Match(Head):
    '''
    Text matching head. Supports pointwise classification and pairwise
    (hinge-loss) ranking, selected via `learning_strategy`.
    '''
   
    def __init__(self, num_classes, input_dim, dropout_prob=0.0,
                 param_initializer_range=0.02, learning_strategy='pointwise',
                 margin=0.5, phase='train'):
        """  
        Args:
            phase: train, eval, pred
            lang: en, ch, ...
            learning_strategy: pointwise, pairwise
        """
        
        self._is_training = phase == 'train'
        self._hidden_size = input_dim
    
        self._num_classes = num_classes

        self._dropout_prob = dropout_prob if phase == 'train' else 0.0
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=param_initializer_range)
        self._learning_strategy = learning_strategy 
        self._margin = margin

    
        self._preds = []
        self._preds_logits = []
    
    @property
    def inputs_attrs(self):
        reader = {}
        bb = {"sentence_pair_embedding": [[-1, self._hidden_size], 'float32']}
        if self._is_training:
            if self._learning_strategy == 'pointwise':
                reader["label_ids"] = [[-1], 'int64']
            elif self._learning_strategy == 'pairwise':
                bb["sentence_pair_embedding_neg"] = [[-1, self._hidden_size], 'float32']

        return {'reader': reader, 'backbone': bb}

    @property
    def outputs_attrs(self):
        if self._is_training:
            return {"loss": [[1], 'float32']}
        else:
            if self._learning_strategy == 'pairwise':
                return {"probs": [[-1, 1], 'float32']}
            else:
                return {"logits": [[-1, self._num_classes], 'float32'],
                        "probs": [[-1, self._num_classes], 'float32']}

    def build(self, inputs, scope_name=""):

        # inputs          
        cls_feats = inputs["backbone"]["sentence_pair_embedding"] 
        if self._is_training:
            cls_feats = fluid.layers.dropout(
                x=cls_feats,
                dropout_prob=self._dropout_prob,
                dropout_implementation="upscale_in_train")
            if self._learning_strategy == 'pairwise':
                cls_feats_neg = inputs["backbone"]["sentence_pair_embedding_neg"]
                cls_feats_neg = fluid.layers.dropout(
                    x=cls_feats_neg,
                    dropout_prob=self._dropout_prob,
                    dropout_implementation="upscale_in_train")
            elif self._learning_strategy == 'pointwise':
                labels = inputs["reader"]["label_ids"]
        
        # loss
        # for pointwise
        if self._learning_strategy == 'pointwise':
            logits = fluid.layers.fc(
                input=cls_feats,
                size=self._num_classes,
                param_attr=fluid.ParamAttr(
                    name=scope_name+"cls_out_w",
                    initializer=self._param_initializer),
                bias_attr=fluid.ParamAttr(
                    name=scope_name+"cls_out_b",
                    initializer=fluid.initializer.Constant(0.)))
            probs = fluid.layers.softmax(logits)
            if self._is_training:
                ce_loss = fluid.layers.cross_entropy(
                    input=probs, label=labels)
                loss = fluid.layers.mean(x=ce_loss)
                return {'loss': loss}
            # for pred
            else:
                return {'logits': logits,
                        'probs': probs}
        # for pairwise
        elif self._learning_strategy == 'pairwise':
            pos_score = fluid.layers.fc(
                input=cls_feats,
                size=1,
                act = "sigmoid",
                param_attr=fluid.ParamAttr(
                    name=scope_name+"cls_out_w_pr",
                    initializer=self._param_initializer),
                bias_attr=fluid.ParamAttr(
                    name=scope_name+"cls_out_b_pr",
                    initializer=fluid.initializer.Constant(0.)))
            pos_score = fluid.layers.reshape(x=pos_score, shape=[-1, 1], inplace=True)

            if self._is_training:
                neg_score = fluid.layers.fc(
                    input=cls_feats_neg,
                    size=1,
                    act = "sigmoid",
                    param_attr=fluid.ParamAttr(
                        name=scope_name+"cls_out_w_pr",
                        initializer=self._param_initializer),
                    bias_attr=fluid.ParamAttr(
                        name=scope_name+"cls_out_b_pr",
                        initializer=fluid.initializer.Constant(0.)))        
                neg_score = fluid.layers.reshape(x=neg_score, shape=[-1, 1], inplace=True)
        
                loss = fluid.layers.mean(computeHingeLoss(pos_score, neg_score, self._margin))
                return {'loss': loss}
            # for pred
            else:
                return {'probs': pos_score}
        


    def batch_postprocess(self, rt_outputs):
        if not self._is_training:
            probs = rt_outputs['probs']
            self._preds.extend(probs.tolist())
            if self._learning_strategy == 'pointwise':
                logits = rt_outputs['logits']
                self._preds_logits.extend(logits.tolist())
        
    def epoch_postprocess(self, post_inputs, output_dir=None):
        # post_inputs is unused here: nothing is declared in epoch_inputs_attrs, so it arrives empty.
        if not self._is_training:
            if output_dir is None:
                raise ValueError('argument output_dir not found in config. Please add it into config dict/file.')
            with open(os.path.join(output_dir, 'predictions.json'), 'w') as writer:
                for i in range(len(self._preds)):
                    if self._learning_strategy == 'pointwise':
                        # pick the more probable of the two classes (assumes num_classes == 2)
                        label = 0 if self._preds[i][0] > self._preds[i][1] else 1
                        result = {'index': i, 'label': label, 'logits': self._preds_logits[i], 'probs': self._preds[i]}
                    elif self._learning_strategy == 'pairwise':
                        # threshold the single matching score at 0.5
                        label = 0 if self._preds[i][0] < 0.5 else 1
                        result = {'index': i, 'label': label, 'probs': self._preds[i][0]}
                    result = json.dumps(result, ensure_ascii=False)
                    writer.write(result+'\n')
            print('Predictions saved at '+os.path.join(output_dir, 'predictions.json'))
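
# Each line of the exported predictions.json is a standalone JSON object.
# Illustrative (hypothetical) records:
#   pointwise: {"index": 0, "label": 1, "logits": [-0.3, 0.8], "probs": [0.25, 0.75]}
#   pairwise:  {"index": 0, "label": 1, "probs": 0.83}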