# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
import paddle.fluid.layers.nn as nn
import paddle.fluid.layers.tensor as tensor
import paddle.fluid.layers.control_flow as cf
import paddle.fluid.layers.io as io


class BowEncoder(object):
    """ bow-encoder """

    def __init__(self):
        self.param_name = ""

    def forward(self, emb):
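        # sum-pool the word embeddings over the sequence axis, yielding
        # an order-insensitive bag-of-words encoding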
        return nn.sequence_pool(input=emb, pool_type='sum')


class CNNEncoder(object):
    """ cnn-encoder"""

    def __init__(self,
                 param_name="cnn",
                 win_size=3,
                 ksize=128,
                 act='tanh',
                 pool_type='max'):
        self.param_name = param_name
        self.win_size = win_size
        self.ksize = ksize
        self.act = act
        self.pool_type = pool_type

    def forward(self, emb):
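        # temporal convolution over win_size tokens followed by pooling
        # over the sequence (fluid.nets.sequence_conv_pool)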
        return fluid.nets.sequence_conv_pool(
            input=emb,
            num_filters=self.ksize,
            filter_size=self.win_size,
            act=self.act,
            pool_type=self.pool_type,
            param_attr=self.param_name + ".param",
            bias_attr=self.param_name + ".bias")


class GrnnEncoder(object):
    """ grnn-encoder """

    def __init__(self, param_name="grnn", hidden_size=128):
        self.param_name = param_name
        self.hidden_size = hidden_size

    def forward(self, emb):
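        # dynamic_gru expects its input width to be 3 * hidden_size
        # (update gate, reset gate, and candidate activations), so
        # project the embeddings up first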
        fc0 = nn.fc(
            input=emb,
            size=self.hidden_size * 3,
            param_attr=self.param_name + "_fc.w",
            bias_attr=False)

        gru_h = nn.dynamic_gru(
            input=fc0,
            size=self.hidden_size,
            is_reverse=False,
            param_attr=self.param_name + ".param",
            bias_attr=self.param_name + ".bias")
        return nn.sequence_pool(input=gru_h, pool_type='max')


'''This is a very simple Encoder factory;
most default argument values are used.'''


class SimpleEncoderFactory(object):
    def __init__(self):
        pass

    ''' create an encoder through the create method '''

    def create(self, enc_type, enc_hid_size):
        if enc_type == "bow":
            bow_encode = BowEncoder()
            return bow_encode
        elif enc_type == "cnn":
            cnn_encode = CNNEncoder(ksize=enc_hid_size)
            return cnn_encode
        elif enc_type == "gru":
            rnn_encode = GrnnEncoder(hidden_size=enc_hid_size)
            return rnn_encode
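
# A minimal usage sketch (illustrative, not part of the original file):
#
#     factory = SimpleEncoderFactory()
#     enc = factory.create("gru", 128)   # -> GrnnEncoder(hidden_size=128)
#
# Note: an enc_type other than "bow"/"cnn"/"gru" falls through the
# if/elif chain and create returns None.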


class MultiviewSimnet(object):
    """ multi-view simnet """

    def __init__(self, embedding_size, embedding_dim, hidden_size):
        self.embedding_size = embedding_size
        self.embedding_dim = embedding_dim
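        # fluid.embedding takes size=[vocab_size, emb_dim]; here
        # embedding_size is the vocabulary size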
        self.emb_shape = [self.embedding_size, self.embedding_dim]
        self.hidden_size = hidden_size
        self.margin = 0.1

    def set_query_encoder(self, encoders):
        self.query_encoders = encoders

    def set_title_encoder(self, encoders):
        self.title_encoders = encoders

    def get_correct(self, x, y):
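        # count the pairs where x < y; called as get_correct(cos_neg,
        # cos_pos), this is the number of correctly ranked pairs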
        less = tensor.cast(cf.less_than(x, y), dtype='float32')
        correct = nn.reduce_sum(less)
        return correct

    def train_net(self):
        # input fields for query, pos_title, neg_title
        q_slots = [
            fluid.data(
                name="q%d" % i, shape=[None, 1], lod_level=1, dtype='int64')
            for i in range(len(self.query_encoders))
        ]
        pt_slots = [
            fluid.data(
                name="pt%d" % i, shape=[None, 1], lod_level=1, dtype='int64')
            for i in range(len(self.title_encoders))
        ]
        nt_slots = [
            fluid.data(
                name="nt%d" % i, shape=[None, 1], lod_level=1, dtype='int64')
            for i in range(len(self.title_encoders))
        ]

        # lookup embedding for each slot
        q_embs = [
            fluid.embedding(
                input=query, size=self.emb_shape, param_attr="emb")
            for query in q_slots
        ]
        pt_embs = [
            fluid.embedding(
                input=title, size=self.emb_shape, param_attr="emb")
            for title in pt_slots
        ]
        nt_embs = [
            fluid.embedding(
                input=title, size=self.emb_shape, param_attr="emb")
            for title in nt_slots
        ]

        # encode each embedding field with encoder
        q_encodes = [
            self.query_encoders[i].forward(emb) for i, emb in enumerate(q_embs)
        ]
        pt_encodes = [
            self.title_encoders[i].forward(emb) for i, emb in enumerate(pt_embs)
        ]
        nt_encodes = [
            self.title_encoders[i].forward(emb) for i, emb in enumerate(nt_embs)
        ]

        # concat multi view for query, pos_title, neg_title
        q_concat = nn.concat(q_encodes)
        pt_concat = nn.concat(pt_encodes)
        nt_concat = nn.concat(nt_encodes)

        # projection of hidden layer
        q_hid = nn.fc(q_concat,
                      size=self.hidden_size,
                      param_attr='q_fc.w',
                      bias_attr='q_fc.b')
        pt_hid = nn.fc(pt_concat,
                       size=self.hidden_size,
                       param_attr='t_fc.w',
                       bias_attr='t_fc.b')
        nt_hid = nn.fc(nt_concat,
                       size=self.hidden_size,
                       param_attr='t_fc.w',
                       bias_attr='t_fc.b')

        # cosine of hidden layers
        cos_pos = nn.cos_sim(q_hid, pt_hid)
        cos_neg = nn.cos_sim(q_hid, nt_hid)

        # pairwise hinge_loss
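        # i.e. loss = mean(max(0, margin - cos_pos + cos_neg)), which
        # pushes the positive title at least `margin` above the negative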
        loss_part1 = nn.elementwise_sub(
            tensor.fill_constant_batch_size_like(
                input=cos_pos,
                shape=[-1, 1],
                value=self.margin,
                dtype='float32'),
            cos_pos)

        loss_part2 = nn.elementwise_add(loss_part1, cos_neg)

        loss_part3 = nn.elementwise_max(
            tensor.fill_constant_batch_size_like(
                input=loss_part2, shape=[-1, 1], value=0.0, dtype='float32'),
            loss_part2)

        avg_cost = nn.mean(loss_part3)
        correct = self.get_correct(cos_neg, cos_pos)

        return q_slots + pt_slots + nt_slots, avg_cost, correct

    def pred_net(self, query_fields, pos_title_fields, neg_title_fields):
        q_slots = [
            fluid.data(
                name="q%d" % i, shape=[None, 1], lod_level=1, dtype='int64')
            for i in range(len(self.query_encoders))
        ]
        pt_slots = [
            fluid.data(
                name="pt%d" % i, shape=[None, 1], lod_level=1, dtype='int64')
            for i in range(len(self.title_encoders))
        ]
        # lookup embedding for each slot
        q_embs = [
            fluid.embedding(
                input=query, size=self.emb_shape, param_attr="emb")
            for query in q_slots
        ]
        pt_embs = [
            fluid.embedding(
                input=title, size=self.emb_shape, param_attr="emb")
            for title in pt_slots
        ]
        # encode each embedding field with encoder
        q_encodes = [
            self.query_encoders[i].forward(emb) for i, emb in enumerate(q_embs)
        ]
        pt_encodes = [
            self.title_encoders[i].forward(emb) for i, emb in enumerate(pt_embs)
        ]
        # concat multi view for query, pos_title, neg_title
        q_concat = nn.concat(q_encodes)
        pt_concat = nn.concat(pt_encodes)
        # projection of hidden layer
        q_hid = nn.fc(q_concat,
                      size=self.hidden_size,
                      param_attr='q_fc.w',
                      bias_attr='q_fc.b')
        pt_hid = nn.fc(pt_concat,
                       size=self.hidden_size,
                       param_attr='t_fc.w',
                       bias_attr='t_fc.b')
        # cosine of hidden layers
        cos = nn.cos_sim(q_hid, pt_hid)
        return cos
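
# A rough end-to-end sketch under illustrative assumptions (the vocab
# size and encoder choices below are examples, not part of the original
# file):
#
#     factory = SimpleEncoderFactory()
#     net = MultiviewSimnet(
#         embedding_size=500000, embedding_dim=128, hidden_size=128)
#     net.set_query_encoder([factory.create("bow", 128),
#                            factory.create("gru", 128)])
#     net.set_title_encoder([factory.create("bow", 128),
#                            factory.create("gru", 128)])
#     slots, avg_cost, correct = net.train_net()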