# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import numpy as np
import paddle.fluid as fluid

from paddlerec.core.utils import envs
from paddlerec.core.model import Model as ModelBase


class Model(ModelBase):
    def __init__(self, config):
        ModelBase.__init__(self, config)
        self.item_len = envs.get_global_env("hyper_parameters.self.item_len",
                                            None, self._namespace)
        self.hidden_size = envs.get_global_env("hyper_parameters.hidden_size",
                                               None, self._namespace)
        self.user_vocab = envs.get_global_env("hyper_parameters.user_vocab",
                                              None, self._namespace)
        self.item_vocab = envs.get_global_env("hyper_parameters.item_vocab",
                                              None, self._namespace)
        self.embed_size = envs.get_global_env("hyper_parameters.embed_size",
                                              None, self._namespace)
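        # For reference, a sketch of the hyper_parameters section these
        # lookups assume in config.yaml (keys mirror the strings above;
        # values are illustrative, not taken from the repo):
        #   hyper_parameters:
        #     self.item_len: 10
        #     hidden_size: 32
        #     user_vocab: 200
        #     item_vocab: 1000
        #     embed_size: 16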

    def input_data(self, is_infer=False):
        user_slot_names = fluid.data(
            name='user_slot_names',
            shape=[None, 1],
            dtype='int64',
            lod_level=1)
        item_slot_names = fluid.data(
            name='item_slot_names',
            shape=[None, self.item_len],
            dtype='int64',
            lod_level=1)
        lens = fluid.data(name='lens', shape=[None], dtype='int64')
        labels = fluid.data(
            name='labels',
            shape=[None, self.item_len],
            dtype='int64',
            lod_level=1)

        inputs = [user_slot_names] + [item_slot_names] + [lens] + [labels]
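        # Order matters: net() indexes this list positionally, i.e.
        # inputs[0]=user_slot_names, inputs[1]=item_slot_names,
        # inputs[2]=lens, inputs[3]=labels.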

        if is_infer:
            self._infer_data_var = inputs
            self._infer_data_loader = fluid.io.DataLoader.from_generator(
                feed_list=self._infer_data_var,
                capacity=64,
                use_double_buffer=False,
                iterable=False)
        else:
            self._data_var = inputs
            self._data_loader = fluid.io.DataLoader.from_generator(
                feed_list=self._data_var,
                capacity=10000,
                use_double_buffer=False,
                iterable=False)

        return inputs

    def _fluid_sequence_pad(self, input, pad_value, maxlen=None):
        """
        args:
            input: (batch*seq_len, dim)
        returns:
            (batch, max_seq_len, dim)
        """
        pad_value = fluid.layers.cast(
            fluid.layers.assign(input=np.array([pad_value], 'float32')),
            input.dtype)
        input_padded, _ = fluid.layers.sequence_pad(
            input, pad_value,
            maxlen=maxlen)  # (batch, max_seq_len, 1), (batch, 1)
        # TODO: maxlen=300 was used to work around https://github.com/PaddlePaddle/Paddle/issues/14164
        return input_padded

    def _fluid_sequence_get_pos(self, lodtensor):
        """
        args:
            lodtensor: lod = [[0,4,7]]
        return:
            pos: lod = [[0,4,7]]
                 data = [0,1,2,3,0,1,2]
                 shape = [-1, 1]
        """
        lodtensor = fluid.layers.reduce_sum(lodtensor, dim=1, keep_dim=True)
        assert lodtensor.shape == (-1, 1), (lodtensor.shape)
        ones = fluid.layers.cast(lodtensor * 0 + 1,
                                 'float32')  # (batch*seq_len, 1)
        ones_padded = self._fluid_sequence_pad(ones,
                                               0)  # (batch, max_seq_len, 1)
        ones_padded = fluid.layers.squeeze(ones_padded,
                                           [2])  # (batch, max_seq_len)
        seq_len = fluid.layers.cast(
            fluid.layers.reduce_sum(
                ones_padded, 1, keep_dim=True), 'int64')  # (batch, 1)
        seq_len = fluid.layers.squeeze(seq_len, [1])

        pos = fluid.layers.cast(
            fluid.layers.cumsum(
                ones_padded, 1, exclusive=True), 'int64')
        pos = fluid.layers.sequence_unpad(pos, seq_len)  # (batch*seq_len, 1)
        pos.stop_gradient = True
        return pos

    def net(self, inputs, is_infer=False):
        # user encode
        user_embedding = fluid.embedding(
            input=inputs[0],
            size=[self.user_vocab, self.embed_size],
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Xavier(),
                regularizer=fluid.regularizer.L2Decay(1e-5)),
            is_sparse=True)

        user_feature = fluid.layers.fc(
            input=user_embedding,
            size=self.hidden_size,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormal(
                    loc=0.0, scale=np.sqrt(1.0 / self.hidden_size))),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=0.0)),
            act='relu',
            name='user_feature_fc')
        # item encode
        item_embedding = fluid.embedding(
            input=inputs[1],
            size=[self.item_vocab, self.embed_size],
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Xavier(),
                regularizer=fluid.regularizer.L2Decay(1e-5)),
            is_sparse=True)

        item_embedding = fluid.layers.sequence_unpad(
            x=item_embedding, length=inputs[2])

        item_fc = fluid.layers.fc(
            input=item_embedding,
            size=self.hidden_size,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormal(
                    loc=0.0, scale=np.sqrt(1.0 / self.hidden_size))),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=0.0)),
            act='relu',
            name='item_fc')

        pos = self._fluid_sequence_get_pos(item_fc)
        pos_embed = fluid.embedding(
            input=pos,
            size=[self.user_vocab, self.embed_size],
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Xavier(),
                regularizer=fluid.regularizer.L2Decay(1e-5)),
            is_sparse=True)

        pos_embed = fluid.layers.squeeze(pos_embed, [1])

        # item gru
        gru_input = fluid.layers.fc(
            input=fluid.layers.concat([item_fc, pos_embed], 1),
            size=self.hidden_size * 3,
            name='item_gru_fc')

        # forward gru
        item_gru_forward = fluid.layers.dynamic_gru(
            input=gru_input,
            size=self.hidden_size,
            is_reverse=False,
            h_0=user_feature)
        # backward gru
        item_gru_backward = fluid.layers.dynamic_gru(
            input=gru_input,
            size=self.hidden_size,
            is_reverse=True,
            h_0=user_feature)

        item_gru = fluid.layers.concat(
            [item_gru_forward, item_gru_backward], axis=1)
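        # item_gru: (batch*seq_len, 2 * hidden_size), forward and backward
        # states concatenated per item.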

        out_click_fc1 = fluid.layers.fc(
            input=item_gru,
            size=self.hidden_size,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormal(
                    loc=0.0, scale=np.sqrt(1.0 / self.hidden_size))),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=0.0)),
            act='relu',
            name='out_click_fc1')

        click_prob = fluid.layers.fc(input=out_click_fc1,
                                     size=2,
                                     act='softmax',
                                     name='out_click_fc2')

        labels = fluid.layers.sequence_unpad(x=inputs[3], length=inputs[2])
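        # Unpad the labels with the same lens so they line up row-by-row with
        # click_prob, which was computed from the unpadded item sequence.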

        auc_val, batch_auc, auc_states = fluid.layers.auc(input=click_prob,
                                                          label=labels)

        if is_infer:
            self._infer_results["AUC"] = auc_val
            return

        loss = fluid.layers.reduce_mean(
            fluid.layers.cross_entropy(
                input=click_prob, label=labels))
        self._cost = loss
        self._metrics['auc'] = auc_val
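
    # Note: train_net() / infer_net() below are the hooks the paddlerec
    # runner is expected to call; each declares its feed slots via
    # input_data() and then builds the same graph via net().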

    def train_net(self):
        input_data = self.input_data()
        self.net(input_data)

    def infer_net(self):
        input_data = self.input_data(is_infer=True)
        self.net(input_data, is_infer=True)