# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import numpy as np

import paddle.fluid as fluid
import paddle.fluid.layers as layers

from paddlerec.core.utils import envs
from paddlerec.core.model import Model as ModelBase


class Model(ModelBase):
    """SR-GNN: session-based recommendation with a gated graph neural network.

    Builds the paddle.fluid programs for training and inference. Item
    embeddings are propagated over per-session in/out adjacency matrices
    with a GRU cell for a fixed number of steps, pooled into a session
    vector by soft attention, and scored against the whole item vocabulary
    with softmax cross-entropy.
    """

    def __init__(self, config):
        ModelBase.__init__(self, config)
        self.init_config()

    def init_config(self):
        """Load batch sizes and GNN hyper-parameters from the global env."""
        self._fetch_interval = 1
        # config_path points at a two-line text file: line 1 is the item
        # vocabulary size, line 2 the number of training instances.
        self.items_num, self.ins_num = self.config_read(
            envs.get_global_env("hyper_parameters.config_path", None,
                                self._namespace))
        self.train_batch_size = envs.get_global_env("batch_size", None,
                                                    "train.reader")
        self.evaluate_batch_size = envs.get_global_env("batch_size", None,
                                                       "evaluate.reader")
        self.hidden_size = envs.get_global_env(
            "hyper_parameters.sparse_feature_dim", None, self._namespace)
        # NOTE: the config key spells "propogation"; kept as-is because
        # existing config files use that exact key.
        self.step = envs.get_global_env(
            "hyper_parameters.gnn_propogation_steps", None, self._namespace)

    def config_read(self, config_path=None):
        """Return ``(item_count, instance_count)`` read from ``config_path``.

        The file's first line is the item vocabulary size, the second line
        the number of training instances.

        Raises:
            ValueError: if ``config_path`` is not configured.
        """
        if config_path is None:
            raise ValueError(
                "please set train.model.hyper_parameters.config_path at first")
        with open(config_path, "r") as fin:
            item_nums = int(fin.readline().strip())
            ins_nums = int(fin.readline().strip())
        return item_nums, ins_nums

    def input(self, bs):
        """Declare the feed variables for a batch of size ``bs``.

        Stores each variable on ``self`` and returns them as a list in feed
        order. ``-1`` dimensions are the per-batch sequence / unique-item
        maxima supplied by the reader.
        """
        self.items = fluid.data(
            name="items", shape=[bs, -1],
            dtype="int64")  # [batch_size, uniq_max]
        self.seq_index = fluid.data(
            name="seq_index", shape=[bs, -1, 2],
            dtype="int32")  # [batch_size, seq_max, 2]
        self.last_index = fluid.data(
            name="last_index", shape=[bs, 2], dtype="int32")  # [batch_size, 2]
        self.adj_in = fluid.data(
            name="adj_in", shape=[bs, -1, -1],
            dtype="float32")  # [batch_size, seq_max, seq_max]
        self.adj_out = fluid.data(
            name="adj_out", shape=[bs, -1, -1],
            dtype="float32")  # [batch_size, seq_max, seq_max]
        self.mask = fluid.data(
            name="mask", shape=[bs, -1, 1],
            dtype="float32")  # [batch_size, seq_max, 1]
        self.label = fluid.data(
            name="label", shape=[bs, 1], dtype="int64")  # [batch_size, 1]

        res = [
            self.items, self.seq_index, self.last_index, self.adj_in,
            self.adj_out, self.mask, self.label
        ]
        return res

    def train_input(self):
        """Declare training feeds; attach a DataLoader when configured."""
        res = self.input(self.train_batch_size)
        self._data_var = res

        use_dataloader = envs.get_global_env("hyper_parameters.use_DataLoader",
                                             False, self._namespace)

        # Non-Linux platforms cannot use the dataset feeder, so fall back to
        # a DataLoader there even if it was not explicitly requested.
        if self._platform != "LINUX" or use_dataloader:
            self._data_loader = fluid.io.DataLoader.from_generator(
                feed_list=self._data_var,
                capacity=256,
                use_double_buffer=False,
                iterable=False)

    def net(self, items_num, hidden_size, step, bs):
        """Build the SR-GNN forward graph, setting ``self.loss`` / ``self.acc``.

        Args:
            items_num: size of the item vocabulary (including id 0).
            hidden_size: embedding / hidden dimension ``h``.
            step: number of GNN propagation steps.
            bs: batch size the feed variables were declared with.
        """
        stdv = 1.0 / math.sqrt(hidden_size)

        def embedding_layer(input,
                            table_name,
                            emb_dim,
                            initializer_instance=None):
            # Shared lookup table; reused later under the same name "emb"
            # to score against the full vocabulary.
            emb = fluid.embedding(
                input=input,
                size=[items_num, emb_dim],
                param_attr=fluid.ParamAttr(
                    name=table_name, initializer=initializer_instance), )
            return emb

        sparse_initializer = fluid.initializer.Uniform(low=-stdv, high=stdv)
        items_emb = embedding_layer(self.items, "emb", hidden_size,
                                    sparse_initializer)
        pre_state = items_emb
        # Gated graph propagation: each step mixes neighbour messages from
        # the in/out adjacency matrices through a GRU cell.
        for i in range(step):
            pre_state = layers.reshape(
                x=pre_state, shape=[bs, -1, hidden_size])
            state_in = layers.fc(
                input=pre_state,
                name="state_in",
                size=hidden_size,
                act=None,
                num_flatten_dims=2,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Uniform(
                        low=-stdv, high=stdv)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Uniform(
                        low=-stdv, high=stdv)))  # [batch_size, uniq_max, h]
            state_out = layers.fc(
                input=pre_state,
                name="state_out",
                size=hidden_size,
                act=None,
                num_flatten_dims=2,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Uniform(
                        low=-stdv, high=stdv)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Uniform(
                        low=-stdv, high=stdv)))  # [batch_size, uniq_max, h]

            state_adj_in = layers.matmul(self.adj_in,
                                         state_in)  # [batch_size, uniq_max, h]
            state_adj_out = layers.matmul(
                self.adj_out, state_out)  # [batch_size, uniq_max, h]

            gru_input = layers.concat([state_adj_in, state_adj_out], axis=2)

            gru_input = layers.reshape(
                x=gru_input, shape=[-1, hidden_size * 2])
            # gru_unit expects its input pre-projected to 3*h.
            gru_fc = layers.fc(input=gru_input,
                               name="gru_fc",
                               size=3 * hidden_size,
                               bias_attr=False)
            pre_state, _, _ = fluid.layers.gru_unit(
                input=gru_fc,
                hidden=layers.reshape(
                    x=pre_state, shape=[-1, hidden_size]),
                size=3 * hidden_size)

        # Scatter the unique-item states back into sequence order, and pick
        # the state of each session's last click.
        final_state = layers.reshape(pre_state, shape=[bs, -1, hidden_size])
        seq = layers.gather_nd(final_state, self.seq_index)
        last = layers.gather_nd(final_state, self.last_index)

        seq_fc = layers.fc(
            input=seq,
            name="seq_fc",
            size=hidden_size,
            bias_attr=False,
            act=None,
            num_flatten_dims=2,
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-stdv, high=stdv)))  # [batch_size, seq_max, h]
        last_fc = layers.fc(input=last,
                            name="last_fc",
                            size=hidden_size,
                            bias_attr=False,
                            act=None,
                            num_flatten_dims=1,
                            param_attr=fluid.ParamAttr(
                                initializer=fluid.initializer.Uniform(
                                    low=-stdv, high=stdv)))  # [batch_size, h]

        # Transpose so last_fc ([batch_size, h]) broadcasts over seq_max.
        seq_fc_t = layers.transpose(
            seq_fc, perm=[1, 0, 2])  # [seq_max, batch_size, h]
        add = layers.elementwise_add(seq_fc_t,
                                     last_fc)  # [seq_max, batch_size, h]
        b = layers.create_parameter(
            shape=[hidden_size],
            dtype='float32',
            default_initializer=fluid.initializer.Constant(value=0.0))  # [h]
        add = layers.elementwise_add(add, b)  # [seq_max, batch_size, h]

        add_sigmoid = layers.sigmoid(add)  # [seq_max, batch_size, h]
        add_sigmoid = layers.transpose(
            add_sigmoid, perm=[1, 0, 2])  # [batch_size, seq_max, h]

        # Soft attention over sequence positions; mask zeroes the padding.
        weight = layers.fc(
            input=add_sigmoid,
            name="weight_fc",
            size=1,
            act=None,
            num_flatten_dims=2,
            bias_attr=False,
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-stdv, high=stdv)))  # [batch_size, seq_max, 1]
        weight *= self.mask
        weight_mask = layers.elementwise_mul(
            seq, weight, axis=0)  # [batch_size, seq_max, h]
        global_attention = layers.reduce_sum(
            weight_mask, dim=1)  # [batch_size, h]

        # Session vector = [attention pooling ; last-click state].
        final_attention = layers.concat(
            [global_attention, last], axis=1)  # [batch_size, 2*h]
        final_attention_fc = layers.fc(
            input=final_attention,
            name="final_attention_fc",
            size=hidden_size,
            bias_attr=False,
            act=None,
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-stdv, high=stdv)))  # [batch_size, h]

        # Score against every item except id 0 — presumably the padding id;
        # TODO(review) confirm against the reader.
        all_vocab = np.arange(1, items_num).reshape((-1)).astype('int32')
        all_vocab = fluid.layers.cast(
            x=fluid.layers.assign(all_vocab), dtype='int64')

        # Reuses the "emb" table from embedding_layer via the shared name.
        all_emb = fluid.embedding(
            input=all_vocab,
            param_attr=fluid.ParamAttr(
                name="emb",
                initializer=fluid.initializer.Uniform(
                    low=-stdv, high=stdv)),
            size=[items_num, hidden_size])  # [all_vocab, h]

        logits = layers.matmul(
            x=final_attention_fc, y=all_emb,
            transpose_y=True)  # [batch_size, all_vocab]
        softmax = layers.softmax_with_cross_entropy(
            logits=logits, label=self.label)  # [batch_size, 1]
        self.loss = layers.reduce_mean(softmax)  # [1]
        # Recall@20-style accuracy: label within the top-20 scored items.
        self.acc = layers.accuracy(input=logits, label=self.label, k=20)

    def avg_loss(self):
        """Expose the mean cross-entropy as the cost to minimize."""
        self._cost = self.loss

    def metrics(self):
        """Register loss and top-20 accuracy as training metrics."""
        self._metrics["LOSS"] = self.loss
        self._metrics["train_acc"] = self.acc

    def train_net(self):
        """Assemble the full training program."""
        self.train_input()
        self.net(self.items_num, self.hidden_size, self.step,
                 self.train_batch_size)
        self.avg_loss()
        self.metrics()

    def optimizer(self):
        """Build Adam with per-epoch exponential LR decay and L2 weight decay."""
        learning_rate = envs.get_global_env("hyper_parameters.learning_rate",
                                            None, self._namespace)
        step_per_epoch = self.ins_num // self.train_batch_size
        decay_steps = envs.get_global_env("hyper_parameters.decay_steps", None,
                                          self._namespace)
        decay_rate = envs.get_global_env("hyper_parameters.decay_rate", None,
                                         self._namespace)
        l2 = envs.get_global_env("hyper_parameters.l2", None, self._namespace)
        optimizer = fluid.optimizer.Adam(
            # decay_steps is configured in epochs; convert to batches.
            learning_rate=fluid.layers.exponential_decay(
                learning_rate=learning_rate,
                decay_steps=decay_steps * step_per_epoch,
                decay_rate=decay_rate),
            regularization=fluid.regularizer.L2DecayRegularizer(
                regularization_coeff=l2))

        return optimizer

    def infer_input(self):
        """Declare evaluation feeds and their DataLoader."""
        self._reader_namespace = "evaluate.reader"
        res = self.input(self.evaluate_batch_size)
        self._infer_data_var = res

        self._infer_data_loader = fluid.io.DataLoader.from_generator(
            feed_list=self._infer_data_var,
            capacity=64,
            use_double_buffer=False,
            iterable=False)

    def infer_net(self):
        """Assemble the inference program, exposing accuracy and loss."""
        self.infer_input()
        self.net(self.items_num, self.hidden_size, self.step,
                 self.evaluate_batch_size)
        self._infer_results['acc'] = self.acc
        self._infer_results['loss'] = self.loss