test_recommender_system.py
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.core as core
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.optimizer import SGDOptimizer

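# Training configuration: sparse embedding updates, CPU execution by default,
# and a batch size of 256.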
IS_SPARSE = True
USE_GPU = False
BATCH_SIZE = 256


def get_usr_combined_features():
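    """Build the user-side feature vector.

    Embeds the user id, gender, age and job id, passes each embedding
    through a small fc layer, and fuses them into a single 200-d tanh
    fc output.
    """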
    # FIXME(dzh): the old API integer_value(10) may have a range check;
    # currently we do not have a user-configured check.

    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1

    uid = layers.data(name='user_id', shape=[1], dtype='int64')

    usr_emb = layers.embedding(
        input=uid,
        dtype='float32',
        size=[USR_DICT_SIZE, 32],
        param_attr='user_table',
        is_sparse=IS_SPARSE)

    usr_fc = layers.fc(input=usr_emb, size=32)

    USR_GENDER_DICT_SIZE = 2

    usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')

    usr_gender_emb = layers.embedding(
        input=usr_gender_id,
        size=[USR_GENDER_DICT_SIZE, 16],
        param_attr='gender_table',
        is_sparse=IS_SPARSE)

    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)

    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
    usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")

    usr_age_emb = layers.embedding(
        input=usr_age_id,
        size=[USR_AGE_DICT_SIZE, 16],
        is_sparse=IS_SPARSE,
        param_attr='age_table')

    usr_age_fc = layers.fc(input=usr_age_emb, size=16)

    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
    usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")

    usr_job_emb = layers.embedding(
        input=usr_job_id,
        size=[USR_JOB_DICT_SIZE, 16],
        param_attr='job_table',
        is_sparse=IS_SPARSE)

    usr_job_fc = layers.fc(input=usr_job_emb, size=16)

    concat_embed = layers.concat(
        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)

    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return usr_combined_features


def get_mov_combined_features():
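    """Build the movie-side feature vector.

    Embeds the movie id, sum-pools the category embeddings, applies a
    convolution + sum-pool over the title word sequence, and fuses the
    three parts into a single 200-d tanh fc output.
    """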

    MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1

    mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')

    mov_emb = layers.embedding(
        input=mov_id,
        dtype='float32',
        size=[MOV_DICT_SIZE, 32],
        param_attr='movie_table',
        is_sparse=IS_SPARSE)

    mov_fc = layers.fc(input=mov_emb, size=32)

    CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())

    category_id = layers.data(name='category_id', shape=[1], dtype='int64')

    mov_categories_emb = layers.embedding(
        input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)

    mov_categories_hidden = layers.sequence_pool(
        input=mov_categories_emb, pool_type="sum")

    MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())

    mov_title_id = layers.data(name='movie_title', shape=[1], dtype='int64')

    mov_title_emb = layers.embedding(
        input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)

    mov_title_conv = nets.sequence_conv_pool(
        input=mov_title_emb,
        num_filters=32,
        filter_size=3,
        act="tanh",
        pool_type="sum")

    concat_embed = layers.concat(
        input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)

    # FIXME(dzh): need a tanh operator
    mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return mov_combined_features


def model():
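    """Build the rating network and return its average squared-error cost.

    The predicted rating is the cosine similarity between the user and
    movie feature vectors, scaled by 5 to cover the rating range.
    """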
    usr_combined_features = get_usr_combined_features()
    mov_combined_features = get_mov_combined_features()

    # Predicted rating: cosine similarity between user and movie features,
    # scaled by 5 to match the rating range.
    inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
    scale_infer = layers.scale(x=inference, scale=5.0)

    label = layers.data(name='score', shape=[1], dtype='float32')

    square_cost = layers.square_error_cost(input=scale_infer, label=label)

    avg_cost = layers.mean(x=square_cost)

    return avg_cost


def main():
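    """Train the recommender on the MovieLens dataset with SGD.

    Exits with success once the average cost of a batch drops below 6.0,
    and aborts if the loss becomes NaN.
    """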
    cost = model()
    sgd_optimizer = SGDOptimizer(learning_rate=0.2)
    opts = sgd_optimizer.minimize(cost)

    if USE_GPU:
        place = core.CUDAPlace(0)
    else:
        place = core.CPUPlace()

    exe = Executor(place)
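    # Run the startup program once to initialize all parameters.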
    exe.run(framework.default_startup_program())

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.movielens.train(), buf_size=8192),
        batch_size=BATCH_SIZE)

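    # Column index of each feed target within a MovieLens training sample.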
    feeding = {
        'user_id': 0,
        'gender_id': 1,
        'age_id': 2,
        'job_id': 3,
        'movie_id': 4,
        'category_id': 5,
        'movie_title': 6,
        'score': 7
    }

    def func_feed(feeding, data):
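        """Convert one mini-batch from the reader into LoDTensors.

        Dense fields become [N, 1] tensors; the variable-length category_id
        and movie_title fields are concatenated and carry LoD (sequence
        length) information.
        """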
        feed_tensors = {}
        for (key, idx) in feeding.iteritems():
            tensor = core.LoDTensor()
            if key != "category_id" and key != "movie_title":
                if key == "score":
                    numpy_data = np.array(map(lambda x: x[idx], data)).astype(
                        "float32")
                else:
                    numpy_data = np.array(map(lambda x: x[idx], data)).astype(
                        "int64")
            else:
                numpy_data = map(lambda x: np.array(x[idx]).astype("int64"),
                                 data)
                lod_info = [len(item) for item in numpy_data]
                offset = 0
                lod = [offset]
                for item in lod_info:
                    offset += item
                    lod.append(offset)
                numpy_data = np.concatenate(numpy_data, axis=0)
                tensor.set_lod([lod])

            numpy_data = numpy_data.reshape([numpy_data.shape[0], 1])
            tensor.set(numpy_data, place)
            feed_tensors[key] = tensor
        return feed_tensors

    PASS_NUM = 100
    for pass_id in range(PASS_NUM):
        for data in train_reader():
            outs = exe.run(framework.default_main_program(),
                           feed=func_feed(feeding, data),
                           fetch_list=[cost])
            out = np.array(outs[0])
            if out[0] < 6.0:
                # If the average cost is below 6.0, consider the test passed.
                exit(0)
            if math.isnan(float(out[0])):
                sys.exit("got NaN loss, training failed.")


main()