# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

try:
    import cPickle as pickle
except ImportError:
    import pickle

# 'is_predict' is passed in on the command line via --config_args (e.g.
# --config_args=is_predict=1) and switches this config between the training
# network (regression cost on the rating) and the inference network
# (similarity output only).
is_predict = get_config_arg('is_predict', bool, False)

META_FILE = 'data/meta.bin'

with open(META_FILE, 'rb') as f:
    # load meta file
    meta = pickle.load(f)
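
# The layout of 'meta' is inferred from how it is indexed below; the actual
# contents are produced by the preprocessing step that writes data/meta.bin.
# Roughly, meta[name]['__meta__']['raw_meta'] (for name in 'movie'/'user') is
# a list of per-feature dicts such as:
#
#   {'type': 'id', 'max': ...}                            # e.g. the movie id
#   {'type': 'embedding', 'name': ..., 'seq': ..., 'dict': [...]}
#   {'type': 'one_hot_dense', 'name': ..., 'dict': [...]}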

settings(batch_size=1600, learning_rate=1e-3,
         learning_method=RMSPropOptimizer())


def construct_feature(name):
    """
    Construct movie/user features.

    This method reads the feature meta data and converts each feature into a
    network branch according to its type. The mapping is as follows:

    * id: embedding => fc
    * embedding:
        is a sequence:  embedding => context_projection => fc => pool
        not a sequence: embedding => fc
    * one_hot_dense:  fc => fc

    All feature vectors are then gathered and combined by a final fc layer,
    which is returned.

    :param name: 'movie' or 'user'
    :type name: basestring
    :return: combined feature output
    :rtype: LayerOutput
    """
    __meta__ = meta[name]['__meta__']['raw_meta']
    fusion = []
    for each_meta in __meta__:
        type_name = each_meta['type']
        slot_name = each_meta.get('name', '%s_id' % name)
        if type_name == 'id':
            slot_dim = each_meta['max']
            embedding = embedding_layer(input=data_layer(slot_name,
                                                          size=slot_dim),
                                        size=256)
            fusion.append(fc_layer(input=embedding,
                                   size=256))
        elif type_name == 'embedding':
            is_seq = each_meta['seq'] == 'sequence'
            slot_dim = len(each_meta['dict'])
            din = data_layer(slot_name, slot_dim)
            embedding = embedding_layer(input=din, size=256)
            if is_seq:
                fusion.append(
                    text_conv_pool(input=embedding, context_len=5,
                                   hidden_size=256))
            else:
                fusion.append(fc_layer(input=embedding,
                                       size=256))
        elif type_name == 'one_hot_dense':
            slot_dim = len(each_meta['dict'])
            hidden = fc_layer(input=data_layer(slot_name, slot_dim),
                              size=256)
            fusion.append(fc_layer(input=hidden,
                                   size=256))

    return fc_layer(name="%s_fusion" % name, input=fusion, size=256)


movie_feature = construct_feature("movie")
user_feature = construct_feature("user")
similarity = cos_sim(a=movie_feature, b=user_feature)
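# The cosine similarity of the 256-d movie and user feature vectors is the
# model's predicted rating: during training it is regressed against the
# ground-truth 'rating' label; at prediction time it is emitted directly.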
if not is_predict:
    outputs(regression_cost(input=similarity,
                            label=data_layer('rating', size=1)))

    define_py_data_sources2('data/train.list', 'data/test.list',
                            module='dataprovider', obj='process',
                            args={'meta': meta})
else:
    outputs(similarity)
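
# define_py_data_sources2 above expects a generator named 'process' in
# dataprovider.py. A minimal sketch of that provider, assuming the standard
# PyDataProvider2 interface (the demo's real dataprovider may differ), could
# look like:
#
#   from paddle.trainer.PyDataProvider2 import *
#
#   def hook(settings, meta, **kwargs):
#       # 'meta' arrives here through args={'meta': meta} above.
#       settings.meta = meta
#       settings.input_types = [...]  # one type per data_layer, plus the rating
#
#   @provider(init_hook=hook)
#   def process(settings, filename):
#       with open(filename) as f:
#           for line in f:
#               yield [...]  # slot values in the same order as input_types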