# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math

import paddle.fluid as fluid

from paddlerec.core.utils import envs
from paddlerec.core.model import ModelBase


class Model(ModelBase):
    def __init__(self, config):
        ModelBase.__init__(self, config)

    def _init_hyper_parameters(self):
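        # Distribution flags: `is_distributed` is turned on under the PSLIB
        # fleet mode, and `distributed_embedding` selects the contrib
        # sparse_embedding op in embedding_layer() below.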
        self.is_distributed = False
        self.distributed_embedding = False

        if envs.get_fleet_mode().upper() == "PSLIB":
            self.is_distributed = True

        if envs.get_global_env("hyper_parameters.distributed_embedding",
                               0) == 1:
            self.distributed_embedding = True

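        # The remaining hyper-parameters are read from the
        # `hyper_parameters` section of the config yaml via
        # `envs.get_global_env`.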
        self.sparse_feature_number = envs.get_global_env(
            "hyper_parameters.sparse_feature_number")
        self.sparse_feature_dim = envs.get_global_env(
            "hyper_parameters.sparse_feature_dim")
        self.learning_rate = envs.get_global_env(
            "hyper_parameters.optimizer.learning_rate")

    def net(self, input, is_infer=False):
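        # Slot layout used by this example: sparse var 0 is the click label,
        # the remaining sparse vars are ID feature slots, and there is a
        # single dense feature var.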
        self.sparse_inputs = self._sparse_data_var[1:]
        self.dense_input = self._dense_data_var[0]
        self.label_input = self._sparse_data_var[0]

        def embedding_layer(input):
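            # Look up the IDs of one sparse slot in the shared embedding
            # table "SparseFeatFactors".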
            if self.distributed_embedding:
                emb = fluid.contrib.layers.sparse_embedding(
                    input=input,
                    size=[
                        self.sparse_feature_number, self.sparse_feature_dim
                    ],
                    param_attr=fluid.ParamAttr(
                        name="SparseFeatFactors",
                        initializer=fluid.initializer.Uniform()))
            else:
                emb = fluid.layers.embedding(
                    input=input,
                    is_sparse=True,
                    is_distributed=self.is_distributed,
                    size=[
                        self.sparse_feature_number, self.sparse_feature_dim
                    ],
                    param_attr=fluid.ParamAttr(
                        name="SparseFeatFactors",
                        initializer=fluid.initializer.Uniform()))
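            # Sum-pool over the variable-length ID sequence so every slot
            # yields one fixed-size vector.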
            emb_sum = fluid.layers.sequence_pool(input=emb, pool_type='sum')
            return emb_sum

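        # One pooled embedding per sparse slot, concatenated with the dense
        # features to form the input of the MLP tower.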
        sparse_embed_seq = list(map(embedding_layer, self.sparse_inputs))
        concated = fluid.layers.concat(
            sparse_embed_seq + [self.dense_input], axis=1)

        fcs = [concated]
        hidden_layers = envs.get_global_env("hyper_parameters.fc_sizes")

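        # Fully connected tower; each layer's weights are drawn from a
        # normal distribution with std 1/sqrt(fan_in) of its input.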
        for size in hidden_layers:
            output = fluid.layers.fc(
                input=fcs[-1],
                size=size,
                act='relu',
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Normal(
                        scale=1.0 / math.sqrt(fcs[-1].shape[1]))))
            fcs.append(output)

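        # Two-way softmax head over click / non-click.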
        predict = fluid.layers.fc(
            input=fcs[-1],
            size=2,
            act="softmax",
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
                scale=1.0 / math.sqrt(fcs[-1].shape[1]))))

        self.predict = predict

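        # Streaming AUC plus a batch AUC over a sliding window of the last
        # `slide_steps` batches.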
        auc, batch_auc, _ = fluid.layers.auc(input=self.predict,
                                             label=self.label_input,
                                             num_thresholds=2**12,
                                             slide_steps=20)
        if is_infer:
            self._infer_results["AUC"] = auc
            self._infer_results["BATCH_AUC"] = batch_auc
            return

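        # Training path: expose the metrics and build the average
        # cross-entropy loss.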
        self._metrics["AUC"] = auc
        self._metrics["BATCH_AUC"] = batch_auc
        cost = fluid.layers.cross_entropy(
            input=self.predict, label=self.label_input)
        avg_cost = fluid.layers.reduce_mean(cost)
        self._cost = avg_cost

    def optimizer(self):
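        # lazy_mode restricts Adam's update to the rows that actually
        # received gradients in the current step, which suits large sparse
        # embedding tables.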
        optimizer = fluid.optimizer.Adam(self.learning_rate, lazy_mode=True)
        return optimizer

    def infer_net(self):
        pass
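
# A minimal sketch (an assumption, not part of this file) of the
# `hyper_parameters` yaml section this model reads via envs.get_global_env;
# the keys match the lookups above, the values are illustrative only:
#
#   hyper_parameters:
#     optimizer:
#       learning_rate: 0.001
#     sparse_feature_number: 1000001
#     sparse_feature_dim: 9
#     fc_sizes: [512, 256, 128, 32]
#     distributed_embedding: 0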