Unverified commit 007c73c8, authored by tangwei12, committed by GitHub

Merge branch 'master' into master

@@ -35,9 +35,10 @@
 | :------: | :-----------------------------------------------------------------------: | :---------: | :---------: | :-----------: |
 | Content Understanding | [Text-Classification](models/contentunderstanding/classification/model.py) | ✓ | x | ✓ |
 | Content Understanding | [TagSpace](models/contentunderstanding/tagspace/model.py) | ✓ | x | ✓ |
-| Recall | [DSSM](models/match/dssm/model.py) | ✓ | x | ✓ |
+| Matching | [DSSM](models/match/dssm/model.py) | ✓ | x | ✓ |
-| Recall | [MultiView-Simnet](models/match/multiview-simnet/model.py) | ✓ | x | ✓ |
+| Matching | [MultiView-Simnet](models/match/multiview-simnet/model.py) | ✓ | x | ✓ |
 | Recall | [TDM](models/treebased/tdm/model.py) | ✓ | x | ✓ |
+| Recall | [fasttext](models/recall/fasttext/model.py) | ✓ | x | x |
 | Recall | [Word2Vec](models/recall/word2vec/model.py) | ✓ | x | ✓ |
 | Recall | [SSR](models/recall/ssr/model.py) | ✓ | ✓ | ✓ |
 | Recall | [Gru4Rec](models/recall/gru4rec/model.py) | ✓ | ✓ | ✓ |
......
@@ -312,13 +312,27 @@ class SingleTrainer(TranspileTrainer):
     def load(self, is_fleet=False):
         dirname = envs.get_global_env(
             "runner." + self._runner_name + ".init_model_path", None)
+        load_vars = envs.get_global_env(
+            "runner." + self._runner_name + ".load_vars", None)
+
+        def name_has_embedding(var):
+            res = False
+            for var_name in load_vars:
+                if var_name == var.name:
+                    return True
+            return res
+
         if dirname is None or dirname == "":
             return
         print("going to load ", dirname)
         if is_fleet:
             fleet.load_persistables(self._exe, dirname)
         else:
-            fluid.io.load_persistables(self._exe, dirname)
+            if load_vars is None or len(load_vars) == 0:
+                fluid.io.load_persistables(self._exe, dirname)
+            else:
+                fluid.io.load_vars(
+                    self._exe, dirname, predicate=name_has_embedding)

     def save(self, epoch_id, is_fleet=False):
         def need_save(epoch_id, epoch_interval, is_last=False):
......
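For orientation, here is a minimal self-contained sketch of what the new predicate-based loading does. The `Var` class below is a hypothetical stand-in for a variable in a Paddle program, not a Paddle API; only its `name` attribute matters to the predicate.

```python
# Hypothetical stand-in for a Paddle program variable (illustration only).
class Var(object):
    def __init__(self, name):
        self.name = name


# Mirrors the "runner.<name>.load_vars" option from the yaml config.
load_vars = ["embedding_1.w_0", "embedding_0.w_0"]


def name_has_embedding(var):
    # Keep a variable only if its name is listed in load_vars,
    # exactly as in SingleTrainer.load() above.
    return var.name in load_vars


program_vars = [Var("embedding_0.w_0"), Var("fc_0.w_0")]
print([v.name for v in program_vars if name_has_embedding(v)])
# -> ['embedding_0.w_0']
```

`fluid.io.load_vars` applies such a predicate to each persistable variable in the program, so only the listed tensors are restored from `init_model_path`; everything else keeps its fresh initialization.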
@@ -16,11 +16,10 @@ from paddlerec.core.utils import envs

 class ValueFormat:
-    def __init__(self, type, value, value_handler):
-        self.type = type
+    def __init__(self, value_type, value, value_handler):
+        self.value_type = value_type
         self.value = value
         self.value_handler = value_handler
-        self.help = help

     def is_valid(self, name, value):
         ret = self.is_type_valid(name, value)
@@ -31,24 +30,24 @@ class ValueFormat:
         return ret

     def is_type_valid(self, name, value):
-        if self.type == "int":
+        if self.value_type == "int":
             if not isinstance(value, int):
                 print("\nattr {} should be int, but {} now\n".format(
-                    name, self.type))
+                    name, self.value_type))
                 return False
             return True

-        elif self.type == "str":
+        elif self.value_type == "str":
             if not isinstance(value, str):
                 print("\nattr {} should be str, but {} now\n".format(
-                    name, self.type))
+                    name, self.value_type))
                 return False
             return True

-        elif self.type == "strs":
+        elif self.value_type == "strs":
             if not isinstance(value, list):
                 print("\nattr {} should be list(str), but {} now\n".format(
-                    name, self.type))
+                    name, self.value_type))
                 return False
             for v in value:
                 if not isinstance(v, str):
@@ -57,10 +56,10 @@ class ValueFormat:
                     return False
             return True

-        elif self.type == "ints":
+        elif self.value_type == "ints":
             if not isinstance(value, list):
                 print("\nattr {} should be list(int), but {} now\n".format(
-                    name, self.type))
+                    name, self.value_type))
                 return False
             for v in value:
                 if not isinstance(v, int):
......
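A short usage sketch of the renamed class, assuming `ValueFormat` as shown in the diff above is in scope (it exercises only `is_type_valid`; the names are taken from the diff):

```python
fmt = ValueFormat("ints", [512, 256, 128, 32], None)
print(fmt.is_type_valid("fc_sizes", [512, 256, 128, 32]))  # True
print(fmt.is_type_valid("fc_sizes", "512,256"))            # prints a warning, False
```

Besides the rename from the builtin-shadowing `type` to `value_type`, the diff also drops the dangling `self.help = help` assignment, which bound the `help` builtin rather than any constructor argument.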
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# global settings
debug: false
workspace: "paddlerec.models.rank.deep_crossing"

dataset:
- name: train_sample
  type: QueueDataset
  batch_size: 5
  data_path: "{workspace}/../dataset/Criteo_data/sample_data/train"
  sparse_slots: "label feat_idx"
  dense_slots: "feat_value:39"
- name: infer_sample
  type: QueueDataset
  batch_size: 5
  data_path: "{workspace}/../dataset/Criteo_data/sample_data/train"
  sparse_slots: "label feat_idx"
  dense_slots: "feat_value:39"

hyper_parameters:
  # user-defined settings
  optimizer:
    class: SGD
    learning_rate: 0.0001
  sparse_feature_number: 1086460
  sparse_feature_dim: 8
  reg: 0.001
  num_field: 39
  residual_unit_num: 4
  residual_w_dim: 128

mode: train_runner
# if infer, change mode to "infer_runner" and change phase to "infer_phase"

runner:
- name: train_runner
  trainer_class: single_train
  epochs: 1
  device: cpu
  init_model_path: ""
  save_checkpoint_interval: 1
  save_inference_interval: 1
  save_checkpoint_path: "increment"
  save_inference_path: "inference"
  print_interval: 1
- name: infer_runner
  trainer_class: single_infer
  epochs: 1
  device: cpu
  init_model_path: "increment/0"
  print_interval: 1

phase:
- name: phase1
  model: "{workspace}/model.py"
  dataset_name: train_sample
  thread_num: 1
#- name: infer_phase
#  model: "{workspace}/model.py"
#  dataset_name: infer_sample
#  thread_num: 1
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

import paddle.fluid as fluid

from paddlerec.core.utils import envs
from paddlerec.core.model import Model as ModelBase


class Model(ModelBase):
    def __init__(self, config):
        ModelBase.__init__(self, config)

    def _init_hyper_parameters(self):
        self.is_distributed = True if envs.get_trainer(
        ) == "CtrTrainer" else False
        self.sparse_feature_number = envs.get_global_env(
            "hyper_parameters.sparse_feature_number", None)
        self.sparse_feature_dim = envs.get_global_env(
            "hyper_parameters.sparse_feature_dim", None)
        self.reg = envs.get_global_env("hyper_parameters.reg", 1e-4)
        self.num_field = envs.get_global_env("hyper_parameters.num_field",
                                             None)
        self.residual_unit_num = envs.get_global_env(
            "hyper_parameters.residual_unit_num", 1)
        self.residual_w_dim = envs.get_global_env(
            "hyper_parameters.residual_w_dim", 32)
        self.concat_size = self.num_field * (self.sparse_feature_dim + 1)

    def residual_unit(self, x):
        # Project to residual_w_dim with ReLU, project back to concat_size,
        # add the skip connection, then clip with a (very high) ReLU6.
        inter_layer = fluid.layers.fc(
            input=x,
            size=self.residual_w_dim,
            act='relu',
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
                scale=1.0 / math.sqrt(self.concat_size))))
        output = fluid.layers.fc(
            input=inter_layer,
            size=self.concat_size,
            act=None,
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
                scale=1.0 / math.sqrt(self.residual_w_dim))))
        output = output + x
        return fluid.layers.relu6(output, threshold=10000000.0)

    def net(self, inputs, is_infer=False):
        raw_feat_idx = self._sparse_data_var[1]  # (batch_size * num_field) * 1
        raw_feat_value = self._dense_data_var[0]  # batch_size * num_field
        self.label = self._sparse_data_var[0]  # batch_size * 1

        init_value_ = 0.1

        feat_idx = raw_feat_idx
        feat_value = fluid.layers.reshape(
            raw_feat_value,
            [-1, self.num_field, 1])  # batch_size * num_field * 1

        # ------------------------- first order term --------------------------

        first_weights_re = fluid.embedding(
            input=feat_idx,
            is_sparse=True,
            is_distributed=self.is_distributed,
            dtype='float32',
            size=[self.sparse_feature_number + 1, 1],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=init_value_),
                regularizer=fluid.regularizer.L1DecayRegularizer(self.reg))
        )  # (batch_size * num_field) * 1 * 1(embedding_size)
        first_weights = fluid.layers.reshape(
            first_weights_re,
            shape=[-1, self.num_field, 1])  # batch_size * num_field * 1

        # ------------------------- second order term --------------------------

        feat_embeddings_re = fluid.embedding(
            input=feat_idx,
            is_sparse=True,
            is_distributed=self.is_distributed,
            dtype='float32',
            size=[self.sparse_feature_number + 1, self.sparse_feature_dim],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0,
                    scale=init_value_ /
                    math.sqrt(float(self.sparse_feature_dim))))
        )  # (batch_size * num_field) * 1 * embedding_size
        feat_embeddings = fluid.layers.reshape(
            feat_embeddings_re,
            shape=[-1, self.num_field, self.sparse_feature_dim
                   ])  # batch_size * num_field * embedding_size
        feat_embeddings = feat_embeddings * feat_value  # batch_size * num_field * embedding_size

        concated = fluid.layers.concat(
            [feat_embeddings, first_weights], axis=2)
        concated = fluid.layers.reshape(
            concated,
            shape=[-1, self.num_field * (self.sparse_feature_dim + 1)])

        for _ in range(self.residual_unit_num):
            concated = self.residual_unit(concated)

        predict = fluid.layers.fc(
            input=concated,
            size=1,
            act="sigmoid",
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
                scale=1 / math.sqrt(self.concat_size))))

        self.predict = predict

        cost = fluid.layers.log_loss(
            input=self.predict, label=fluid.layers.cast(self.label,
                                                        "float32"))  # log_loss
        avg_cost = fluid.layers.reduce_sum(cost)

        self._cost = avg_cost

        predict_2d = fluid.layers.concat([1 - self.predict, self.predict], 1)
        label_int = fluid.layers.cast(self.label, 'int64')
        auc_var, batch_auc_var, _ = fluid.layers.auc(input=predict_2d,
                                                     label=label_int,
                                                     slide_steps=0)
        self._metrics["AUC"] = auc_var
        self._metrics["BATCH_AUC"] = batch_auc_var
        if is_infer:
            self._infer_results["AUC"] = auc_var
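For reference, each `residual_unit` above is a standard Deep Crossing residual block. With input width $d = \text{num\_field} \times (\text{sparse\_feature\_dim} + 1)$ (so $d = 39 \times (8 + 1) = 351$ under the sample config) and hidden width $d_r = \text{residual\_w\_dim} = 128$, it computes

$$
\mathrm{unit}(x) \;=\; \mathrm{ReLU6}\big(x + W_2\,\mathrm{ReLU}(W_1 x + b_1) + b_2\big),
\qquad W_1 \in \mathbb{R}^{d_r \times d},\; W_2 \in \mathbb{R}^{d \times d_r},
$$

where the ReLU6 threshold is set to $10^7$, so in practice it acts as a plain ReLU on the skip-connected sum.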
@@ -15,7 +15,7 @@
 # global settings
 debug: false
-workspace: "paddlerec.models.rank.fm"
+workspace: "paddlerec.models.rank.ffm"

 dataset:
 - name: train_sample
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# global settings
debug: false
workspace: "paddlerec.models.rank.fgcnn"

dataset:
- name: train_sample
  type: QueueDataset
  batch_size: 5
  data_path: "{workspace}/../dataset/Criteo_data/sample_data/train"
  sparse_slots: "label feat_idx"
  dense_slots: "feat_value:39"
- name: infer_sample
  type: QueueDataset
  batch_size: 5
  data_path: "{workspace}/../dataset/Criteo_data/sample_data/train"
  sparse_slots: "label feat_idx"
  dense_slots: "feat_value:39"

hyper_parameters:
  # user-defined settings
  optimizer:
    class: Adam
    learning_rate: 0.0001
  sparse_feature_number: 1086460
  sparse_feature_dim: 9
  is_sparse: False
  use_batchnorm: False
  filters: [38, 40, 42, 44]
  new_filters: [3, 3, 3, 3]
  pooling_size: [2, 2, 2, 2]
  fc_sizes: [4096, 2048]
  num_field: 39
  act: "relu"

mode: train_runner
# if infer, change mode to "infer_runner" and change phase to "infer_phase"

runner:
- name: train_runner
  trainer_class: single_train
  epochs: 1
  device: cpu
  init_model_path: ""
  save_checkpoint_interval: 1
  save_inference_interval: 1
  save_checkpoint_path: "increment"
  save_inference_path: "inference"
  print_interval: 1
- name: infer_runner
  trainer_class: single_infer
  epochs: 1
  device: cpu
  init_model_path: "increment/0"
  print_interval: 1

phase:
- name: phase1
  model: "{workspace}/model.py"
  dataset_name: train_sample
  thread_num: 1
#- name: infer_phase
#  model: "{workspace}/model.py"
#  dataset_name: infer_sample
#  thread_num: 1
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

import paddle.fluid as fluid

from paddlerec.core.utils import envs
from paddlerec.core.model import Model as ModelBase


class Model(ModelBase):
    def __init__(self, config):
        ModelBase.__init__(self, config)

    def _init_hyper_parameters(self):
        self.is_distributed = True if envs.get_trainer(
        ) == "CtrTrainer" else False
        self.sparse_feature_number = envs.get_global_env(
            "hyper_parameters.sparse_feature_number", None)
        self.sparse_feature_dim = envs.get_global_env(
            "hyper_parameters.sparse_feature_dim", None)
        self.is_sparse = envs.get_global_env("hyper_parameters.is_sparse",
                                             False)
        self.use_batchnorm = envs.get_global_env(
            "hyper_parameters.use_batchnorm", False)
        self.filters = envs.get_global_env("hyper_parameters.filters",
                                           [38, 40, 42, 44])
        self.filter_size = envs.get_global_env("hyper_parameters.filter_size",
                                               [1, 9])
        self.pooling_size = envs.get_global_env(
            "hyper_parameters.pooling_size", [2, 2, 2, 2])
        self.new_filters = envs.get_global_env("hyper_parameters.new_filters",
                                               [3, 3, 3, 3])
        self.hidden_layers = envs.get_global_env("hyper_parameters.fc_sizes")
        self.num_field = envs.get_global_env("hyper_parameters.num_field",
                                             None)
        self.act = envs.get_global_env("hyper_parameters.act", None)

    def net(self, inputs, is_infer=False):
        raw_feat_idx = self._sparse_data_var[1]  # (batch_size * num_field) * 1
        raw_feat_value = self._dense_data_var[0]  # batch_size * num_field
        self.label = self._sparse_data_var[0]  # batch_size * 1

        init_value_ = 0.1

        feat_idx = raw_feat_idx
        feat_value = fluid.layers.reshape(
            raw_feat_value,
            [-1, self.num_field, 1])  # batch_size * num_field * 1

        # ------------------------- Embedding layers --------------------------

        feat_embeddings_re = fluid.embedding(
            input=feat_idx,
            is_sparse=self.is_sparse,
            is_distributed=self.is_distributed,
            dtype='float32',
            size=[self.sparse_feature_number + 1, self.sparse_feature_dim],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0,
                    scale=init_value_ /
                    math.sqrt(float(self.sparse_feature_dim))))
        )  # (batch_size * num_field) * 1 * embedding_size
        feat_embeddings = fluid.layers.reshape(
            feat_embeddings_re,
            shape=[-1, self.num_field, self.sparse_feature_dim
                   ])  # batch_size * num_field * embedding_size
        feat_embeddings = feat_embeddings * feat_value  # batch_size * num_field * embedding_size

        feature_generation_input = fluid.layers.reshape(
            feat_embeddings,
            shape=[0, 1, self.num_field, self.sparse_feature_dim])
        new_feature_list = []
        new_feature_field_num = 0

        # ------------------------- Feature Generation --------------------------

        for i in range(len(self.filters)):
            conv_out = fluid.layers.conv2d(
                feature_generation_input,
                num_filters=self.filters[i],
                filter_size=self.filter_size,
                padding="SAME",
                act="tanh")
            pool_out = fluid.layers.pool2d(
                conv_out,
                pool_size=[self.pooling_size[i], 1],
                pool_type="max",
                pool_stride=[self.pooling_size[i], 1])
            pool_out_shape = pool_out.shape[2]
            new_feature_field_num += self.new_filters[i] * pool_out_shape
            flat_pool_out = fluid.layers.flatten(pool_out)
            recombination_out = fluid.layers.fc(input=flat_pool_out,
                                                size=self.new_filters[i] *
                                                self.sparse_feature_dim *
                                                pool_out_shape,
                                                act='tanh')
            new_feature_list.append(recombination_out)
            feature_generation_input = pool_out

        new_features = fluid.layers.concat(new_feature_list, axis=1)
        new_features_map = fluid.layers.reshape(
            new_features,
            shape=[0, new_feature_field_num, self.sparse_feature_dim])
        all_features = fluid.layers.concat(
            [feat_embeddings, new_features_map], axis=1)

        # Pairwise inner products between every two feature fields.
        interaction_list = []
        for i in range(all_features.shape[1]):
            for j in range(i + 1, all_features.shape[1]):
                interaction_list.append(
                    fluid.layers.reduce_sum(
                        all_features[:, i, :] * all_features[:, j, :],
                        dim=1,
                        keep_dim=True))
        new_feature_dnn_input = fluid.layers.concat(interaction_list, axis=1)
        feat_embeddings_dnn_input = fluid.layers.reshape(
            feat_embeddings,
            shape=[0, self.num_field * self.sparse_feature_dim])
        dnn_input = fluid.layers.concat(
            [feat_embeddings_dnn_input, new_feature_dnn_input], axis=1)

        # ------------------------- DNN --------------------------

        fcs = [dnn_input]

        for size in self.hidden_layers:
            output = fluid.layers.fc(
                input=fcs[-1],
                size=size,
                act=self.act,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Normal(
                        scale=1.0 / math.sqrt(fcs[-1].shape[1]))))
            fcs.append(output)

        predict = fluid.layers.fc(
            input=fcs[-1],
            size=1,
            act="sigmoid",
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
                scale=1 / math.sqrt(fcs[-1].shape[1]))))

        # ------------------------- Predict --------------------------

        self.predict = predict

        cost = fluid.layers.log_loss(
            input=self.predict, label=fluid.layers.cast(self.label, "float32"))
        avg_cost = fluid.layers.reduce_sum(cost)

        self._cost = avg_cost

        predict_2d = fluid.layers.concat([1 - self.predict, self.predict], 1)
        label_int = fluid.layers.cast(self.label, 'int64')
        auc_var, batch_auc_var, _ = fluid.layers.auc(input=predict_2d,
                                                     label=label_int,
                                                     slide_steps=0)
        self._metrics["AUC"] = auc_var
        self._metrics["BATCH_AUC"] = batch_auc_var
        if is_infer:
            self._infer_results["AUC"] = auc_var
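A small sketch of the shape bookkeeping in the Feature Generation loop above, under the sample config (`num_field: 39`, `new_filters: [3,3,3,3]`, `pooling_size: [2,2,2,2]`). It assumes the default `pool2d` arithmetic (stride equal to pool size, no ceil mode), which amounts to floor division of the field-axis height:

```python
num_field = 39
new_filters = [3, 3, 3, 3]
pooling_size = [2, 2, 2, 2]

height = num_field            # field axis of the embedding map
new_feature_field_num = 0
for new_f, pool in zip(new_filters, pooling_size):
    height //= pool           # max-pool halves the field axis each round
    new_feature_field_num += new_f * height

total_fields = num_field + new_feature_field_num
num_pairs = total_fields * (total_fields - 1) // 2
print(new_feature_field_num, total_fields, num_pairs)
# -> 102 141 9870
```

So the pairwise-interaction block concatenates 9,870 inner products with the 39 × 9 = 351 raw embedding values to form the DNN input.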
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# global settings
debug: false
workspace: "paddlerec.models.rank.fnn"

dataset:
- name: train_sample
  type: QueueDataset
  batch_size: 5
  data_path: "{workspace}/../dataset/Criteo_data/sample_data/train"
  sparse_slots: "label feat_idx"
  dense_slots: "feat_value:39"
- name: infer_sample
  type: QueueDataset
  batch_size: 5
  data_path: "{workspace}/../dataset/Criteo_data/sample_data/train"
  sparse_slots: "label feat_idx"
  dense_slots: "feat_value:39"

hyper_parameters:
  # user-defined settings
  optimizer:
    class: SGD
    learning_rate: 0.0001
  sparse_feature_number: 1086460
  sparse_feature_dim: 9
  reg: 0.001
  num_field: 39
  fc_sizes: [512, 256, 128, 32]

mode: train_FM_runner  # FM phase: train_FM_runner; DNN phase: train_DNN_runner
# if infer, change mode to "infer_runner" and change phase to "infer_phase"

runner:
- name: train_FM_runner
  trainer_class: single_train
  epochs: 1
  device: cpu
  init_model_path: ""
  save_checkpoint_interval: 1
  save_inference_interval: 1
  save_checkpoint_path: "increment"
  save_inference_path: "inference"
  print_interval: 1
- name: train_DNN_runner
  trainer_class: single_train
  epochs: 1
  device: cpu
  init_model_path: "increment/0"
  load_vars: ["embedding_1.w_0", "embedding_0.w_0"]
  save_checkpoint_interval: 1
  save_inference_interval: 1
  save_checkpoint_path: "increment_fnn"
  save_inference_path: "inference_fnn"
  print_interval: 1
- name: infer_runner
  trainer_class: single_infer
  epochs: 1
  device: cpu
  init_model_path: "increment/0"
  print_interval: 1

phase:
- name: phase1
  model: "{workspace}/fm_model.py"  # FM phase: fm_model.py; DNN phase: model.py
  dataset_name: train_sample
  thread_num: 1
#- name: infer_phase
#  model: "{workspace}/model.py"
#  dataset_name: infer_sample
#  thread_num: 1
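Note how this config is the consumer of the `load_vars` logic added to `SingleTrainer.load()` earlier in this commit: `train_FM_runner` trains `fm_model.py` from scratch and checkpoints to `increment`, then `train_DNN_runner` restores only the two embedding tables (`embedding_0.w_0`, `embedding_1.w_0`) from `increment/0` and trains the DNN on top of them, while every other variable is freshly initialized.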
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

import paddle.fluid as fluid

from paddlerec.core.utils import envs
from paddlerec.core.model import Model as ModelBase


class Model(ModelBase):
    def __init__(self, config):
        ModelBase.__init__(self, config)

    def _init_hyper_parameters(self):
        self.is_distributed = True if envs.get_trainer(
        ) == "CtrTrainer" else False
        self.sparse_feature_number = envs.get_global_env(
            "hyper_parameters.sparse_feature_number", None)
        self.sparse_feature_dim = envs.get_global_env(
            "hyper_parameters.sparse_feature_dim", None)
        self.reg = envs.get_global_env("hyper_parameters.reg", 1e-4)
        self.num_field = envs.get_global_env("hyper_parameters.num_field",
                                             None)

    def net(self, inputs, is_infer=False):
        raw_feat_idx = self._sparse_data_var[1]  # (batch_size * num_field) * 1
        raw_feat_value = self._dense_data_var[0]  # batch_size * num_field
        self.label = self._sparse_data_var[0]  # batch_size * 1

        init_value_ = 0.1

        feat_idx = raw_feat_idx
        feat_value = fluid.layers.reshape(
            raw_feat_value,
            [-1, self.num_field, 1])  # batch_size * num_field * 1

        # ------------------------- first order term --------------------------

        first_weights_re = fluid.embedding(
            input=feat_idx,
            is_sparse=True,
            is_distributed=self.is_distributed,
            dtype='float32',
            size=[self.sparse_feature_number + 1, 1],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=init_value_),
                regularizer=fluid.regularizer.L1DecayRegularizer(self.reg))
        )  # (batch_size * num_field) * 1 * 1(embedding_size)
        first_weights = fluid.layers.reshape(
            first_weights_re,
            shape=[-1, self.num_field, 1])  # batch_size * num_field * 1
        y_first_order = fluid.layers.reduce_sum((first_weights * feat_value),
                                                1)  # batch_size * 1
        b_linear = fluid.layers.create_parameter(
            shape=[1],
            dtype='float32',
            default_initializer=fluid.initializer.ConstantInitializer(
                value=0))  # 1

        # ------------------------- second order term --------------------------

        feat_embeddings_re = fluid.embedding(
            input=feat_idx,
            is_sparse=True,
            is_distributed=self.is_distributed,
            dtype='float32',
            size=[self.sparse_feature_number + 1, self.sparse_feature_dim],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0,
                    scale=init_value_ /
                    math.sqrt(float(self.sparse_feature_dim))))
        )  # (batch_size * num_field) * 1 * embedding_size
        feat_embeddings = fluid.layers.reshape(
            feat_embeddings_re,
            shape=[-1, self.num_field, self.sparse_feature_dim
                   ])  # batch_size * num_field * embedding_size
        feat_embeddings = feat_embeddings * feat_value  # batch_size * num_field * embedding_size

        # sum_square part
        summed_features_emb = fluid.layers.reduce_sum(
            feat_embeddings, 1)  # batch_size * embedding_size
        summed_features_emb_square = fluid.layers.square(
            summed_features_emb)  # batch_size * embedding_size

        # square_sum part
        squared_features_emb = fluid.layers.square(
            feat_embeddings)  # batch_size * num_field * embedding_size
        squared_sum_features_emb = fluid.layers.reduce_sum(
            squared_features_emb, 1)  # batch_size * embedding_size

        y_FM = 0.5 * fluid.layers.reduce_sum(
            summed_features_emb_square - squared_sum_features_emb,
            dim=1,
            keep_dim=True)  # batch_size * 1

        # ------------------------- Predict --------------------------

        self.predict = fluid.layers.sigmoid(b_linear + y_first_order + y_FM)

        cost = fluid.layers.log_loss(
            input=self.predict, label=fluid.layers.cast(self.label,
                                                        "float32"))  # log_loss
        avg_cost = fluid.layers.reduce_sum(cost)

        self._cost = avg_cost

        predict_2d = fluid.layers.concat([1 - self.predict, self.predict], 1)
        label_int = fluid.layers.cast(self.label, 'int64')
        auc_var, batch_auc_var, _ = fluid.layers.auc(input=predict_2d,
                                                     label=label_int,
                                                     slide_steps=0)
        self._metrics["AUC"] = auc_var
        self._metrics["BATCH_AUC"] = batch_auc_var
        if is_infer:
            self._infer_results["AUC"] = auc_var
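The `sum_square`/`square_sum` blocks implement the standard FM rewrite of the pairwise term, which lowers the cost from $O(k n^2)$ to $O(k n)$ for $n$ fields and embedding size $k$:

$$
y_{\mathrm{FM}}
\;=\; \sum_{i<j} \langle v_i, v_j \rangle\, x_i x_j
\;=\; \frac{1}{2} \sum_{f=1}^{k} \Big[ \Big( \sum_{i} v_{i,f}\, x_i \Big)^{2} - \sum_{i} v_{i,f}^{2}\, x_i^{2} \Big],
$$

where $v_i x_i$ corresponds to the `feat_embeddings * feat_value` product above, the first bracketed term to `summed_features_emb_square`, and the second to `squared_sum_features_emb`.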
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

import paddle.fluid as fluid

from paddlerec.core.utils import envs
from paddlerec.core.model import Model as ModelBase


class Model(ModelBase):
    def __init__(self, config):
        ModelBase.__init__(self, config)

    def _init_hyper_parameters(self):
        self.is_distributed = True if envs.get_trainer(
        ) == "CtrTrainer" else False
        self.sparse_feature_number = envs.get_global_env(
            "hyper_parameters.sparse_feature_number", None)
        self.sparse_feature_dim = envs.get_global_env(
            "hyper_parameters.sparse_feature_dim", None)
        self.reg = envs.get_global_env("hyper_parameters.reg", 1e-4)
        self.num_field = envs.get_global_env("hyper_parameters.num_field",
                                             None)

    def net(self, inputs, is_infer=False):
        raw_feat_idx = self._sparse_data_var[1]  # (batch_size * num_field) * 1
        raw_feat_value = self._dense_data_var[0]  # batch_size * num_field
        self.label = self._sparse_data_var[0]  # batch_size * 1

        init_value_ = 0.1

        feat_idx = raw_feat_idx
        feat_value = fluid.layers.reshape(
            raw_feat_value,
            [-1, self.num_field, 1])  # batch_size * num_field * 1

        # ------------------------- first order term --------------------------

        first_weights_re = fluid.embedding(
            input=feat_idx,
            is_sparse=True,
            is_distributed=self.is_distributed,
            dtype='float32',
            size=[self.sparse_feature_number + 1, 1],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=init_value_),
                regularizer=fluid.regularizer.L1DecayRegularizer(self.reg))
        )  # (batch_size * num_field) * 1 * 1(embedding_size)
        first_weights = fluid.layers.reshape(
            first_weights_re,
            shape=[-1, self.num_field, 1])  # batch_size * num_field * 1

        # ------------------------- second order term --------------------------

        feat_embeddings_re = fluid.embedding(
            input=feat_idx,
            is_sparse=True,
            is_distributed=self.is_distributed,
            dtype='float32',
            size=[self.sparse_feature_number + 1, self.sparse_feature_dim],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0,
                    scale=init_value_ /
                    math.sqrt(float(self.sparse_feature_dim))))
        )  # (batch_size * num_field) * 1 * embedding_size
        feat_embeddings = fluid.layers.reshape(
            feat_embeddings_re,
            shape=[-1, self.num_field, self.sparse_feature_dim
                   ])  # batch_size * num_field * embedding_size
        feat_embeddings = feat_embeddings * feat_value  # batch_size * num_field * embedding_size

        concated = fluid.layers.concat(
            [feat_embeddings, first_weights], axis=2)
        concated = fluid.layers.reshape(
            concated,
            shape=[-1, self.num_field * (self.sparse_feature_dim + 1)])

        fcs = [concated]
        hidden_layers = envs.get_global_env("hyper_parameters.fc_sizes")

        for size in hidden_layers:
            output = fluid.layers.fc(
                input=fcs[-1],
                size=size,
                act='relu',
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Normal(
                        scale=1.0 / math.sqrt(fcs[-1].shape[1]))))
            fcs.append(output)

        predict = fluid.layers.fc(
            input=fcs[-1],
            size=1,
            act="sigmoid",
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
                scale=1 / math.sqrt(fcs[-1].shape[1]))))

        self.predict = predict

        cost = fluid.layers.log_loss(
            input=self.predict, label=fluid.layers.cast(self.label,
                                                        "float32"))  # log_loss
        avg_cost = fluid.layers.reduce_sum(cost)

        self._cost = avg_cost

        predict_2d = fluid.layers.concat([1 - self.predict, self.predict], 1)
        label_int = fluid.layers.cast(self.label, 'int64')
        auc_var, batch_auc_var, _ = fluid.layers.auc(input=predict_2d,
                                                     label=label_int,
                                                     slide_steps=0)
        self._metrics["AUC"] = auc_var
        self._metrics["BATCH_AUC"] = batch_auc_var
        if is_infer:
            self._infer_results["AUC"] = auc_var
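This DNN-phase model reuses the FM front end (per-field embeddings plus the 1-dimensional first-order weights) and feeds their concatenation into an MLP, which is why only the two embedding tables need to be restored via `load_vars`. Under the sample config (`num_field: 39`, `sparse_feature_dim: 9`), the MLP input width is $39 \times (9 + 1) = 390$, which `fc_sizes: [512, 256, 128, 32]` then narrows to a single sigmoid output.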
@@ -14,7 +14,7 @@
 # global settings
 debug: false
-workspace: "paddlerec.models.rank.deepfm"
+workspace: "paddlerec.models.rank.logistic_regression"

 dataset:
......
@@ -15,7 +15,7 @@
 # global settings
 debug: false
-workspace: "paddlerec.models.rank.nfm"
+workspace: "paddlerec.models.rank.pnn"

 dataset:
 - name: train_sample
......
# Ranking Model Library

## Introduction

-We provide PaddleRec implementations of the model algorithms commonly used in ranking tasks, along with single-machine training & prediction quality metrics and distributed training & prediction performance metrics. The implemented ranking models include [multi-layer DNN](dnn), [Deep Cross Network](dcn), [DeepFM](deepfm), [xDeepFM](xdeepfm), [Deep Interest Network](din), and [Wide&Deep](wide_deep).
+We provide PaddleRec implementations of the model algorithms commonly used in ranking tasks, along with single-machine training & prediction quality metrics and distributed training & prediction performance metrics. The implemented ranking models include [logistic regression](logistic_regression), [multi-layer DNN](dnn), [FM](fm), [FFM](ffm), [PNN](pnn), [Deep Cross Network](dcn), [DeepFM](deepfm), [xDeepFM](xdeepfm), [NFM](nfm), [AFM](afm), [Deep Interest Network](din), and [Wide&Deep](wide_deep).

More model algorithms are being added continuously; stay tuned.

@@ -25,6 +25,7 @@
 | DNN | Multi-layer neural network | -- |
 | Logistic Regression | Logistic regression | -- |
 | FM | Factorization machine | [Factorization Machine](https://ieeexplore.ieee.org/abstract/document/5694074)(2010) |
+| FFM | Field-aware FM | [Field-aware Factorization Machines for CTR Prediction](https://dl.acm.org/doi/pdf/10.1145/2959100.2959134)(2016) |
 | PNN | Product Network | [Product-based Neural Networks for User Response Prediction](https://arxiv.org/pdf/1611.00144.pdf)(2016) |
 | wide&deep | Deep + wide(LR) | [Wide & Deep Learning for Recommender Systems](https://dl.acm.org/doi/pdf/10.1145/2988450.2988454)(2016) |
......