Commit e9296e24
Committed on Jun 08, 2020 by yaoxuefeng

tmp add fgcnn for bak

Parent: 3d790948

Showing 3 changed files with 329 additions and 0 deletions
models/rank/fgcnn/__init__.py    +13  -0
models/rank/fgcnn/config.yaml    +83  -0
models/rank/fgcnn/model.py       +233 -0
models/rank/fgcnn/__init__.py
0 → 100755
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
models/rank/fgcnn/config.yaml
0 → 100755
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# global settings
debug: false
workspace: "paddlerec.models.rank.fgcnn"

dataset:
- name: train_sample
  type: QueueDataset
  batch_size: 5
  data_path: "{workspace}/../dataset/Criteo_data/sample_data/train"
  sparse_slots: "label feat_idx"
  dense_slots: "feat_value:39"
- name: infer_sample
  type: QueueDataset
  batch_size: 5
  data_path: "{workspace}/../dataset/Criteo_data/sample_data/train"
  sparse_slots: "label feat_idx"
  dense_slots: "feat_value:39"

hyper_parameters:
  # user-defined settings
  optimizer:
    class: Adam
    learning_rate: 0.0001
  sparse_feature_number: 1086460
  sparse_feature_dim: 9
  is_sparse: False
  use_batchnorm: False
  filters: [38, 40, 42, 44]
  new_filters: [3, 3, 3, 3]
  pooling_size: [2, 2, 2, 2]
  use_dropout: False
  dropout_prob: 0.9
  fc_sizes: [400, 400, 400]
  loss_type: "log_loss" # log_loss or square_loss
  reg: 0.001
  num_field: 39
  act: "relu"

mode: train_runner
# if infer, change mode to "infer_runner" and change phase to "infer_phase"

runner:
- name: train_runner
  trainer_class: single_train
  epochs: 1
  device: cpu
  init_model_path: ""
  save_checkpoint_interval: 1
  save_inference_interval: 1
  save_checkpoint_path: "increment"
  save_inference_path: "inference"
  print_interval: 1
- name: infer_runner
  trainer_class: single_infer
  epochs: 1
  device: cpu
  init_model_path: "increment/0"
  print_interval: 1

phase:
- name: phase1
  model: "{workspace}/model.py"
  dataset_name: train_sample
  thread_num: 1
#- name: infer_phase
#  model: "{workspace}/model.py"
#  dataset_name: infer_sample
#  thread_num: 1
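
Note (not part of the commit): the filters, new_filters, and pooling_size values above determine how many generated feature fields model.py appends to the original 39 fields before computing interactions. A minimal Python sketch of that arithmetic, assuming the default non-ceil max-pooling so each pooling step shrinks the field dimension to (h - k) // k + 1:

num_field = 39            # original Criteo fields
embedding_size = 9        # sparse_feature_dim
filters = [38, 40, 42, 44]
new_filters = [3, 3, 3, 3]
pooling_size = [2, 2, 2, 2]

h = num_field             # field dimension entering each conv/pool layer
new_feature_field_num = 0
for i in range(len(filters)):
    # assumed pooling arithmetic: kernel = stride = pooling_size[i], no padding
    h = (h - pooling_size[i]) // pooling_size[i] + 1
    new_feature_field_num += new_filters[i] * h
    print("layer %d: pooled height %d, recombination fc size %d"
          % (i, h, new_filters[i] * embedding_size * h))

print("generated fields:", new_feature_field_num)                      # 57 + 27 + 12 + 6 = 102
print("total interaction fields:", num_field + new_feature_field_num)  # 39 + 102 = 141
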
models/rank/fgcnn/model.py
0 → 100755
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections import OrderedDict

import paddle.fluid as fluid

from paddlerec.core.utils import envs
from paddlerec.core.model import Model as ModelBase


class Model(ModelBase):
    def __init__(self, config):
        ModelBase.__init__(self, config)

    def _init_hyper_parameters(self):
        self.is_distributed = True if envs.get_trainer(
        ) == "CtrTrainer" else False
        self.sparse_feature_number = envs.get_global_env(
            "hyper_parameters.sparse_feature_number", None)
        self.sparse_feature_dim = envs.get_global_env(
            "hyper_parameters.sparse_feature_dim", None)
        self.is_sparse = envs.get_global_env("hyper_parameters.is_sparse",
                                             False)
        self.use_batchnorm = envs.get_global_env(
            "hyper_parameters.use_batchnorm", False)
        self.filters = envs.get_global_env("hyper_parameters.filters",
                                           [38, 40, 42, 44])
        self.filter_size = envs.get_global_env("hyper_parameters.filter_size",
                                               [1, 9])
        self.pooling_size = envs.get_global_env(
            "hyper_parameters.pooling_size", [2, 2, 2, 2])
        self.new_filters = envs.get_global_env("hyper_parameters.new_filters",
                                               [3, 3, 3, 3])
        self.use_dropout = envs.get_global_env("hyper_parameters.use_dropout",
                                               False)
        self.dropout_prob = envs.get_global_env(
            "hyper_parameters.dropout_prob", None)
        self.layer_sizes = envs.get_global_env("hyper_parameters.fc_sizes",
                                               None)
        self.loss_type = envs.get_global_env("hyper_parameters.loss_type",
                                             'log_loss')
        self.reg = envs.get_global_env("hyper_parameters.reg", 1e-4)
        self.num_field = envs.get_global_env("hyper_parameters.num_field",
                                             None)
        self.act = envs.get_global_env("hyper_parameters.act", None)

    def net(self, inputs, is_infer=False):
        raw_feat_idx = self._sparse_data_var[1]  # (batch_size * num_field) * 1
        raw_feat_value = self._dense_data_var[0]  # batch_size * num_field
        self.label = self._sparse_data_var[0]  # batch_size * 1

        init_value_ = 0.1

        feat_idx = raw_feat_idx
        feat_value = fluid.layers.reshape(
            raw_feat_value,
            [-1, self.num_field, 1])  # batch_size * num_field * 1

        # ------------------------- Embedding layers --------------------------
        feat_embeddings_re = fluid.embedding(
            input=feat_idx,
            is_sparse=self.is_sparse,
            is_distributed=self.is_distributed,
            dtype='float32',
            size=[self.sparse_feature_number + 1, self.sparse_feature_dim],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0,
                    scale=init_value_ /
                    math.sqrt(float(self.sparse_feature_dim))))
        )  # (batch_size * num_field) * 1 * embedding_size
        feat_embeddings = fluid.layers.reshape(
            feat_embeddings_re,
            shape=[-1, self.num_field, self.sparse_feature_dim
                   ])  # batch_size * num_field * embedding_size
        feat_embeddings = feat_embeddings * feat_value  # batch_size * num_field * embedding_size

        # ------------------- Feature Generation (CNN) ------------------------
        feature_generation_input = fluid.layers.reshape(
            feat_embeddings,
            shape=[0, 1, self.num_field, self.sparse_feature_dim])
        new_feature_list = []
        new_feature_field_num = 0
        for i in range(len(self.filters)):
            conv_out = fluid.layers.conv2d(
                feature_generation_input,
                num_filters=self.filters[i],
                filter_size=self.filter_size,
                padding="SAME",
                act="tanh")
            pool_out = fluid.layers.pool2d(
                conv_out,
                pool_size=[self.pooling_size[i], 1],
                pool_type="max",
                pool_stride=[self.pooling_size[i], 1])
            pool_out_shape = pool_out.shape[2]
            new_feature_field_num += self.new_filters[i] * pool_out_shape
            print("SHAPE>> {}".format(pool_out_shape))
            flat_pool_out = fluid.layers.flatten(pool_out)
            # recombination layer: generate new feature fields from pooled maps
            recombination_out = fluid.layers.fc(
                input=flat_pool_out,
                size=self.new_filters[i] * self.sparse_feature_dim *
                pool_out_shape,
                act='tanh')
            new_feature_list.append(recombination_out)
            feature_generation_input = pool_out
        new_features = fluid.layers.concat(new_feature_list, axis=1)
        new_features_map = fluid.layers.reshape(
            new_features,
            shape=[0, new_feature_field_num, self.sparse_feature_dim])
        print("new_feature shape: {}".format(new_features_map.shape))
        #fluid.layers.Print(new_features_map)

        all_features = fluid.layers.concat(
            [feat_embeddings, new_features_map], axis=1)
        #fluid.layers.Print(all_features)
        print("all_feature shape: {}".format(all_features.shape))

        # pairwise inner products over original and generated feature fields
        interaction_list = []
        fluid.layers.Print(all_features[:, 0, :])
        for i in range(all_features.shape[1]):
            for j in range(i + 1, all_features.shape[1]):
                interaction_list.append(
                    fluid.layers.reduce_sum(
                        all_features[:, i, :] * all_features[:, j, :],
                        dim=1,
                        keep_dim=True))

        # sum_square part
        summed_features_emb = fluid.layers.reduce_sum(
            feat_embeddings, 1)  # batch_size * embedding_size
        summed_features_emb_square = fluid.layers.square(
            summed_features_emb)  # batch_size * embedding_size

        # square_sum part
        squared_features_emb = fluid.layers.square(
            feat_embeddings)  # batch_size * num_field * embedding_size
        squared_sum_features_emb = fluid.layers.reduce_sum(
            squared_features_emb, 1)  # batch_size * embedding_size

        y_FM = 0.5 * (summed_features_emb_square - squared_sum_features_emb
                      )  # batch_size * embedding_size
        if self.use_batchnorm:
            y_FM = fluid.layers.batch_norm(input=y_FM, is_test=is_infer)
        if self.use_dropout:
            y_FM = fluid.layers.dropout(
                x=y_FM, dropout_prob=self.dropout_prob, is_test=is_infer)

        # ------------------------- DNN --------------------------
        y_dnn = y_FM
        for s in self.layer_sizes:
            if self.use_batchnorm:
                y_dnn = fluid.layers.fc(
                    input=y_dnn,
                    size=s,
                    act=self.act,
                    param_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.TruncatedNormalInitializer(
                            loc=0.0,
                            scale=init_value_ / math.sqrt(float(10)))),
                    bias_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.TruncatedNormalInitializer(
                            loc=0.0, scale=init_value_)))
                y_dnn = fluid.layers.batch_norm(
                    input=y_dnn, act=self.act, is_test=is_infer)
            else:
                y_dnn = fluid.layers.fc(
                    input=y_dnn,
                    size=s,
                    act=self.act,
                    param_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.TruncatedNormalInitializer(
                            loc=0.0,
                            scale=init_value_ / math.sqrt(float(10)))),
                    bias_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.TruncatedNormalInitializer(
                            loc=0.0, scale=init_value_)))
            if self.use_dropout:
                y_dnn = fluid.layers.dropout(
                    x=y_dnn, dropout_prob=self.dropout_prob, is_test=is_infer)
        y_dnn = fluid.layers.fc(
            input=y_dnn,
            size=1,
            act=None,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=init_value_)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=init_value_)))

        # ------------------------- Predict --------------------------
        self.predict = fluid.layers.sigmoid(y_dnn)
        if self.loss_type == "square_loss":
            cost = fluid.layers.mse_loss(
                input=self.predict,
                label=fluid.layers.cast(self.label, "float32"))
        else:  # default log_loss
            cost = fluid.layers.log_loss(
                input=self.predict,
                label=fluid.layers.cast(self.label, "float32"))
        avg_cost = fluid.layers.reduce_sum(cost)

        self._cost = avg_cost

        predict_2d = fluid.layers.concat([1 - self.predict, self.predict], 1)
        label_int = fluid.layers.cast(self.label, 'int64')
        auc_var, batch_auc_var, _ = fluid.layers.auc(
            input=predict_2d, label=label_int, slide_steps=0)
        self._metrics["AUC"] = auc_var
        self._metrics["BATCH_AUC"] = batch_auc_var
        if is_infer:
            self._infer_results["AUC"] = auc_var
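
Note (not part of the commit): the y_FM term above uses the standard FM identity that the sum over all pairs of element-wise products of field embeddings equals 0.5 * ((sum of embeddings)^2 - sum of squared embeddings); the explicit interaction_list double loop computes the same kind of pairwise products, but over the wider set of original plus generated fields. A minimal NumPy check of the identity, with an assumed random embedding tensor:

import numpy as np

rng = np.random.default_rng(0)
emb = rng.normal(size=(4, 39, 9))   # assumed batch_size x num_field x embedding_size

# explicit pairwise products, as in the interaction_list double loop
pairwise = np.zeros((4, 9))
for i in range(emb.shape[1]):
    for j in range(i + 1, emb.shape[1]):
        pairwise += emb[:, i, :] * emb[:, j, :]

# sum-square minus square-sum, as in y_FM
fm = 0.5 * (emb.sum(axis=1) ** 2 - (emb ** 2).sum(axis=1))

assert np.allclose(pairwise, fm)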