BaiXuePrincess / PaddleRec (forked from PaddlePaddle / PaddleRec)
Commit d74043eb
Authored Jun 08, 2020 by yaoxuefeng
Parent: e9296e24

add fgcnn

Showing 2 changed files with 31 additions and 94 deletions:

  models/rank/fgcnn/config.yaml  (+1, -5)
  models/rank/fgcnn/model.py     (+30, -89)
models/rank/fgcnn/config.yaml

@@ -43,11 +43,7 @@ hyper_parameters:
   filters: [38, 40, 42, 44]
   new_filters: [3, 3, 3, 3]
   pooling_size: [2, 2, 2, 2]
-  use_dropout: False
-  fc_sizes: [4096, 2048]
-  dropout_prob: 0.9
-  loss_type: "log_loss" # log_loss or square_loss
-  reg: 0.001
+  fc_sizes: [400, 400, 400]
   num_field: 39
   act: "relu"
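One constraint worth noting: the feature-generation loop in model.py below indexes filters, new_filters, and pooling_size in lockstep, so the three lists must keep the same length. A minimal sketch of that invariant in plain Python, with the values copied from the config above:

```python
# Values copied from models/rank/fgcnn/config.yaml after this commit.
filters = [38, 40, 42, 44]   # conv2d output channels per layer
new_filters = [3, 3, 3, 3]   # recombination maps per layer
pooling_size = [2, 2, 2, 2]  # pool height per layer

# model.py iterates range(len(filters)) and indexes the other two lists
# with the same i, so mismatched lengths would raise IndexError.
assert len(filters) == len(new_filters) == len(pooling_size)
```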
models/rank/fgcnn/model.py

@@ -44,16 +44,7 @@ class Model(ModelBase):
             "hyper_parameters.pooling_size", [2, 2, 2, 2])
         self.new_filters = envs.get_global_env("hyper_parameters.new_filters",
                                                [3, 3, 3, 3])
-        self.use_dropout = envs.get_global_env("hyper_parameters.use_dropout",
-                                               False)
-        self.dropout_prob = envs.get_global_env(
-            "hyper_parameters.dropout_prob", None)
-        self.layer_sizes = envs.get_global_env("hyper_parameters.fc_sizes",
-                                               None)
-        self.loss_type = envs.get_global_env("hyper_parameters.loss_type",
-                                             'logloss')
-        self.reg = envs.get_global_env("hyper_parameters.reg", 1e-4)
+        self.hidden_layers = envs.get_global_env("hyper_parameters.fc_sizes")
         self.num_field = envs.get_global_env("hyper_parameters.num_field",
                                              None)
         self.act = envs.get_global_env("hyper_parameters.act", None)
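All of these hyperparameters are fetched through PaddleRec's envs.get_global_env. As a rough mental model only (a hypothetical stand-in for illustration; the real helper lives in paddlerec.core.utils.envs and does more), it behaves like a keyed lookup into the flattened config with an optional default:

```python
# Hypothetical stand-in for envs.get_global_env, for illustration only.
_GLOBAL_ENV = {
    "hyper_parameters.fc_sizes": [400, 400, 400],
    "hyper_parameters.num_field": 39,
    "hyper_parameters.act": "relu",
}

def get_global_env(key, default=None):
    # Keys mirror the nested config.yaml path, joined with dots.
    return _GLOBAL_ENV.get(key, default)

assert get_global_env("hyper_parameters.fc_sizes") == [400, 400, 400]
# Keys deleted from config.yaml in this commit now fall back to the default:
assert get_global_env("hyper_parameters.reg", 1e-4) == 1e-4
```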
@@ -95,6 +86,9 @@ class Model(ModelBase):
             shape=[0, 1, self.num_field, self.sparse_feature_dim])
         new_feature_list = []
         new_feature_field_num = 0
+        # ------------------------- Feature Generation --------------------------
         for i in range(len(self.filters)):
             conv_out = fluid.layers.conv2d(
                 featuer_generation_input,
@@ -109,7 +103,6 @@ class Model(ModelBase):
                 pool_stride=[self.pooling_size[i], 1])
             pool_out_shape = pool_out.shape[2]
             new_feature_field_num += self.new_filters[i] * pool_out_shape
-            print("SHAPE>> {}".format(pool_out_shape))
             flat_pool_out = fluid.layers.flatten(pool_out)
             recombination_out = fluid.layers.fc(input=flat_pool_out,
                                                 size=self.new_filters[i] *
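With the debug print gone, the growth of new_feature_field_num is easy to lose track of. A hedged back-of-the-envelope sketch, assuming each pool simply divides the field axis by pooling_size[i] (stride equal to pool size, no padding, floor division; the exact value depends on Paddle's pool2d behavior):

```python
# Back-of-the-envelope count of generated feature fields, assuming
# pool_out.shape[2] == previous_height // pooling_size[i] at every layer.
num_field = 39
new_filters = [3, 3, 3, 3]
pooling_size = [2, 2, 2, 2]

height = num_field
new_feature_field_num = 0
for i in range(len(new_filters)):
    height = height // pooling_size[i]             # 19, 9, 4, 2
    new_feature_field_num += new_filters[i] * height

print(new_feature_field_num)  # 3*19 + 3*9 + 3*4 + 3*2 = 102 under these assumptions
```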
@@ -122,102 +115,50 @@ class Model(ModelBase):
         new_features_map = fluid.layers.reshape(
             new_featues,
             shape=[0, new_feature_field_num, self.sparse_feature_dim])
-        print("new_feature shape: {}".format(new_features_map.shape))
-        #fluid.layers.Print(new_features_map)
         all_features = fluid.layers.concat(
             [feat_embeddings, new_features_map], axis=1)
-        #fluid.layers.Print(all_features)
-        print("all_feature shape: {}".format(all_features.shape))
         interaction_list = []
-        fluid.layers.Print(all_features[:, 0, :])
         for i in range(all_features.shape[1]):
             for j in range(i + 1, all_features.shape[1]):
                 interaction_list.append(
                     fluid.layers.reduce_sum(
                         all_features[:, i, :] * all_features[:, j, :],
                         dim=1,
                         keep_dim=True))
+        new_feature_dnn_input = fluid.layers.concat(interaction_list, axis=1)
+        feat_embeddings_dnn_input = fluid.layers.reshape(
+            feat_embeddings,
+            shape=[0, self.num_field * self.sparse_feature_dim])
+        dnn_input = fluid.layers.concat(
+            [feat_embeddings_dnn_input, new_feature_dnn_input], axis=1)
-        # sum_square part
-        summed_features_emb = fluid.layers.reduce_sum(
-            feat_embeddings, 1)  # batch_size * embedding_size
-        summed_features_emb_square = fluid.layers.square(
-            summed_features_emb)  # batch_size * embedding_size
-        # square_sum part
-        squared_features_emb = fluid.layers.square(
-            feat_embeddings)  # batch_size * num_field * embedding_size
-        squared_sum_features_emb = fluid.layers.reduce_sum(
-            squared_features_emb, 1)  # batch_size * embedding_size
-        y_FM = 0.5 * (summed_features_emb_square -
-                      squared_sum_features_emb)  # batch_size * embedding_size
-        if self.use_batchnorm:
-            y_FM = fluid.layers.batch_norm(input=y_FM, is_test=is_infer)
-        if self.use_dropout:
-            y_FM = fluid.layers.dropout(
-                x=y_FM, dropout_prob=self.dropout_prob, is_test=is_infer)
-        # ------------------------- DNN --------------------------
-        y_dnn = y_FM
-        for s in self.layer_sizes:
-            if self.use_batchnorm:
-                y_dnn = fluid.layers.fc(
-                    input=y_dnn,
-                    size=s,
-                    act=self.act,
-                    param_attr=fluid.ParamAttr(
-                        initializer=fluid.initializer.TruncatedNormalInitializer(
-                            loc=0.0, scale=init_value_ / math.sqrt(float(10)))),
-                    bias_attr=fluid.ParamAttr(
-                        initializer=fluid.initializer.TruncatedNormalInitializer(
-                            loc=0.0, scale=init_value_)))
-                y_dnn = fluid.layers.batch_norm(
-                    input=y_dnn, act=self.act, is_test=is_infer)
-            else:
-                y_dnn = fluid.layers.fc(
-                    input=y_dnn,
-                    size=s,
-                    act=self.act,
-                    param_attr=fluid.ParamAttr(
-                        initializer=fluid.initializer.TruncatedNormalInitializer(
-                            loc=0.0, scale=init_value_ / math.sqrt(float(10)))),
-                    bias_attr=fluid.ParamAttr(
-                        initializer=fluid.initializer.TruncatedNormalInitializer(
-                            loc=0.0, scale=init_value_)))
-            if self.use_dropout:
-                y_dnn = fluid.layers.dropout(
-                    x=y_dnn, dropout_prob=self.dropout_prob, is_test=is_infer)
-        y_dnn = fluid.layers.fc(
-            input=y_dnn,
-            size=1,
-            act=None,
-            param_attr=fluid.ParamAttr(
-                initializer=fluid.initializer.TruncatedNormalInitializer(
-                    loc=0.0, scale=init_value_)),
-            bias_attr=fluid.ParamAttr(
-                initializer=fluid.initializer.TruncatedNormalInitializer(
-                    loc=0.0, scale=init_value_)))
+        # ------------------------- DNN --------------------------
+        fcs = [dnn_input]
+        for size in self.hidden_layers:
+            output = fluid.layers.fc(
+                input=fcs[-1],
+                size=size,
+                act=self.act,
+                param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
+                    scale=1.0 / math.sqrt(fcs[-1].shape[1]))))
+            fcs.append(output)
+        predict = fluid.layers.fc(
+            input=fcs[-1],
+            size=1,
+            act="sigmoid",
+            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
+                scale=1 / math.sqrt(fcs[-1].shape[1]))))
         # ------------------------- Predict --------------------------
-        self.predict = fluid.layers.sigmoid(y_dnn)
-        if self.loss_type == "squqre_loss":
-            cost = fluid.layers.mse_loss(
-                input=self.predict,
-                label=fluid.layers.cast(self.label, "float32"))
-        else:
-            cost = fluid.layers.log_loss(
-                input=self.predict,
-                label=fluid.layers.cast(self.label, "float32"))  # default log_loss
+        self.predict = predict
+        cost = fluid.layers.log_loss(
+            input=self.predict,
+            label=fluid.layers.cast(self.label, "float32"))
         avg_cost = fluid.layers.reduce_sum(cost)
         self._cost = avg_cost
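The nested loop kept as context above emits one scalar inner product per feature pair (i, j) with i < j, and the new code concatenates those C(n, 2) scalars into the DNN input. A NumPy sketch of the same computation, with illustrative shapes only:

```python
import numpy as np

batch, fields, emb = 4, 6, 8  # illustrative sizes only
all_features = np.random.rand(batch, fields, emb).astype("float32")

# Same computation as the diff's double loop: elementwise product of each
# field pair, summed over the embedding axis, one scalar per pair.
interactions = [
    np.sum(all_features[:, i, :] * all_features[:, j, :], axis=1, keepdims=True)
    for i in range(fields)
    for j in range(i + 1, fields)
]
pair_input = np.concatenate(interactions, axis=1)
assert pair_input.shape == (batch, fields * (fields - 1) // 2)  # C(fields, 2)
```

Since the pair count grows quadratically in the number of fields (raw plus generated), this enumeration dominates the graph size, which may be why the commit also slims the fc tower to [400, 400, 400].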
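After this commit the loss path is unconditional log loss; the old "squqre_loss" branch (typo as in the removed source) and its mse_loss call are gone. To our understanding of fluid.layers.log_loss in Paddle 1.x (treat the epsilon and exact formula as an assumption), it computes elementwise binary cross entropy. Note also that the variable named avg_cost is actually a sum:

```python
import numpy as np

def log_loss(pred, label, epsilon=1e-4):
    # Elementwise binary cross entropy, as we understand
    # fluid.layers.log_loss; epsilon guards against log(0).
    return (-label * np.log(pred + epsilon)
            - (1.0 - label) * np.log(1.0 - pred + epsilon))

pred = np.array([0.9, 0.2, 0.7], dtype="float32")   # sigmoid outputs
label = np.array([1.0, 0.0, 1.0], dtype="float32")  # cast to float32, as in the diff
cost = log_loss(pred, label)
avg_cost = cost.sum()  # reduce_sum, not a mean, despite the name
```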