BaiXuePrincess / PaddleRec (forked from PaddlePaddle / PaddleRec)
Commit 56b07f61
merge init_slot into new model style

Authored May 25, 2020 by frankwhzhang
Parent: 69d53643
Showing 2 changed files with 52 additions and 38 deletions (+52, -38)
core/model.py             +29  -1
models/rank/dnn/model.py  +23  -37
core/model.py
@@ -134,7 +134,35 @@ class Model(object):
         return self._build_optimizer(optimizer, learning_rate)
 
     def input_data(self, is_infer=False):
-        return None
+        sparse_slots = envs.get_global_env("sparse_slots", None,
+                                           "train.reader")
+        dense_slots = envs.get_global_env("dense_slots", None,
+                                          "train.reader")
+        if sparse_slots is not None or dense_slots is not None:
+            sparse_slots = sparse_slots.strip().split(" ")
+            dense_slots = dense_slots.strip().split(" ")
+            dense_slots_shape = [[
+                int(j) for j in i.split(":")[1].strip("[]").split(",")
+            ] for i in dense_slots]
+            dense_slots = [i.split(":")[0] for i in dense_slots]
+            self._dense_data_var = []
+            data_var_ = []
+            for i in range(len(dense_slots)):
+                l = fluid.layers.data(
+                    name=dense_slots[i],
+                    shape=dense_slots_shape[i],
+                    dtype="float32")
+                data_var_.append(l)
+                self._dense_data_var.append(l)
+            self._sparse_data_var = []
+            for name in sparse_slots:
+                l = fluid.layers.data(
+                    name=name, shape=[1], lod_level=1, dtype="int64")
+                data_var_.append(l)
+                self._sparse_data_var.append(l)
+            return data_var_
+        else:
+            return None
 
     def net(self, is_infer=False):
         return None
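The slot strings parsed above come from the sparse_slots and dense_slots entries under train.reader in the model's config. A minimal standalone sketch of the same parsing, with made-up slot strings (the values below are assumptions for illustration, not taken from this commit):

# Standalone sketch of the slot parsing that input_data() now performs.
# The example slot strings are assumptions, not values from the commit.
sparse_slots = "click 1 2 3"        # space-separated sparse slot names
dense_slots = "dense_var:[13]"      # space-separated name:[shape] entries

sparse_slots = sparse_slots.strip().split(" ")
dense_slots = dense_slots.strip().split(" ")
dense_slots_shape = [
    [int(j) for j in i.split(":")[1].strip("[]").split(",")]
    for i in dense_slots
]
dense_slots = [i.split(":")[0] for i in dense_slots]

print(sparse_slots)       # ['click', '1', '2', '3']
print(dense_slots)        # ['dense_var']
print(dense_slots_shape)  # [[13]]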
models/rank/dnn/model.py
@@ -24,40 +24,33 @@ class Model(ModelBase):
     def __init__(self, config):
         ModelBase.__init__(self, config)
 
-    def input(self):
+    def _init_hyper_parameters(self):
+        self.is_distributed = True if envs.get_trainer(
+        ) == "CtrTrainer" else False
+        self.sparse_feature_number = envs.get_global_env(
+            "hyper_parameters.sparse_feature_number", None, self._namespace)
+        self.sparse_feature_dim = envs.get_global_env(
+            "hyper_parameters.sparse_feature_dim", None, self._namespace)
+        self.learning_rate = envs.get_global_env(
+            "hyper_parameters.learning_rate", None, self._namespace)
+
+    def net(self, input, is_infer=False):
         self.sparse_inputs = self._sparse_data_var[1:]
         self.dense_input = self._dense_data_var[0]
         self.label_input = self._sparse_data_var[0]
 
-    def net(self):
-        is_distributed = True if envs.get_trainer() == "CtrTrainer" else False
-        sparse_feature_number = envs.get_global_env(
-            "hyper_parameters.sparse_feature_number", None, self._namespace)
-        sparse_feature_dim = envs.get_global_env(
-            "hyper_parameters.sparse_feature_dim", None, self._namespace)
-
         def embedding_layer(input):
             emb = fluid.layers.embedding(
                 input=input,
                 is_sparse=True,
-                is_distributed=is_distributed,
-                size=[sparse_feature_number, sparse_feature_dim],
+                is_distributed=self.is_distributed,
+                size=[self.sparse_feature_number, self.sparse_feature_dim],
                 param_attr=fluid.ParamAttr(
                     name="SparseFeatFactors",
                     initializer=fluid.initializer.Uniform()), )
             emb_sum = fluid.layers.sequence_pool(input=emb, pool_type='sum')
             return emb_sum
 
-        def fc(input, output_size):
-            output = fluid.layers.fc(
-                input=input,
-                size=output_size,
-                act='relu',
-                param_attr=fluid.ParamAttr(
-                    initializer=fluid.initializer.Normal(
-                        scale=1.0 / math.sqrt(input.shape[1]))))
-            return output
-
         sparse_embed_seq = list(map(embedding_layer, self.sparse_inputs))
         concated = fluid.layers.concat(
             sparse_embed_seq + [self.dense_input], axis=1)
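The three assignments at the top of the new net() encode the slot layout the model expects: the first sparse slot is the label, the remaining sparse slots are the id features, and the single dense slot holds the continuous features. A tiny stand-in sketch, with hypothetical slot names and plain strings in place of the fluid data variables:

# Hypothetical slot names; strings stand in for the fluid data variables.
sparse_data_var = ["click", "feat_1", "feat_2", "feat_3"]
dense_data_var = ["dense_var"]

label_input = sparse_data_var[0]       # first sparse slot -> "click"
sparse_inputs = sparse_data_var[1:]    # remaining sparse slots -> feature ids
dense_input = dense_data_var[0]        # single dense slot -> "dense_var"
print(label_input, sparse_inputs, dense_input)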
@@ -67,7 +60,14 @@ class Model(ModelBase):
                                             self._namespace)
 
         for size in hidden_layers:
-            fcs.append(fc(fcs[-1], size))
+            output = fluid.layers.fc(
+                input=fcs[-1],
+                size=size,
+                act='relu',
+                param_attr=fluid.ParamAttr(
+                    initializer=fluid.initializer.Normal(
+                        scale=1.0 / math.sqrt(fcs[-1].shape[1]))))
+            fcs.append(output)
 
         predict = fluid.layers.fc(
             input=fcs[-1],
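In the rewritten loop each hidden layer consumes fcs[-1], so the list of sizes read from the config determines the depth and widths of the MLP, and each layer's Normal initializer is scaled by 1/sqrt(fan_in). A shape-only sketch with assumed sizes (the hidden sizes and input width below are not from this commit):

import math

# Assumed sizes; the real values come from the model's hyper_parameters config.
hidden_layers = [512, 256, 128]
width = 103                           # assumed width of the concatenated input

for size in hidden_layers:
    scale = 1.0 / math.sqrt(width)    # matches Normal(scale=1/sqrt(fan_in)) above
    print(f"fc: {width} -> {size}, relu, init scale {scale:.3f}")
    width = size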
@@ -78,13 +78,10 @@ class Model(ModelBase):
         self.predict = predict
 
-    def avg_loss(self):
         cost = fluid.layers.cross_entropy(
             input=self.predict, label=self.label_input)
         avg_cost = fluid.layers.reduce_mean(cost)
         self._cost = avg_cost
 
-    def metrics(self):
         auc, batch_auc, _ = fluid.layers.auc(input=self.predict,
                                              label=self.label_input,
                                              num_thresholds=2**12,
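For intuition, cross_entropy here takes -log of the probability the model assigns to the true class, and reduce_mean averages that over the batch. A tiny numeric sketch with made-up predictions:

import math

# Made-up batch of two examples; each row is [p(class 0), p(class 1)].
predict = [[0.9, 0.1], [0.3, 0.7]]
label = [0, 1]

cost = [-math.log(p[y]) for p, y in zip(predict, label)]   # per-example cross entropy
avg_cost = sum(cost) / len(cost)                           # reduce_mean over the batch
print(round(avg_cost, 3))                                  # 0.231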
@@ -92,20 +89,9 @@ class Model(ModelBase):
         self._metrics["AUC"] = auc
         self._metrics["BATCH_AUC"] = batch_auc
 
-    def train_net(self):
-        self._init_slots()
-        self.input()
-        self.net()
-        self.avg_loss()
-        self.metrics()
-
     def optimizer(self):
-        learning_rate = envs.get_global_env("hyper_parameters.learning_rate",
-                                            None, self._namespace)
-        optimizer = fluid.optimizer.Adam(learning_rate, lazy_mode=True)
+        optimizer = fluid.optimizer.Adam(self.learning_rate, lazy_mode=True)
         return optimizer
 
     def infer_net(self):
-        self._init_slots()
-        self.input()
-        self.net()
+        pass
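Taken together, the file now reads its hyper-parameters once in _init_hyper_parameters() and the other hooks reuse the stored values (optimizer() uses self.learning_rate instead of re-reading the config), while the per-model train_net() disappears and infer_net() becomes a no-op, presumably because the base class drives the build in the new model style. A toy stand-in, not PaddleRec code, of that shape:

# Toy illustration of the new style: read config once, reuse it in the hooks.
class ToyModel:
    def _init_hyper_parameters(self):
        # would be envs.get_global_env("hyper_parameters.learning_rate", ...)
        self.learning_rate = 0.001

    def optimizer(self):
        # stands in for fluid.optimizer.Adam(self.learning_rate, lazy_mode=True)
        return ("Adam", self.learning_rate)

model = ToyModel()
model._init_hyper_parameters()
print(model.optimizer())   # ('Adam', 0.001)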