PaddlePaddle / PaddleRec, commit bfce2242
Authored May 20, 2020 by tangwei

fix code style

Parent: 988a26aa

Showing 6 changed files with 69 additions and 45 deletions (+69, -45)
core/engine/cluster/cluster.py   +3  -1
core/engine/local_cluster.py     +11 -6
core/engine/local_mpi.py         +5  -3
core/factory.py                  +15 -16
core/metrics/auc_metrics.py      +11 -6
core/model.py                    +24 -13
core/engine/cluster/cluster.py

@@ -27,6 +27,7 @@ from paddlerec.core.utils import envs
class ClusterEngine(Engine):
    def __init_impl__(self):
        abs_dir = os.path.dirname(os.path.abspath(__file__))
        backend = envs.get_runtime_environ("engine_backend")
        if backend == "PaddleCloud":
            self.submit_script = os.path.join(abs_dir, "cloud/cluster.sh")

@@ -57,4 +58,5 @@ class ClusterEngine(Engine):
            self.start_worker_procs()
        else:
            raise ValueError(
                "role {} error, must in MASTER/WORKER".format(role))
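The hunks above pick a backend-specific submit script and validate the runtime role. A minimal standalone sketch of that dispatch pattern; the helper names and paths below are illustrative, not part of PaddleRec's API:

import os


def select_submit_script(backend, abs_dir):
    # Map an engine backend name to the shell script that submits the job,
    # mirroring the PaddleCloud branch in ClusterEngine.__init_impl__.
    scripts = {"PaddleCloud": "cloud/cluster.sh"}
    if backend not in scripts:
        raise ValueError("backend {} is not supported".format(backend))
    return os.path.join(abs_dir, scripts[backend])


def dispatch(role):
    # Mirror the MASTER/WORKER validation at the end of the second hunk; the
    # real engine starts the corresponding processes instead of returning a label.
    if role in ("MASTER", "WORKER"):
        return role.lower()
    raise ValueError("role {} error, must in MASTER/WORKER".format(role))


print(select_submit_script("PaddleCloud", "/path/to/engine"))  # illustrative path
print(dispatch("WORKER"))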
core/engine/local_cluster.py

@@ -46,10 +46,13 @@ class LocalClusterEngine(Engine):
                ports.append(new_port)
                break
        user_endpoints = ",".join(["127.0.0.1:" + str(x) for x in ports])

        user_endpoints_ips = [
            x.split(":")[0] for x in user_endpoints.split(",")
        ]
        user_endpoints_port = [
            x.split(":")[1] for x in user_endpoints.split(",")
        ]

        factory = "paddlerec.core.factory"
        cmd = [sys.executable, "-u", "-m", factory, self.trainer]

@@ -97,8 +100,10 @@ class LocalClusterEngine(Engine):
                if len(log_fns) > 0:
                    log_fns[i].close()
                procs[i].terminate()
            print(
                "all workers already completed, you can view logs under the `{}` directory".
                format(logs_dir),
                file=sys.stderr)

    def run(self):
        self.start_procs()
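The reformatted comprehensions in the first hunk only split a comma-separated endpoint string into parallel IP and port lists. The same logic can be checked in isolation (the port numbers below are made up):

ports = [36001, 36002]  # invented example ports
user_endpoints = ",".join(["127.0.0.1:" + str(x) for x in ports])

# "127.0.0.1:36001,127.0.0.1:36002" -> parallel lists of IPs and ports
user_endpoints_ips = [x.split(":")[0] for x in user_endpoints.split(",")]
user_endpoints_port = [x.split(":")[1] for x in user_endpoints.split(",")]

print(user_endpoints_ips)   # ['127.0.0.1', '127.0.0.1']
print(user_endpoints_port)  # ['36001', '36002']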
core/engine/local_mpi.py

@@ -26,7 +26,6 @@ from paddlerec.core.engine.engine import Engine
class LocalMPIEngine(Engine):
    def start_procs(self):
        logs_dir = self.envs["log_dir"]
        default_env = os.environ.copy()
        current_env = copy.copy(default_env)
        current_env.pop("http_proxy", None)

@@ -42,7 +41,8 @@ class LocalMPIEngine(Engine):
            os.system("mkdir -p {}".format(logs_dir))
            fn = open("%s/job.log" % logs_dir, "w")
            log_fns.append(fn)
            proc = subprocess.Popen(
                cmd, env=current_env, stdout=fn, stderr=fn, cwd=os.getcwd())
        else:
            proc = subprocess.Popen(cmd, env=current_env, cwd=os.getcwd())
        procs.append(proc)

@@ -51,7 +51,9 @@ class LocalMPIEngine(Engine):
            if len(log_fns) > 0:
                log_fns[i].close()
            procs[i].wait()
        print(
            "all workers and parameter servers already completed",
            file=sys.stderr)

    def run(self):
        self.start_procs()
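The hunks above wrap a launch-and-wait pattern: each worker's stdout and stderr go to a per-job log file, then the handles are closed and the processes are waited on. A self-contained sketch of that pattern, assuming a placeholder command and log directory rather than the engine's real configuration:

import os
import subprocess
import sys

logs_dir = "logs"  # placeholder; the engine reads this from self.envs["log_dir"]
os.system("mkdir -p {}".format(logs_dir))

log_fns, procs = [], []
cmd = [sys.executable, "-c", "print('hello from worker')"]  # placeholder command

fn = open("%s/job.log" % logs_dir, "w")
log_fns.append(fn)
proc = subprocess.Popen(
    cmd, env=os.environ.copy(), stdout=fn, stderr=fn, cwd=os.getcwd())
procs.append(proc)

for i in range(len(procs)):
    if len(log_fns) > 0:
        log_fns[i].close()
    procs[i].wait()
print("all workers and parameter servers already completed", file=sys.stderr)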
core/factory.py

@@ -19,24 +19,23 @@ import yaml
from paddlerec.core.utils import envs

trainer_abs = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "trainers")
trainers = {}


def trainer_registry():
    trainers["SingleTrainer"] = os.path.join(trainer_abs, "single_trainer.py")
    trainers["ClusterTrainer"] = os.path.join(trainer_abs,
                                              "cluster_trainer.py")
    trainers["CtrCodingTrainer"] = os.path.join(trainer_abs,
                                                "ctr_coding_trainer.py")
    trainers["CtrModulTrainer"] = os.path.join(trainer_abs,
                                               "ctr_modul_trainer.py")
    trainers["TDMSingleTrainer"] = os.path.join(trainer_abs,
                                                "tdm_single_trainer.py")
    trainers["TDMClusterTrainer"] = os.path.join(trainer_abs,
                                                 "tdm_cluster_trainer.py")


trainer_registry()

@@ -55,8 +54,8 @@ class TrainerFactory(object):
        if trainer_abs is None:
            if not os.path.isfile(train_mode):
                raise IOError(
                    "trainer {} can not be recognized".format(train_mode))
            trainer_abs = train_mode
            train_mode = "UserDefineTrainer"
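As the two hunks above show, trainer names resolve through a registry of file paths, and an unregistered train_mode falls back to being treated as a user-defined trainer file. A hedged sketch of that lookup; the registry entries echo the diff, while the function name and the shortened directory are illustrative:

import os

trainer_abs = "trainers"  # stands in for the package-relative "trainers" directory
trainers = {
    "SingleTrainer": os.path.join(trainer_abs, "single_trainer.py"),
    "ClusterTrainer": os.path.join(trainer_abs, "cluster_trainer.py"),
}


def resolve_trainer(train_mode):
    # Hypothetical helper mirroring the TrainerFactory branch in the second hunk.
    trainer_path = trainers.get(train_mode)
    if trainer_path is None:
        if not os.path.isfile(train_mode):
            raise IOError("trainer {} can not be recognized".format(train_mode))
        # Any existing file path is accepted as a user-defined trainer.
        trainer_path = train_mode
        train_mode = "UserDefineTrainer"
    return train_mode, trainer_path


print(resolve_trainer("SingleTrainer"))
print(resolve_trainer(__file__))  # an existing file resolves as a user-defined trainer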
core/metrics/auc_metrics.py

@@ -22,7 +22,7 @@ from paddlerec.core.metric import Metric
class AUCMetric(Metric):
    """
-    Metric For Paddle Model
+    Metric For Fluid Model
    """

    def __init__(self, config, fleet):

@@ -83,7 +83,8 @@ class AUCMetric(Metric):
            if scope.find_var(metric_item['var'].name) is None:
                result[metric_name] = None
                continue
            result[metric_name] = self.get_metric(scope,
                                                  metric_item['var'].name)
        return result

    def calculate_auc(self, global_pos, global_neg):

@@ -178,14 +179,18 @@ class AUCMetric(Metric):
            self._result['mean_q'] = 0
            return self._result
        if 'stat_pos' in result and 'stat_neg' in result:
            result['auc'] = self.calculate_auc(result['stat_pos'],
                                               result['stat_neg'])
            result['bucket_error'] = self.calculate_auc(result['stat_pos'],
                                                        result['stat_neg'])
        if 'pos_ins_num' in result:
            result['actual_ctr'] = result['pos_ins_num'] / result['total_ins_num']
        if 'abserr' in result:
            result['mae'] = result['abserr'] / result['total_ins_num']
        if 'sqrerr' in result:
            result['rmse'] = math.sqrt(result['sqrerr'] / result['total_ins_num'])
        if 'prob' in result:
            result['predict_ctr'] = result['prob'] / result['total_ins_num']
            if abs(result['predict_ctr']) > 1e-6:
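The tail of the last hunk derives ratio metrics from accumulated counters. A small worked example with invented numbers, mirroring only the formulas visible in the diff:

import math

result = {
    "pos_ins_num": 25.0,
    "total_ins_num": 100.0,
    "abserr": 12.0,
    "sqrerr": 3.0,
    "prob": 24.0,
}

result["actual_ctr"] = result["pos_ins_num"] / result["total_ins_num"]   # 0.25
result["mae"] = result["abserr"] / result["total_ins_num"]               # 0.12
result["rmse"] = math.sqrt(result["sqrerr"] / result["total_ins_num"])   # ~0.173
result["predict_ctr"] = result["prob"] / result["total_ins_num"]         # 0.24
if abs(result["predict_ctr"]) > 1e-6:
    pass  # the computation guarded by this check lies outside the hunk shown above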
core/model.py

@@ -20,7 +20,7 @@ from paddlerec.core.utils import envs
class Model(object):
    """
    Base Model
    """

    __metaclass__ = abc.ABCMeta

@@ -39,32 +39,43 @@ class Model(object):
        self._platform = envs.get_platform()

    def _init_slots(self):
        sparse_slots = envs.get_global_env("sparse_slots", None, "train.reader")
        dense_slots = envs.get_global_env("dense_slots", None, "train.reader")

        if sparse_slots is not None or dense_slots is not None:
            sparse_slots = sparse_slots.strip().split(" ")
            dense_slots = dense_slots.strip().split(" ")
            dense_slots_shape = [[
                int(j) for j in i.split(":")[1].strip("[]").split(",")
            ] for i in dense_slots]
            dense_slots = [i.split(":")[0] for i in dense_slots]
            self._dense_data_var = []
            for i in range(len(dense_slots)):
                l = fluid.layers.data(
                    name=dense_slots[i],
                    shape=dense_slots_shape[i],
                    dtype="float32")
                self._data_var.append(l)
                self._dense_data_var.append(l)
            self._sparse_data_var = []
            for name in sparse_slots:
                l = fluid.layers.data(
                    name=name, shape=[1], lod_level=1, dtype="int64")
                self._data_var.append(l)
                self._sparse_data_var.append(l)

        dataset_class = envs.get_global_env("dataset_class", None, "train.reader")
        if dataset_class == "DataLoader":
            self._init_dataloader()

    def _init_dataloader(self):
        self._data_loader = fluid.io.DataLoader.from_generator(
            feed_list=self._data_var,
            capacity=64,
            use_double_buffer=False,
            iterable=False)

    def get_inputs(self):
        return self._data_var
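The _init_slots hunk above parses space-separated slot specs of the form name:[shape] into a name list and a shape list before declaring fluid.layers.data inputs. The parsing itself is plain Python and can be checked in isolation (the slot strings below are invented examples):

dense_slots = "dense_feature:[13] wide_feature:[8,1]".strip().split(" ")

dense_slots_shape = [[
    int(j) for j in i.split(":")[1].strip("[]").split(",")
] for i in dense_slots]
dense_slots = [i.split(":")[0] for i in dense_slots]

print(dense_slots)        # ['dense_feature', 'wide_feature']
print(dense_slots_shape)  # [[13], [8, 1]]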
@@ -96,8 +107,8 @@ class Model(object):
                "configured optimizer can only supported SGD/Adam/Adagrad")

        if name == "SGD":
            reg = envs.get_global_env("hyper_parameters.reg", 0.0001,
                                      self._namespace)
            optimizer_i = fluid.optimizer.SGD(
                lr, regularization=fluid.regularizer.L2DecayRegularizer(reg))
        elif name == "ADAM":

@@ -111,10 +122,10 @@ class Model(object):
        return optimizer_i

    def optimizer(self):
        learning_rate = envs.get_global_env("hyper_parameters.learning_rate",
                                            None, self._namespace)
        optimizer = envs.get_global_env("hyper_parameters.optimizer", None,
                                        self._namespace)
        print(">>>>>>>>>>>.learnig rate: %s" % learning_rate)
        return self._build_optimizer(optimizer, learning_rate)
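The optimizer hunks read hyper-parameters from a configuration namespace and dispatch on the optimizer name. A Paddle-free sketch of that selection logic; the config dict, the train.model namespace, and the stub functions are illustrative stand-ins, not envs' real implementation:

config = {
    "train.model.hyper_parameters.learning_rate": 0.001,
    "train.model.hyper_parameters.optimizer": "SGD",
    "train.model.hyper_parameters.reg": 0.0001,
}


def get_global_env(name, default, namespace):
    # Stand-in for paddlerec.core.utils.envs.get_global_env.
    return config.get(namespace + "." + name, default)


def build_optimizer(name, lr):
    # Loosely mirrors Model._build_optimizer; returns a tuple instead of a
    # fluid optimizer object so the sketch runs without Paddle installed.
    name = name.upper()
    if name == "SGD":
        reg = get_global_env("hyper_parameters.reg", 0.0001, "train.model")
        return ("SGD", lr, reg)
    elif name == "ADAM":
        return ("ADAM", lr, None)
    raise ValueError("configured optimizer can only supported SGD/Adam/Adagrad")


learning_rate = get_global_env("hyper_parameters.learning_rate", None, "train.model")
optimizer = get_global_env("hyper_parameters.optimizer", None, "train.model")
print("learning rate: %s" % learning_rate)
print(build_optimizer(optimizer, learning_rate))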