机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 8de4d31a

Authored on Mar 07, 2019 by heqiaozhi
Committed by dongdaxiang on Mar 29, 2019

refactor async exe

Parent 24863897
Showing 4 changed files with 269 additions and 69 deletions (+269 −69)
python/paddle/fluid/async_executor.py          +9   -3
python/paddle/fluid/distributed/downpour.py    +63  -23
python/paddle/fluid/distributed/node.py        +24  -0
python/paddle/fluid/distributed/ps_pb2.py      +173 -43
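The commit message is terse, but the diff below shows the shape of the refactor: DownpourSGD.minimize now takes a list of losses and emits one program_config per loss, identified by str(id(program)), and AsyncExecutor passes the same identifier down to run_from_files, presumably so the runtime can pick the right table configuration for whichever program it is executing. The following is a minimal, self-contained sketch of that bookkeeping idea only; Program and Loss are hypothetical stand-ins, not Paddle classes.

# Illustration only: per-program configs keyed by str(id(program)),
# mirroring the program_id field this commit introduces.
class Program(object):
    pass

class Loss(object):
    def __init__(self, program):
        self.program = program  # stand-in for loss.block.program

losses = [Loss(Program()), Loss(Program())]

program_configs = []
for loss_index in range(len(losses)):
    program_configs.append({
        "program_id": str(id(losses[loss_index].program)),
        "pull_sparse_table_id": [0],
        "push_sparse_table_id": [0],
    })

for config in program_configs:
    print(config["program_id"], config["pull_sparse_table_id"])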
python/paddle/fluid/async_executor.py

@@ -121,7 +121,9 @@ class AsyncExecutor(object):
         with open("trainer_desc.proto", "w") as fout:
             fout.write(trainer._desc())
         # define a trainer and a device_worker here
-        self.executor.run_from_files(program_desc, trainer._desc(), debug)
+        self.executor.run_from_files(program_desc,
+                                     trainer._desc(), debug,
+                                     str(id(program_desc)))
         '''

     def run(self,
@@ -194,7 +196,7 @@ class AsyncExecutor(object):
         self.executor.run_from_files(program_desc,
                               data_feed.desc(), filelist, thread_num,
-                              fetch_var_names, mode, debug)
+                              fetch_var_names, mode, debug, str(id(program_desc)))
         '''

     def download_data(self,
@@ -313,7 +315,11 @@ class AsyncExecutor(object):
         self.dist_desc = dist_desc
         place = core.CPUPlace()
         executor = Executor(place)
-        executor.run(startup_program)
+        if isinstance(startup_program, list):
+            for sp in startup_program:
+                executor.run(sp)
+        else:
+            executor.run(startup_program)
         self.instance.barrier_all()  # wait all server start
         ips = self.instance.gather_ips()
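The last hunk above lets init_server run either a single startup program or a list of them. Below is a minimal, self-contained sketch of that branch; run() and run_startup() are hypothetical stand-ins for Executor.run and the surrounding method, used only for illustration.

# Illustration only: accept one startup program or a list of them,
# mirroring the isinstance(startup_program, list) branch added above.
def run(program):
    print("running startup program: %s" % program)

def run_startup(startup_program):
    if isinstance(startup_program, list):
        for sp in startup_program:   # one executor pass per program
            run(sp)
    else:
        run(startup_program)         # old single-program behaviour

run_startup("joint_startup_program")
run_startup(["startup_a", "startup_b"])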
python/paddle/fluid/distributed/downpour.py

@@ -43,9 +43,13 @@ class DownpourSGD(object):
         self.learning_rate_ = learning_rate
         self.window_ = window
         self.type = "downpour"
+        self.data_norm_name = [
+            ".batch_size", ".batch_square_sum", ".batch_sum",
+            ".batch_size@GRAD", ".batch_square_sum@GRAD", ".batch_sum@GRAD"
+        ]

     def minimize(self,
-                 loss,
+                 losses,
                  startup_program=None,
                  parameter_list=None,
                  no_grad_set=None):
@@ -65,39 +69,75 @@ class DownpourSGD(object):
             worker_skipped_ops: operator names that need
                                 to be skipped during execution
         """
-        params_grads = sorted(
-            append_backward(loss, parameter_list, no_grad_set),
-            key=lambda x: x[0].name)
-        table_name = find_distributed_lookup_table(loss.block.program)
+        if not isinstance(losses, list):
+            raise ValueError('losses is a list, just lick [model.cost]')
+        table_name = find_distributed_lookup_table(losses[0].block.program)
         prefetch_slots = find_distributed_lookup_table_inputs(
-            loss.block.program, table_name)
+            losses[0].block.program, table_name)
         prefetch_slots_emb = find_distributed_lookup_table_outputs(
-            loss.block.program, table_name)
-        ps_param = pslib.PSParameter()
+            losses[0].block.program, table_name)
         server = DownpourServer()
         # window is communication strategy
         worker = DownpourWorker(self.window_)
         # Todo(guru4elephant): support multiple tables definitions
         # currently support one big sparse table
         sparse_table_index = 0
-        # currently merge all dense parameters into one dense table
-        dense_table_index = 1
-        params = []
-        grads = []
-        for i in params_grads:
-            params.append(i[0])
-        for i in params_grads:
-            grads.append(i[1])
         server.add_sparse_table(sparse_table_index, self.learning_rate_,
                                 prefetch_slots, prefetch_slots_emb)
-        server.add_dense_table(dense_table_index, self.learning_rate_,
-                               params, grads)
         worker.add_sparse_table(sparse_table_index, self.learning_rate_,
                                 prefetch_slots, prefetch_slots_emb)
-        worker.add_dense_table(dense_table_index, self.learning_rate_,
-                               params, grads)
+        ps_param = pslib.PSParameter()
+        dense_table_index = 1
+        program_configs = []
+        for loss_index in range(len(losses)):
+            program_config = ps_param.trainer_param.program_config.add()
+            program_config.program_id = str(
+                id(losses[loss_index].block.program))
+            program_config.pull_sparse_table_id.extend([sparse_table_index])
+            program_config.push_sparse_table_id.extend([sparse_table_index])
+            params_grads = sorted(
+                append_backward(losses[loss_index], parameter_list,
+                                no_grad_set),
+                key=lambda x: x[0].name)
+            params = []
+            grads = []
+            data_norm_params = []
+            data_norm_grads = []
+            for i in params_grads:
+                is_data_norm_data = False
+                for data_norm_name in self.data_norm_name:
+                    if i[0].name.endswith(data_norm_name):
+                        is_data_norm_data = True
+                        data_norm_params.append(i[0])
+                if not is_data_norm_data:
+                    params.append(i[0])
+            for i in params_grads:
+                is_data_norm_data = False
+                for data_norm_grad in self.data_norm_name:
+                    if i[0].name.endswith(data_norm_grad):
+                        is_data_norm_data = True
+                        data_norm_grads.append(i[1])
+                if not is_data_norm_data:
+                    grads.append(i[1])
+            server.add_dense_table(dense_table_index, self.learning_rate_,
+                                   params, grads)
+            worker.add_dense_table(dense_table_index, self.learning_rate_,
+                                   params, grads)
+            program_config.pull_dense_table_id.extend([dense_table_index])
+            program_config.push_dense_table_id.extend([dense_table_index])
+            if len(data_norm_params) != 0 and len(data_norm_grads) != 0:
+                dense_table_index += 1
+                server.add_data_norm_table(dense_table_index,
+                                           self.learning_rate_,
+                                           data_norm_params, data_norm_grads)
+                worker.add_dense_table(dense_table_index, self.learning_rate_,
+                                       data_norm_params, data_norm_grads)
+                program_config.pull_dense_table_id.extend([dense_table_index])
+                program_config.push_dense_table_id.extend([dense_table_index])
+            dense_table_index += 1
+            program_configs.append(program_config)
         ps_param.server_param.CopyFrom(server.get_desc())
         ps_param.trainer_param.CopyFrom(worker.get_desc())
+        for program_config in program_configs:
+            ps_param.trainer_param.program_config.extend([program_config])
         # Todo(guru4elephant): figure out how to support more sparse parameters
         # currently only support lookup_table
         worker_skipped_ops = ["lookup_table", "lookup_table_grad"]
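Inside the new per-loss loop above, parameters whose names end with one of the data_norm_name suffixes are collected into data_norm_params / data_norm_grads and given their own dense table (the summary table added in node.py below), while everything else goes to the regular dense table. A minimal sketch of that suffix-based split follows, using made-up parameter names purely for illustration.

# Sketch of the suffix split performed inside minimize above; plain Python,
# with hypothetical (param_name, grad_name) pairs standing in for params_grads.
data_norm_suffixes = [
    ".batch_size", ".batch_square_sum", ".batch_sum",
    ".batch_size@GRAD", ".batch_square_sum@GRAD", ".batch_sum@GRAD"
]

params_grads = [
    ("fc_0.w_0", "fc_0.w_0@GRAD"),
    ("bn_0.batch_size", "bn_0.batch_size@GRAD"),
    ("bn_0.batch_sum", "bn_0.batch_sum@GRAD"),
]

params, data_norm_params = [], []
for name, grad in params_grads:
    if any(name.endswith(suffix) for suffix in data_norm_suffixes):
        data_norm_params.append(name)   # routed to the summary/data-norm table
    else:
        params.append(name)             # routed to the regular dense table

print(params)            # ['fc_0.w_0']
print(data_norm_params)  # ['bn_0.batch_size', 'bn_0.batch_sum']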
python/paddle/fluid/distributed/node.py

@@ -112,6 +112,30 @@ class DownpourServer(Server):
             fea_dim += reduce(lambda x, y: x * y, param.shape, 1)
         table.accessor.fea_dim = fea_dim

+    def add_data_norm_table(self, table_id, learning_rate, param_var,
+                            grad_var):
+        """
+        Args:
+            table_id(int): id of sparse params table
+            learning_rate(float): the learning rate used to update parameters. \
+                Can be a float value
+            param_var(list): all dense param. it is a list.
+            grad_var(list): all dense grad parm it is a list.
+        Returns:
+            return None
+        """
+        table = self.server_.downpour_server_param.downpour_table_param.add()
+        table.table_id = table_id
+        table.table_class = "DownpourDenseTable"
+        table.type = pslib.PS_DENSE_TABLE
+        table.accessor.accessor_class = "DownpourDenseValueAccessor"
+        table.accessor.dense_sgd_param.name = "summary"
+        table.accessor.dense_sgd_param.summary.summary_decay_rate = 0.999999
+        fea_dim = 0
+        for param in filter(lambda x: x.name.find("embedding") == -1,
+                            param_var):
+            fea_dim += reduce(lambda x, y: x * y, param.shape, 1)
+        table.accessor.fea_dim = fea_dim
+
     def get_desc(self):
         """
         Return downpour server program_desc
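The new add_data_norm_table sizes its accessor the same way as the dense-table code just above it in the hunk: it sums the element counts of every parameter whose name does not contain "embedding". Below is a small self-contained sketch of that fea_dim computation; Param and the shapes are made up for illustration.

# Sketch of the fea_dim computation used above.
from functools import reduce  # Python 3; in the Python 2-era code reduce is a builtin

class Param(object):
    def __init__(self, name, shape):
        self.name = name
        self.shape = shape

param_var = [
    Param("fc_0.w_0", [128, 64]),
    Param("fc_0.b_0", [64]),
    Param("embedding_0.w_0", [100000, 9]),  # skipped by the filter
]

fea_dim = 0
for param in filter(lambda x: x.name.find("embedding") == -1, param_var):
    fea_dim += reduce(lambda x, y: x * y, param.shape, 1)

print(fea_dim)  # 128*64 + 64 = 8256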
python/paddle/fluid/distributed/ps_pb2.py

This diff is collapsed (+173 −43; regenerated protobuf bindings).