Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle, in sync with the upstream project)
Unverified commit dd7a48bd
Authored on Apr 23, 2018 by Wu Yi; committed via GitHub on Apr 23, 2018.
Merge pull request #10123 from jacquesqiao/split-optimize-op-into-signle-blocks

split optimization ops on pserver to independent blocks

Parents: d89a3068, ba1e68d5
Changes: 2 changed files with 27 additions and 22 deletions (+27 −22)

python/paddle/fluid/distribute_transpiler.py  +23 −22
python/paddle/fluid/framework.py              +4 −0
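Before the per-file diffs, here is a minimal, self-contained sketch of the idea behind this commit: optimize ops whose inputs and outputs overlap are merged into one set with a union-find structure (the transpiler's own _create_ufind plays this role in the real code), and each resulting set is later given its own block in the pserver program instead of all sharing a single optimize block. The UnionFind class and the ops_connected predicate below are illustrative stand-ins, not the actual Fluid implementation.

class UnionFind(object):
    """Toy union-find over op objects; stands in for the transpiler's ufind."""

    def __init__(self, ops):
        self.parent = {op: op for op in ops}

    def find(self, op):
        while self.parent[op] is not op:
            op = self.parent[op]
        return op

    def union(self, a, b):
        ra, rb = self.find(a), self.find(b)
        if ra is not rb:
            self.parent[ra] = rb

    def is_connected(self, a, b):
        return self.find(a) is self.find(b)


def group_optimize_ops(optimize_ops, ops_connected):
    """Group ops into connected components; each group maps to one pserver block."""
    ufind = UnionFind(optimize_ops)
    for i, a in enumerate(optimize_ops):
        for b in optimize_ops[i + 1:]:
            if ops_connected(a, b):  # e.g. they read/write a common variable
                ufind.union(a, b)
    groups = {}
    for op in optimize_ops:
        groups.setdefault(ufind.find(op), []).append(op)
    return list(groups.values())

With the groups in hand, each group can be appended to its own block, which is what the new per_opt_block loop in the diff below does.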
python/paddle/fluid/distribute_transpiler.py

@@ -368,21 +368,19 @@ class DistributeTranspiler:
             else:
                 recv_inputs.append(single_trainer_var)
 
-        # step3
-        optimize_block = pserver_program.create_block(0)
-        # step 4
+        # step 3
         # Create a union-find data structure from optimize ops,
         # If two ops are connected, we could add these two ops
         # into one set.
         ufind = self._create_ufind(self.optimize_ops)
-        # step 4.2
+        # step 3.2
         # Iterate through the ops and append optimize op which
         # located on current pserver
         opt_op_on_pserver = []
         for _, op in enumerate(self.optimize_ops):
             if self._is_opt_op(op) and self._is_opt_op_on_pserver(endpoint, op):
                 opt_op_on_pserver.append(op)
-        # step 4.3
+        # step 3.3
         # Iterate through the ops, and if an op and the optimize ops
         # which located on current pserver are in one set, then
         # append it into the sub program.
@@ -415,29 +413,30 @@ class DistributeTranspiler:
             else:
                 self._append_pserver_non_opt_ops(block, op)
 
-        append_block = optimize_block
         # append lr decay ops to the child block if exists
         lr_ops = self._get_lr_ops()
         if len(lr_ops) > 0:
+            lr_decay_block = pserver_program.create_block(
+                pserver_program.num_blocks - 1)
             for _, op in enumerate(lr_ops):
-                self._append_pserver_non_opt_ops(append_block, op)
-            append_block = pserver_program.create_block(append_block.idx)
+                self._append_pserver_non_opt_ops(lr_decay_block, op)
 
         # append op to the current block
-        per_opt_block = append_block
+        pre_block_idx = pserver_program.num_blocks - 1
         for idx, opt_op in enumerate(opt_op_on_pserver):
+            per_opt_block = pserver_program.create_block(pre_block_idx)
             for _, op in enumerate(self.optimize_ops):
                 # optimizer is connected to itself
-                if ufind.is_connected(op, opt_op) and \
-                    op not in global_ops:
+                if ufind.is_connected(op, opt_op) and op not in global_ops:
                     __append_optimize_op__(op, per_opt_block)
-            if idx == len(opt_op_on_pserver) - 1 and global_ops:
-                per_opt_block = pserver_program.create_block(append_block.idx)
 
         # append global ops
-        for glb_op in global_ops:
-            __append_optimize_op__(glb_op, per_opt_block)
+        opt_state_block = None
+        if global_ops:
+            opt_state_block = pserver_program.create_block(
+                pserver_program.num_blocks - 1)
+            for glb_op in global_ops:
+                __append_optimize_op__(glb_op, opt_state_block)
 
         # NOT USED: single block version:
         #
@@ -451,10 +450,10 @@ class DistributeTranspiler:
         prefetch_block = None
         if self.has_distributed_lookup_table:
             pserver_index = self.pserver_endpoints.index(endpoint)
-            self._create_table_optimize_block(pserver_index, pserver_program,
-                                              append_block)
+            table_opt_block = self._create_table_optimize_block(
+                pserver_index, pserver_program, pre_block_idx)
             prefetch_block = self._create_prefetch_block(
-                pserver_index, pserver_program, optimize_block)
+                pserver_index, pserver_program, table_opt_block)
 
         # NOTE: if has_distributed_lookup_table is False, then prefetch_block will
         # not be executed, so it's safe to use optimize_block to hold the place
@@ -470,7 +469,7 @@ class DistributeTranspiler:
             inputs={'X': recv_inputs},
             outputs={},
             attrs={
-                "OptimizeBlock": optimize_block,
+                "OptimizeBlock": pserver_program.block(1),
                 "endpoint": endpoint,
                 "Fanin": self.trainer_num,
                 "PrefetchBlock": prefetch_block
@@ -663,7 +662,7 @@ class DistributeTranspiler:
         return prefetch_block
 
     def _create_table_optimize_block(self, pserver_index, pserver_program,
-                                     append_block):
+                                     pre_block_idx):
         def _clone_var(block, var, persistable=True):
             assert isinstance(var, Variable)
             return block.create_var(
@@ -700,7 +699,7 @@ class DistributeTranspiler:
             op for op in self.optimize_ops
             if op.input("Param")[0] == self.table_name
         ][0]
-        table_opt_block = pserver_program.create_block(append_block.idx)
+        table_opt_block = pserver_program.create_block(pre_block_idx)
         # only support sgd now
         assert table_opt_op.type == "sgd"
@@ -724,6 +723,8 @@ class DistributeTranspiler:
             outputs=outputs,
             attrs=table_opt_op.attrs)
 
+        return table_opt_block
+
     # ====================== private transpiler functions =====================
 
     def _create_vars_from_blocklist(self,
                                     program,
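To make the new control flow above easier to follow, here is a rough, runnable sketch of the resulting block layout on the pserver program: one child block per connected group of optimize ops, all created off the same parent index, plus one trailing block for the global ops shared by every group. ToyProgram, op_groups, and global_ops are illustrative stand-ins; the real code uses Fluid's Program.create_block (which returns a Block object rather than an index) and the __append_optimize_op__ helper shown in the diff.

class ToyProgram(object):
    """Stand-in for fluid.Program, tracking only a list of blocks."""

    def __init__(self):
        self.blocks = [[]]  # block 0 plays the role of the global block

    @property
    def num_blocks(self):
        return len(self.blocks)

    def create_block(self, parent_idx):
        # the real Program records parent_idx in its desc; here we just append
        self.blocks.append([])
        return len(self.blocks) - 1


def build_pserver_blocks(program, op_groups, global_ops):
    pre_block_idx = program.num_blocks - 1
    for group in op_groups:
        blk = program.create_block(pre_block_idx)  # one block per optimizer group
        program.blocks[blk].extend(group)
    if global_ops:  # e.g. ops that maintain state shared across all parameters
        blk = program.create_block(program.num_blocks - 1)
        program.blocks[blk].extend(global_ops)
    return program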
python/paddle/fluid/framework.py

@@ -1107,6 +1107,10 @@ class Program(object):
     def random_seed(self):
         return self._seed
 
+    @property
+    def num_blocks(self):
+        return self.desc.num_blocks()
+
     @random_seed.setter
     def random_seed(self, seed):
         if not isinstance(seed, int):
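The framework.py change simply exposes the block count of the underlying program desc as a num_blocks property, which the transpiler above uses to find the index of the last block created so far. A small usage sketch, assuming a stock paddle.fluid install of this vintage:

import paddle.fluid as fluid

prog = fluid.Program()
assert prog.num_blocks == 1  # a fresh Program contains only the global block

# create_block(0) appends a child of block 0, as the transpiler does above
prog.create_block(0)
print(prog.num_blocks)  # -> 2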