机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 9cd59990
Authored Aug 10, 2018 by minqiyang
Parent: be6ecec4

Fix dist transpiler unordered dict issue
Changes: showing 2 changed files with 14 additions and 11 deletions (+14 −11).

python/paddle/fluid/tests/unittests/test_dist_transpiler.py (+1 −0)
python/paddle/fluid/transpiler/distribute_transpiler.py (+13 −11)
python/paddle/fluid/tests/unittests/test_dist_transpiler.py

```diff
@@ -46,6 +46,7 @@ class TranspilerTest(unittest.TestCase):
     def get_main_program(self):
         main = fluid.Program()
+        main.random_seed = 1
         with fluid.program_guard(main):
             self.net_conf()
         self.origin_prog = main.clone()
```
python/paddle/fluid/transpiler/distribute_transpiler.py

```diff
@@ -31,6 +31,7 @@ Steps to transpile pserver:
 import math
 import random
 import numpy as np
+import collections

 from .ps_dispatcher import RoundRobin, HashName, PSDispatcher
 from .. import core, framework
```

```diff
@@ -218,8 +219,9 @@ class DistributeTranspiler(object):
         # fc_b@GRAD_trainer_0, fc_b@GRAD_trainer_1 --> pserver2
         # shuffle the map will avoid the uneven distribution above
         grad_var_mapping_items = list(self.grad_var_mapping.items())
         if not self.config.slice_var_up:
-            random.seed(self.trainer_num)
+            random.seed(self.origin_program.random_seed)
             random.shuffle(grad_var_mapping_items)

         for orig_varname, splited_vars in grad_var_mapping_items:
```
```diff
@@ -557,14 +559,14 @@ class DistributeTranspiler(object):
         # 1. create vars in pserver program to startup program
         pserver_vars = pserver_program.global_block().vars
-        created_var_map = dict()
+        created_var_map = collections.OrderedDict()
         for _, var in list(pserver_vars.items()):
             tmpvar = s_prog.global_block()._clone_variable(var)
             created_var_map[var.name] = tmpvar

         # 2. rename op outputs
         for op in orig_s_prog.global_block().ops:
-            new_outputs = dict()
+            new_outputs = collections.OrderedDict()
             # do not append startup op if var is not on this pserver
             op_on_pserver = False
             for key in op.output_names:
@@ -703,7 +705,7 @@ class DistributeTranspiler(object):
             self.origin_program, grad_blocks, add_trainer_suffix=self.trainer_num > 1)
-        self.grad_param_mapping = dict()
+        self.grad_param_mapping = collections.OrderedDict()
         for g, p in zip(grad_blocks, param_blocks):
             g_name, g_bid, _ = g.split(":")
             p_name, p_bid, _ = p.split(":")
@@ -711,7 +713,7 @@ class DistributeTranspiler(object):
                 self.param_var_mapping[p_name][int(p_bid)]
         # create mapping of endpoint -> split var to create pserver side program
-        self.param_grad_ep_mapping = dict()
+        self.param_grad_ep_mapping = collections.OrderedDict()
         [
             self.param_grad_ep_mapping.update({
                 ep: {
@@ -981,14 +983,14 @@ class DistributeTranspiler(object):
             block_list (list[(varname, block_id, block_size)]): List of gradient blocks.
             add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True.
         Returns:
-            var_mapping (dict(varname->[new_varname_variable])):A dict mapping
+            var_mapping (collections.OrderedDict(varname->[new_varname_variable])):A dict mapping
                 from original var name to each var split.
         """
         # varname->[(block_id, current_block_size)]
-        block_map = dict()
+        block_map = collections.OrderedDict()
-        var_mapping = dict()
+        var_mapping = collections.OrderedDict()
         for block_str in block_list:
             varname, offset, size = block_str.split(":")
             if varname not in block_map:
@@ -1181,7 +1183,7 @@ class DistributeTranspiler(object):
                           grad_to_block_id, origin_program, merged_var):
         program = optimize_block.program
         pserver_block = program.global_block()
-        new_inputs = dict()
+        new_inputs = collections.OrderedDict()
         # update param/grad shape first, then other inputs like
         # moment can use the updated shape
         for key in opt_op.input_names:
@@ -1359,7 +1361,7 @@ class DistributeTranspiler(object):
     def _get_input_map_from_op(self, varmap, op):
         """Returns a dict from op input name to the vars in varmap."""
-        iomap = dict()
+        iomap = collections.OrderedDict()
         for key in op.input_names:
             vars = []
             for varname in op.input(key):
@@ -1372,7 +1374,7 @@ class DistributeTranspiler(object):
     def _get_output_map_from_op(self, varmap, op):
         """Returns a dict from op output name to the vars in varmap."""
-        iomap = dict()
+        iomap = collections.OrderedDict()
         for key in op.output_names:
             vars = []
             for varname in op.output(key):
```