Unverified commit 5a579df9
Authored by chengduo on Aug 22, 2019; committed via GitHub on Aug 22, 2019.

[Speedup] Make dygraph data parallel faster (#19280)
* update parallel.py test=develop
Parent: e3c68bde

Showing 3 changed files with 51 additions and 53 deletions (+51, -53):
python/paddle/fluid/dygraph/parallel.py (+8, -10)
python/paddle/fluid/tests/unittests/test_dist_base.py (+39, -35)
python/paddle/fluid/tests/unittests/test_parallel_dygraph_mnist.py (+4, -8)
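
For orientation before the per-file diffs: the pattern this commit speeds up is dygraph (imperative-mode) data-parallel training, as exercised by the tests changed below. The following is a minimal sketch of one trainer process, pieced together from the calls visible in this diff (prepare_context, DataParallel, apply_collective_grads, minimize, clear_gradients); the model, optimizer, reader, and the ParallelStrategy/nranks/scale_loss names are assumptions for illustration, not code from this commit.

    import paddle.fluid as fluid
    from paddle.fluid import dygraph

    with fluid.dygraph.guard(fluid.CUDAPlace(0)):
        # Assumed strategy setup; the attribute names mirror the test code below.
        strategy = dygraph.parallel.ParallelStrategy()
        strategy.nranks = 2
        strategy.local_rank = 0
        strategy.trainer_endpoints = ["127.0.0.1:6170", "127.0.0.1:6171"]
        strategy.current_endpoint = "127.0.0.1:6170"

        dygraph.parallel.prepare_context(strategy)
        model = dygraph.parallel.DataParallel(build_model(), strategy)  # build_model(): placeholder
        opt = build_optimizer()                                         # placeholder

        for step_id, data in enumerate(train_reader()):                 # train_reader(): placeholder
            loss = run_one_loop(model, opt, data)   # forward pass + loss, placeholder
            loss = model.scale_loss(loss)           # assumed helper: average loss across trainers
            loss.backward()
            model.apply_collective_grads()          # all-reduce coalesced grads (optimized in parallel.py below)
            opt.minimize(loss)
            model.clear_gradients()
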
python/paddle/fluid/dygraph/parallel.py
@@ -188,16 +188,14 @@ class DataParallel(layers.Layer):
         from ..layers import nn
         for coalesced_grad, origin_grad_vars, grad_shapes in coalesced_grads_and_grad_vars:
             grad_var_len = [np.prod(g_shape) for g_shape in grad_shapes]
-            splited_vars = nn.split(
-                coalesced_grad, num_or_sections=grad_var_len, dim=0)
-            reshaped_grad_vars = []
-            for g_var, g_shape in zip(splited_vars, grad_shapes):
-                reshaped_grad_vars.append(
-                    nn.reshape(
-                        x=g_var, shape=g_shape, inplace=True))
-            for origin_g_var, reshaped_g_var in zip(origin_grad_vars,
-                                                    reshaped_grad_vars):
-                nn.assign(input=reshaped_g_var, output=origin_g_var)
+            self._helper.main_program.current_block().append_op(
+                type='split',
+                inputs={'X': coalesced_grad},
+                outputs={'Out': origin_grad_vars},
+                attrs={'sections': grad_var_len,
+                       'axis': 0})
+            for g_var, g_shape in zip(origin_grad_vars, grad_shapes):
+                nn.reshape(x=g_var, shape=g_shape, inplace=True)
 
     def apply_collective_grads(self):
         """
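
The hunk above replaces the Python-level split/reshape/assign sequence with a single `split` op appended directly to the program, whose outputs are the original gradient variables, followed by in-place reshapes. A minimal NumPy sketch of the coalesce/split round-trip being optimized (names and shapes are illustrative, not from the Paddle source): per-parameter gradients are flattened into one buffer so a single collective all-reduce covers all of them, then the buffer is cut back into per-parameter tensors.

    import numpy as np

    # Illustrative per-parameter gradients (e.g. a weight matrix and a bias).
    grads = [np.random.rand(3, 4).astype('float32'),
             np.random.rand(4).astype('float32')]
    grad_shapes = [g.shape for g in grads]

    # Coalesce: flatten and concatenate into one contiguous buffer.
    coalesced = np.concatenate([g.reshape(-1) for g in grads])

    # ... a single all-reduce would run over `coalesced` here ...

    # Split back: section lengths are the flattened sizes of each gradient.
    grad_var_len = [int(np.prod(s)) for s in grad_shapes]
    sections = np.split(coalesced, np.cumsum(grad_var_len)[:-1])
    restored = [sec.reshape(shape) for sec, shape in zip(sections, grad_shapes)]

    for g, r in zip(grads, restored):
        assert np.allclose(g, r)
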
python/paddle/fluid/tests/unittests/test_dist_base.py
@@ -38,7 +38,14 @@ RUN_STEP = 5
 DEFAULT_BATCH_SIZE = 2
 
 
-def my_print(class_name, log_str):
+def print_to_out(out_losses):
+    if six.PY2:
+        print(pickle.dumps(out_losses))
+    else:
+        sys.stdout.buffer.write(pickle.dumps(out_losses))
+
+
+def print_to_err(class_name, log_str):
     localtime = time.asctime(time.localtime(time.time()))
     print_str = localtime + "\t" + class_name + "\t" + log_str
     if six.PY2:
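
The split into print_to_out and print_to_err matters because each trainer runs as a subprocess whose stdout is reserved for the pickled loss list, while timestamped progress messages go elsewhere (to stderr, judging by the name). A hypothetical parent-side capture, mirroring the subprocess.Popen calls later in this file; the script name is a placeholder:

    import pickle
    import subprocess

    # stdout carries only the pickled payload; diagnostics land in a log file,
    # so the two streams never mix.
    with open("/tmp/tr0_err.log", "wb") as err_log:
        proc = subprocess.Popen(["python", "some_trainer_script.py"],
                                stdout=subprocess.PIPE, stderr=err_log)
        payload, _ = proc.communicate()
    out_losses = pickle.loads(payload)  # exactly what print_to_out() wrote
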
@@ -99,9 +106,9 @@ class TestDistRunnerBase(object):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         exe.run(startup_prog)
-        my_print(type(self).__name__, "run pserver startup program done.")
+        print_to_err(type(self).__name__, "run pserver startup program done.")
         exe.run(pserver_prog)
-        my_print(type(self).__name__, "run pserver main program done.")
+        print_to_err(type(self).__name__, "run pserver main program done.")
 
     def run_gpu_fleet_api_trainer(self, args):
         assert args.update_method == "nccl2"
@@ -118,7 +125,7 @@ class TestDistRunnerBase(object):
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
-        my_print("gpu_fleet", "fleet.node_num:")
+        print_to_err("gpu_fleet", "fleet.node_num:")
         #"fleet.node_id:", fleet.node_id(),
         #"fleet.trainer_num:", fleet.worker_num())
@@ -154,15 +161,15 @@ class TestDistRunnerBase(object):
             else:
                 return origin_batch
 
-        my_print(type(self).__name__, "begin to train on trainer")
+        print_to_err(type(self).__name__, "begin to train on trainer")
         out_losses = []
         for i in six.moves.xrange(RUN_STEP):
             loss, = exe.run(dist_prog,
                             fetch_list=[avg_cost.name],
                             feed=feeder.feed(get_data()))
             out_losses.append(loss[0])
-            my_print(type(self).__name__, "run step %d finished" % i)
-        my_print(type(self).__name__, "trainer run finished")
+            print_to_err(type(self).__name__, "run step %d finished" % i)
+        print_to_err(type(self).__name__, "trainer run finished")
 
         if six.PY2:
             print(pickle.dumps(out_losses))
@@ -182,7 +189,7 @@ class TestDistRunnerBase(object):
self
.
get_model
(
batch_size
=
args
.
batch_size
)
if
args
.
update_method
==
"pserver"
:
my_print
(
print_to_err
(
type
(
self
).
__name__
,
"begin to run transpile on trainer with pserver mode"
)
t
=
self
.
get_transpiler
(
args
.
trainer_id
,
...
...
@@ -190,7 +197,7 @@ class TestDistRunnerBase(object):
args
.
endpoints
,
args
.
trainers
,
args
.
sync_mode
,
args
.
dc_asgd
)
trainer_prog
=
t
.
get_trainer_program
()
my_print
(
print_to_err
(
type
(
self
).
__name__
,
"get trainer program done with pserver mode."
)
elif
args
.
update_method
==
"nccl2"
or
args
.
update_method
==
"nccl2_reduce_layer"
:
...
...
@@ -201,7 +208,7 @@ class TestDistRunnerBase(object):
if
args
.
use_hallreduce
:
config
.
use_hierarchical_allreduce
=
True
config
.
hierarchical_allreduce_inter_nranks
=
args
.
hallreduce_inter_nranks
my_print
(
print_to_err
(
type
(
self
).
__name__
,
"begin to run transpile on trainer with nccl2 mode"
)
nccl2_t
=
fluid
.
DistributeTranspiler
(
config
=
config
)
...
...
@@ -211,16 +218,16 @@ class TestDistRunnerBase(object):
startup_program
=
fluid
.
default_startup_program
(),
trainers
=
args
.
endpoints
,
current_endpoint
=
args
.
current_endpoint
)
my_print
(
print_to_err
(
type
(
self
).
__name__
,
"get trainer program done. with nccl2 mode"
)
trainer_prog
=
fluid
.
default_main_program
()
else
:
my_print
(
print_to_err
(
type
(
self
).
__name__
,
"do nothing about main program, just use it"
)
trainer_prog
=
fluid
.
default_main_program
()
my_print
(
type
(
self
).
__name__
,
"use main program done."
)
print_to_err
(
type
(
self
).
__name__
,
"use main program done."
)
if
args
.
use_cuda
:
device_id
=
int
(
os
.
getenv
(
"FLAGS_selected_gpus"
,
"0"
))
...
...
@@ -230,7 +237,7 @@ class TestDistRunnerBase(object):
         exe = fluid.Executor(place)
         exe.run(fluid.default_startup_program())
-        my_print(type(self).__name__, "run worker startup program done.")
+        print_to_err(type(self).__name__, "run worker startup program done.")
 
         exec_strategy = fluid.ExecutionStrategy()
         exec_strategy.num_threads = 1
@@ -262,12 +269,12 @@ class TestDistRunnerBase(object):
build_stra
.
num_trainers
=
1
build_stra
.
trainer_id
=
0
my_print
(
type
(
self
).
__name__
,
"begin to compile with data parallel"
)
print_to_err
(
type
(
self
).
__name__
,
"begin to compile with data parallel"
)
binary
=
compiler
.
CompiledProgram
(
trainer_prog
).
with_data_parallel
(
loss_name
=
avg_cost
.
name
,
build_strategy
=
build_stra
,
exec_strategy
=
exec_strategy
)
my_print
(
type
(
self
).
__name__
,
"program compiled with data parallel"
)
print_to_err
(
type
(
self
).
__name__
,
"program compiled with data parallel"
)
feed_var_list
=
[
var
for
var
in
trainer_prog
.
global_block
().
vars
.
values
()
...
...
@@ -288,20 +295,17 @@ class TestDistRunnerBase(object):
             else:
                 return origin_batch
 
-        my_print(type(self).__name__, "begin to train on trainer")
+        print_to_err(type(self).__name__, "begin to train on trainer")
         out_losses = []
         for i in six.moves.xrange(RUN_STEP):
             loss, = exe.run(binary,
                             fetch_list=[avg_cost.name],
                             feed=feeder.feed(get_data()))
             out_losses.append(loss[0])
-            my_print(type(self).__name__, "run step %d finished" % i)
-        my_print(type(self).__name__, "trainer run finished")
-
-        if six.PY2:
-            print(pickle.dumps(out_losses))
-        else:
-            sys.stdout.buffer.write(pickle.dumps(out_losses))
+            print_to_err(type(self).__name__, "run step %d finished" % i)
+        print_to_err(type(self).__name__, "trainer run finished")
+
+        print_to_out(out_losses)
 
 
 class TestParallelDyGraphRunnerBase(object):
@@ -344,23 +348,23 @@ class TestParallelDyGraphRunnerBase(object):
             strategy.local_rank = args.trainer_id
             strategy.trainer_endpoints = args.endpoints.split(",")
             strategy.current_endpoint = args.current_endpoint
-            my_print(
+            print_to_err(
                 type(self).__name__,
                 "begin to prepare context in dygraph with nccl2")
             dygraph.parallel.prepare_context(strategy)
             model = dygraph.parallel.DataParallel(model, strategy)
-            my_print(type(self).__name__, "model built in dygraph")
+            print_to_err(type(self).__name__, "model built in dygraph")
         out_losses = []
-        my_print(type(self).__name__, "begin to run dygraph training")
+        print_to_err(type(self).__name__, "begin to run dygraph training")
         for step_id, data in enumerate(train_reader()):
             data = _get_data(data)
             if step_id == RUN_STEP:
                 break
             loss = self.run_one_loop(model, opt, data)
             if step_id % 10 == 0:
-                my_print(
+                print_to_err(
                     type(self).__name__,
-                    "loss at step %d: %f" % (step_id, loss))
+                    "loss at step %d: %f" % (step_id, loss.numpy()))
             out_losses.append(loss.numpy())
 
             # FIXME(Yancey1989): scale the loss inplace
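
Besides the rename, the hunk above also logs loss.numpy() instead of the dygraph Variable itself. A small self-contained illustration of why %-formatting needs the underlying array value (FakeVariable is purely illustrative, not a Paddle class):

    import numpy as np

    class FakeVariable(object):
        """Stand-in for a dygraph Variable wrapping a tensor."""
        def __init__(self, data):
            self._data = np.asarray(data, dtype='float32')
        def numpy(self):
            return self._data

    loss = FakeVariable([0.25])
    try:
        print("loss at step %d: %f" % (0, loss))       # the wrapper is not float-convertible
    except TypeError as e:
        print("formatting the wrapper fails:", e)
    print("loss at step %d: %f" % (0, loss.numpy()))   # a size-1 array converts cleanly
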
@@ -373,7 +377,7 @@ class TestParallelDyGraphRunnerBase(object):
             opt.minimize(loss)
             model.clear_gradients()
-        my_print(type(self).__name__, pickle.dumps(out_losses))
+        print_to_out(out_losses)
 
 
 def runtime_main(test_class):
@@ -483,7 +487,7 @@ class TestDistBase(unittest.TestCase):
with
closing
(
socket
.
socket
(
socket
.
AF_INET
,
socket
.
SOCK_STREAM
))
as
s
:
s
.
bind
((
''
,
0
))
my_print
(
print_to_err
(
type
(
self
).
__name__
,
"socket name: %s"
%
s
.
getsockname
()[
1
])
return
s
.
getsockname
()[
1
]
...
...
@@ -519,13 +523,13 @@ class TestDistBase(unittest.TestCase):
         ps0_pipe = open("/tmp/ps0_err.log", "wb")
         ps1_pipe = open("/tmp/ps1_err.log", "wb")
 
-        my_print(type(self).__name__, "going to start pserver process 0")
+        print_to_err(type(self).__name__, "going to start pserver process 0")
         ps0_proc = subprocess.Popen(
             ps0_cmd.strip().split(" "),
             stdout=subprocess.PIPE,
             stderr=ps0_pipe,
             env=required_envs)
-        my_print(type(self).__name__, "going to start pserver process 1")
+        print_to_err(type(self).__name__, "going to start pserver process 1")
         ps1_proc = subprocess.Popen(
             ps1_cmd.strip().split(" "),
             stdout=subprocess.PIPE,
@@ -641,13 +645,13 @@ class TestDistBase(unittest.TestCase):
         tr0_pipe = open("/tmp/tr0_err.log", "wb")
         tr1_pipe = open("/tmp/tr1_err.log", "wb")
 
-        my_print(type(self).__name__, "going to start trainer process 0")
+        print_to_err(type(self).__name__, "going to start trainer process 0")
         tr0_proc = subprocess.Popen(
             tr0_cmd.strip().split(" "),
             stdout=subprocess.PIPE,
             stderr=tr0_pipe,
             env=env0)
-        my_print(type(self).__name__, "going to start trainer process 1")
+        print_to_err(type(self).__name__, "going to start trainer process 1")
         tr1_proc = subprocess.Popen(
             tr1_cmd.strip().split(" "),
             stdout=subprocess.PIPE,
@@ -761,7 +765,7 @@ class TestDistBase(unittest.TestCase):
             tr_pipe = open("/tmp/tr{}_err.log".format(i), "wb")
 
-            my_print(
+            print_to_err(
                 type(self).__name__,
                 "going to start process {} with nccl2".format(i))
             tr_proc = subprocess.Popen(
python/paddle/fluid/tests/unittests/test_parallel_dygraph_mnist.py
@@ -13,13 +13,11 @@
 # limitations under the License.
 
 from __future__ import print_function
-#import unittest
+import unittest
 from test_dist_base import TestDistBase
+import paddle.fluid as fluid
 
 
-#TODO(guru4elephant): should have dygraph test dist base
-# current TestDistBase has some incompatible code with dygraph
-'''
 class TestParallelDygraphMnist(TestDistBase):
     def _setup_config(self):
         self._sync_mode = False
@@ -27,11 +25,9 @@ class TestParallelDygraphMnist(TestDistBase):
         self._dygraph = True
 
     def test_mnist(self):
-        return
         if fluid.core.is_compiled_with_cuda():
             self.check_with_place("parallel_dygraph_mnist.py", delta=1e-5)
-'''
 
 
 if __name__ == "__main__":
-    #unittest.main()
-    pass
+    unittest.main()
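
The last file simply re-enables TestParallelDygraphMnist: the early `return`, the surrounding triple-quoted string, and the disabled test runner are removed. For orientation, a rough, hypothetical sketch of the comparison that check_with_place (defined in test_dist_base.py, not shown in this diff) performs on the two trainers' outputs: the pickled loss lists written via print_to_out are read back and required to agree within `delta`.

    import pickle

    import numpy as np

    def compare_trainer_losses(tr0_stdout, tr1_stdout, delta=1e-5):
        # tr*_stdout: raw bytes each trainer wrote to stdout via print_to_out().
        losses0 = pickle.loads(tr0_stdout)
        losses1 = pickle.loads(tr1_stdout)
        assert len(losses0) == len(losses1)
        for step, (l0, l1) in enumerate(zip(losses0, losses1)):
            np.testing.assert_allclose(
                l0, l1, atol=delta,
                err_msg="losses diverged at step %d" % step)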