Commit 51898800
Authored Sep 26, 2020 by MrChengmo

fix

Parent: 291e1594

Showing 4 changed files with 26 additions and 23 deletions (+26, -23)
paddle/fluid/framework/distributed_strategy.proto (+1, -0)
python/paddle/distributed/fleet/launch.py (+4, -3)
python/paddle/distributed/fleet/launch_utils.py (+9, -13)
python/paddle/distributed/fleet/runtime/parameter_server_runtime.py (+12, -7)
paddle/fluid/framework/distributed_strategy.proto
@@ -97,6 +97,7 @@ message AsyncConfig {
   optional int32 thread_pool_size = 6 [ default = 1 ];
   optional int32 send_wait_times = 7 [ default = 1 ];
   optional bool runtime_split_send_recv = 8 [ default = false ];
+  optional string heter_worker_device = 9 [ default = 'cpu' ];
 }
 
 message PipelineConfig { optional int32 micro_batch = 1 [ default = 1 ]; }
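The new AsyncConfig field above defaults to 'cpu'. As a hedged sketch of how it could be set from user code, assuming the fleet DistributedStrategy maps a_sync_configs onto this proto message (the value 'gpu' is only an example):

    import paddle.distributed.fleet as fleet

    strategy = fleet.DistributedStrategy()
    strategy.a_sync = True
    # heter_worker_device is the proto field added above; any of
    # 'cpu' / 'gpu' / 'xpu' would be plausible values here.
    strategy.a_sync_configs = {"heter_worker_device": "gpu"}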
python/paddle/distributed/fleet/launch.py
@@ -268,9 +268,10 @@ def which_distributed_mode(args):
         if co_arg in " ".join(sys.argv[1:-1])
     ]
 
-    assert (
-        len(has_ps_args) > 1 and len(has_collective_args) > 1
-    ), "Only one mode(Collective or Parameter-Server ) can be selected at the same time, but more than one configuration was received."
+    if len(has_ps_args) > 1 and len(has_collective_args) > 1:
+        raise ValueError(
+            "Only one mode(Collective or Parameter-Server ) can be selected at the same time, but more than one configuration was received."
+        )
 
     if fluid.core.is_compiled_with_cuda():
         cuda_device_num = fluid.core.get_cuda_device_count()
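The hunk replaces an assert with an explicit if/raise. Asserts are stripped when Python runs with the -O flag, so the mode-conflict check could silently disappear, and raising ValueError gives a clearer failure. A minimal standalone illustration of the pattern (not Paddle code, names assumed):

    def check_single_mode(has_ps_args, has_collective_args):
        # Unlike `assert`, this check survives `python -O` and raises a
        # readable error when both modes are configured at once.
        if len(has_ps_args) > 1 and len(has_collective_args) > 1:
            raise ValueError(
                "Only one mode (Collective or Parameter-Server) can be "
                "selected at the same time.")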
python/paddle/distributed/fleet/launch_utils.py
@@ -610,7 +610,6 @@ def cloud_ps_heter_env_set(args):
     assert trainers_num != 0
     environs["PADDLE_TRAINERS_NUM"] = trainers_num
     environs["TRAINERS_NUM"] = trainers_num
-    environs["PADDLE_HETER_TRAINER_DEVICE"] = args.heter_worker_device
 
     # hard code for paddlecloud custom-framework
     environs["PADDLE_HETER_TRAINER_IP_PORT_LIST"] = paddle_trainer_endpoints
@@ -754,7 +753,7 @@ class ParameterServerLauncher(object):
             "parsed from args: node_ips:{} current_node_ip:{} node_rank:{}".
             format(self.node_ips, self.current_node_ip, self.node_rank))
 
-    def start_ps(self, args):
+    def start_ps(self):
         cluster = Cluster(hdfs=None)
         server_rank = 0
         worker_rank = 0
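start_ps no longer takes args; together with the next hunk this suggests the launcher now reads the parsed arguments from self.args. A hypothetical usage sketch, assuming ParameterServerLauncher stores args at construction time (the constructor is not shown in this diff):

    # Hypothetical call site; the constructor signature is assumed.
    launcher = ParameterServerLauncher(args)
    launcher.start_ps()   # previously launcher.start_ps(args)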
@@ -799,13 +798,13 @@ class ParameterServerLauncher(object):
         self.cmds = {"worker": [], "server": [], "heter_worker": []}
         self.log_fns = {"worker": [], "server": [], "heter_worker": []}
 
-        self.start_pod_server(args, pod)
-        self.start_pod_worker(args, pod)
-        self.start_pod_heter_worker(args, pod)
+        self.start_pod_server(self.args, pod)
+        self.start_pod_worker(self.args, pod)
+        self.start_pod_heter_worker(self.args, pod)
 
         logger.info(
             "Please check servers, workers and heter_worker logs in {}/workerlog.*, {}/serverlog.* and {}/heterlog.*".
-            format(args.log_dir, args.log_dir, args.log_dir))
+            format(self.args.log_dir, self.args.log_dir, self.args.log_dir))
 
         # 4. wait for finish training
         if len(self.procs["worker"]) > 0:
@@ -855,7 +854,6 @@ class ParameterServerLauncher(object):
             "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
             "PADDLE_HETER_TRAINER_IP_PORT_LIST": self.heter_worker_endpoints,
-            "PADDLE_HETER_TRAINER_DEVICE": args.heter_worker_device,
             "PADDLE_PORT": cur_server.endpoint.split(":")[1],
             "TRAINING_ROLE": "PSERVER",
             "PADDLE_TRAINERS_NUM": str(self.worker_num),
@@ -905,10 +903,10 @@ class ParameterServerLauncher(object):
         heter_device_num = 0
         device_list = []
 
-        if args.heter_worker_device == "gpu":
+        if fluid.core.is_compiled_with_cuda():
             device_list = get_gpus(args.gpus)
             heter_device_num = len(device_list)
-        elif args.heter_worker_device == "xpu":
+        elif fluid.core.is_compiled_with_xpu():
             heter_device_num = fluid.core.get_xpu_device_count()
             device_list = [str(x) for x in range(0, heter_device_num)]
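Instead of trusting the user-supplied heter_worker_device flag, the code now probes what the Paddle build actually supports. A small self-contained sketch of that probe, using the same fluid.core helpers the hunk relies on (get_cuda_device_count is assumed to be available alongside them):

    import paddle.fluid as fluid

    def probe_heter_devices():
        # Prefer GPU if this Paddle build has CUDA, then XPU, else nothing.
        if fluid.core.is_compiled_with_cuda():
            return [str(x) for x in range(fluid.core.get_cuda_device_count())]
        elif fluid.core.is_compiled_with_xpu():
            return [str(x) for x in range(fluid.core.get_xpu_device_count())]
        return []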
@@ -920,7 +918,6 @@ class ParameterServerLauncher(object):
             "PADDLE_TRAINERS_NUM": str(self.worker_num),
             "PADDLE_HETER_TRAINER_IP_PORT_LIST": self.heter_worker_endpoints,
-            "PADDLE_HETER_TRAINER_DEVICE": args.heter_worker_device,
             "TRAINING_ROLE": "TRAINER",
             "PADDLE_TRAINER_ID": str(cur_worker.rank),
             "PADDLE_WITH_GLOO": "1",
@@ -972,10 +969,10 @@ class ParameterServerLauncher(object):
         heter_device_num = 0
         device_list = []
 
-        if args.heter_worker_device == "gpu":
+        if fluid.core.is_compiled_with_cuda():
             device_list = get_gpus(args.gpus)
             heter_device_num = len(device_list)
-        elif args.heter_worker_device == "xpu":
+        elif fluid.core.is_compiled_with_xpu():
             heter_device_num = fluid.core.get_xpu_device_count()
             device_list = [str(x) for x in range(0, heter_device_num)]
         assert heter_device_num != 0
@@ -987,7 +984,6 @@ class ParameterServerLauncher(object):
             "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
             "PADDLE_HETER_TRAINER_IP_PORT_LIST": self.heter_worker_endpoints,
-            "PADDLE_HETER_TRAINER_DEVICE": args.heter_worker_device,
             "PADDLE_PORT": cur_heter_worker.endpoint.split(":")[1],
             "TRAINING_ROLE": "HETER_TRAINER",
             "PADDLE_TRAINERS_NUM": str(self.worker_num),
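Across the launch_utils.py hunks, PADDLE_HETER_TRAINER_DEVICE is dropped from every exported environment, so the device choice no longer travels through process environments; the runtime change in the next file reads it from the distributed strategy instead. A hedged illustration of the difference for any code that used to consult the variable:

    import os

    # Old path (removed by this commit): the launcher exported the device.
    device = os.getenv("PADDLE_HETER_TRAINER_DEVICE")   # now unset -> None

    # New path (sketch): read it from the validated strategy instead,
    # e.g. strategy.a_sync_configs["heter_worker_device"].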
python/paddle/distributed/fleet/runtime/parameter_server_runtime.py
@@ -94,8 +94,8 @@ class ParameterServerRuntime(RuntimeBase):
                 return False
 
             if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \
-                    var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
-                    var.desc.type() == core.VarDesc.VarType.READER:
+                var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
+                var.desc.type() == core.VarDesc.VarType.READER:
                 return False
             return var.persistable
@@ -199,15 +199,20 @@ class ParameterServerRuntime(RuntimeBase):
 
     def _get_executor(self):
         if self.role_maker._is_heter_worker():
-            if self.role_maker._get_heter_worker_device() == "GPU":
+            dist_strategy = self.context["valid_strategy"]
+            heter_worker_device = dist_strategy.a_sync_configs[
+                "heter_worker_device"].upper()
+            if heter_worker_device == "GPU":
                 gpu_id = int(os.getenv("FLAGS_selected_gpus", "0"))
                 executor = Executor(fluid.CUDAPlace(gpu_id))
-            elif self.role_maker._get_heter_worker_device() == "XPU":
+            elif heter_worker_device == "XPU":
                 xpu_id = int(os.getenv("FLAGS_selected_xpus", "0"))
                 executor = Executor(fluid.XPUPlace(xpu_id))
+            elif heter_worker_device == "CPU":
+                executor = fluid.Executor(fluid.CPUPlace())
             else:
-                raise ValueError("Not Support Device {}".format(
-                    self.role_maker._get_heter_worker_device()))
+                raise ValueError("Heter Worker Not Support Device {}".format(
+                    heter_worker_device))
         else:
             executor = fluid.Executor(fluid.CPUPlace())
         return executor
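The runtime now resolves the heter worker device from context["valid_strategy"] rather than from the role maker, and gains an explicit CPU branch. A condensed sketch of the resulting place/executor selection, mirroring the hunk above (the function name is illustrative):

    import os
    import paddle.fluid as fluid

    def make_heter_executor(heter_worker_device):
        device = heter_worker_device.upper()
        if device == "GPU":
            place = fluid.CUDAPlace(int(os.getenv("FLAGS_selected_gpus", "0")))
        elif device == "XPU":
            place = fluid.XPUPlace(int(os.getenv("FLAGS_selected_xpus", "0")))
        elif device == "CPU":
            place = fluid.CPUPlace()
        else:
            raise ValueError("Heter Worker Not Support Device {}".format(device))
        return fluid.Executor(place)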
@@ -312,7 +317,7 @@ class ParameterServerRuntime(RuntimeBase):
         opts = _get_optimize_ops(self.origin_main_program)
         for op in opts:
             if "Param" in op.input_names and \
-                    "LearningRate" in op.input_names and op.input("Param")[0] == param_name:
+                "LearningRate" in op.input_names and op.input("Param")[0] == param_name:
                 return op
 
     def _save_dense_params(self, executor, dirname, context, main_program):