机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit d3105dbf
Authored on Feb 07, 2021 by sandyhouse
Commit message: update
Parent: 7aa0cc3c

Showing 2 changed files with 39 additions and 28 deletions (+39 / -28)
python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py    +36 -17
python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py                   +3  -11
python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py

@@ -16,8 +16,8 @@ from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
 class GradientClipHelper(object):
-    def __init__(self, sharding_ring_id):
-        self.sharding_ring_id = sharding_ring_id
+    def __init__(self, mp_ring_id):
+        self.mp_ring_id = mp_ring_id
 
     def _is_gradient_clip_op(self, op):
         return op.desc.has_attr("op_namescope") \
@@ -31,6 +31,7 @@ class GradientClipHelper(object):
         """
         deperated_vars = set()
         deperate_op_idx = set()
+        reversed_x_paramname = []
         for idx, op in enumerate(block.ops):
             if not self._is_gradient_clip_op(op):
                 continue
@@ -44,6 +45,8 @@ class GradientClipHelper(object):
                 if shard.is_param(param_name) and \
                         not shard.has_param(param_name):
                     deperate_op = True
+                elif shard.is_param(param_name):
+                    reversed_x_paramname.append(param_name)
 
             if deperate_op:
                 deperate_op_idx.add(idx)
@@ -65,31 +68,47 @@ class GradientClipHelper(object):
                 for input_name in op.desc.input_arg_names():
                     if input_name not in deperated_vars:
                         reversed_inputs.append(input_name)
                 op.desc.set_input("X", reversed_inputs)
                 assert (len(op.desc.output_arg_names()) == 1)
                 sum_res = op.desc.output_arg_names()[0]
-                block._insert_op_without_sync(
-                    idx + 1,
-                    type='c_sync_comm_stream',
-                    inputs={'X': sum_res},
-                    outputs={'Out': sum_res},
-                    attrs={'ring_id': 0,
-                           OP_ROLE_KEY: OpRole.Optimize})
+                # this allreduce should not overlap with calc and should be scheduled in calc stream
+                # block._insert_op_without_sync(
+                #     idx + 1,
+                #     type='c_sync_comm_stream',
+                #     inputs={'X': sum_res},
+                #     outputs={'Out': sum_res},
+                #     attrs={'ring_id': 0,
+                #            OP_ROLE_KEY: OpRole.Optimize})
                 block._insert_op_without_sync(
                     idx + 1,
                     type='c_allreduce_sum',
                     inputs={'X': sum_res},
                     outputs={'Out': sum_res},
-                    attrs={'ring_id': self.sharding_ring_id,
-                           OP_ROLE_KEY: OpRole.Optimize})
+                    attrs={
+                        'ring_id': self.mp_ring_id,
+                        'op_namescope': "/gradient_clip_model_parallelism",
+                        'use_calc_stream': True,
+                        OP_ROLE_KEY: OpRole.Optimize,
+                    })
-                block._insert_op_without_sync(
-                    idx + 1,
-                    type='c_sync_calc_stream',
-                    inputs={'X': sum_res},
-                    outputs={'Out': sum_res},
-                    attrs={OP_ROLE_KEY: OpRole.Optimize})
+                # block._insert_op_without_sync(
+                #     idx + 1,
+                #     type='c_sync_calc_stream',
+                #     inputs={'X': sum_res},
+                #     outputs={'Out': sum_res},
+                #     attrs={OP_ROLE_KEY: OpRole.Optimize})
+
+        # the grad sum here should take the all and only param in the current shard
+        to_check_param = set(reversed_x_paramname)
+        should_check_param = set(shard.global_params).intersection(
+            set([
+                param for param, worker_idx in shard.global_param2device.items()
+                if worker_idx == shard.worker_idx
+            ]))
+        assert to_check_param == should_check_param, "amp check_finite_and_unscale checking miss [{}] and got unexpected [{}]".format(
+            should_check_param - to_check_param,
+            to_check_param - should_check_param)
 
         for var_name in deperated_vars:
             block._remove_var(var_name, sync=False)
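The assertion added at the end of this hunk verifies that the parameters collected from the surviving gradient-clip ops are exactly the parameters that the shard maps to the current worker. Below is a minimal, self-contained sketch of that set comparison; the param2device mapping, parameter names, and worker index are hypothetical stand-ins for Paddle's real shard object (shard.global_params, shard.global_param2device, shard.worker_idx):

# Standalone sketch of the consistency check added in this commit.
# All data here is made up for illustration.
param2device = {"fc_0.w_0": 0, "fc_0.b_0": 0, "fc_1.w_0": 1, "fc_1.b_0": 1}
global_params = set(param2device)   # stand-in for shard.global_params
worker_idx = 0                      # this worker's rank in the sharding group

# Parameters actually seen while walking the gradient-clip ops
# (collected into reversed_x_paramname in the real pass).
reversed_x_paramname = ["fc_0.w_0", "fc_0.b_0"]

to_check_param = set(reversed_x_paramname)
should_check_param = global_params.intersection(
    {p for p, w in param2device.items() if w == worker_idx})

assert to_check_param == should_check_param, (
    "amp check_finite_and_unscale checking miss [{}] and got unexpected [{}]".format(
        should_check_param - to_check_param, to_check_param - should_check_param))

If the two sets differ, the message reports which parameters were missed and which were unexpected, mirroring the format string in the diff.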
python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py (file mode changed 100755 → 100644)

@@ -227,14 +227,9 @@ def get_valid_op_role(block, insert_idx):
     return OpRole.Forward or OpRole.Backward
     """
     op_role = block.ops[insert_idx].attr('op_role')
-    #if (insert_idx >= len(block.ops)) or (
-    #        op_role in [int(OpRole.Backward), int(OpRole.Optimize)]):
-    #    return OpRole.Backward
-    #if op_role in [int(OpRole.Forward), int(OpRole.Loss)]:
-    #    return OpRole.Forward
-    if insert_idx >= len(block.ops): return OpRole.Optimize
-    if op_role == int(OpRole.Backward): return OpRole.Backward
-    if op_role == int(OpRole.Optimize): return OpRole.Optimize
+    if (insert_idx >= len(block.ops)) or (
+            op_role in [int(OpRole.Backward), int(OpRole.Optimize)]):
+        return OpRole.Backward
     if op_role in [int(OpRole.Forward), int(OpRole.Loss)]:
         return OpRole.Forward
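This hunk restores get_valid_op_role to classifying an insertion point as either OpRole.Backward or OpRole.Forward, dropping the branch's intermediate version that could also return OpRole.Optimize. A rough standalone sketch of that decision logic follows; FakeOp, FakeBlock, and the integer role values are stand-ins for Paddle's Block, Operator, and OpRole enum, and the trailing recursion is an assumption about how indices with other roles are resolved (the rest of the function is not shown in this diff):

# Rough, runnable sketch of the restored role-resolution behaviour.
from enum import IntEnum


class OpRole(IntEnum):
    Forward = 0
    Backward = 1
    Optimize = 2
    Loss = 256  # illustrative value, not necessarily Paddle's encoding


class FakeOp:
    def __init__(self, role):
        self._role = role

    def attr(self, name):
        assert name == 'op_role'
        return int(self._role)


class FakeBlock:
    def __init__(self, roles):
        self.ops = [FakeOp(r) for r in roles]


def get_valid_op_role(block, insert_idx):
    """Return OpRole.Forward or OpRole.Backward for the given insertion point."""
    op_role = block.ops[insert_idx].attr('op_role')
    if (insert_idx >= len(block.ops)) or (
            op_role in [int(OpRole.Backward), int(OpRole.Optimize)]):
        return OpRole.Backward
    if op_role in [int(OpRole.Forward), int(OpRole.Loss)]:
        return OpRole.Forward
    # Assumed behaviour: look at the next op until a known role is found.
    return get_valid_op_role(block, insert_idx + 1)


block = FakeBlock([OpRole.Forward, OpRole.Backward, OpRole.Optimize])
print(get_valid_op_role(block, 0))  # OpRole.Forward
print(get_valid_op_role(block, 2))  # OpRole.Backward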
@@ -485,9 +480,6 @@ def save_persistables(exe, dirname, main_program, filename=None):
     This function handles the model saving for sharding training.
     """
-    if main_program._pipeline_opt:
-        main_program = main_program._pipeline_opt['section_program'][
-            'program']
 
     def is_opt_vars(var):
         # NOTE(liangjianzhong): The checks should be updated when add new compatible optimizer
         # now only Momentum and adam are compatible with sharding
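The is_opt_vars helper in the trailing context filters optimizer state out of the persistables being saved; its body is not part of this diff. Below is a hedged sketch of what such a predicate looks like for the two optimizers the NOTE says are supported (Momentum and Adam); the name-based signature and the suffix list are assumptions for illustration, not Paddle's actual implementation:

# Hypothetical is_opt_vars-style predicate (illustration only).
def is_opt_vars(var_name):
    # Momentum keeps a velocity tensor per parameter; Adam keeps two moment
    # tensors plus the beta1/beta2 power accumulators.
    optimizer_state_suffixes = [
        "_velocity_0",        # Momentum
        "_moment1_0",         # Adam
        "_moment2_0",         # Adam
        "_beta1_pow_acc_0",   # Adam
        "_beta2_pow_acc_0",   # Adam
    ]
    return any(var_name.endswith(s) for s in optimizer_state_suffixes)


print(is_opt_vars("fc_0.w_0_moment1_0"))  # True: Adam state, skipped when saving
print(is_opt_vars("fc_0.w_0"))            # False: a regular parameter, kept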