Unverified commit a3790606
Authored Sep 16, 2021 by lilong12; committed by GitHub on Sep 16, 2021.
remove distributed attributes at the last stage for auto parallel (#35605)
* update
Parent: 2c70b844
Showing 2 changed files with 22 additions and 0 deletions (+22 −0).
python/paddle/distributed/auto_parallel/parallelizer.py (+15 −0)
python/paddle/fluid/tests/unittests/test_auto_parallel_parallelizer.py (+7 −0)
python/paddle/distributed/auto_parallel/parallelizer.py
@@ -14,6 +14,7 @@
 import paddle
 from paddle.distributed.fleet import cloud_utils
+import paddle.fluid.core as core
 from .context import DistributedContext
 from .context import get_default_distributed_context
 from .completion import complete_annotation
@@ -38,6 +39,16 @@ class AutoParallelizer:
         # self._dist_context = DistributedContext()
         self._dist_context = get_default_distributed_context()
 
+    def _remove_distributed_attrs(self, main_program):
+        suffix = core.kAutoParallelSuffix()
+        # distributed attributes for variables have been removed
+        # in the previous process.
+        for block in main_program.blocks:
+            for op in block.ops:
+                for attr_name in op.attr_names:
+                    if suffix in attr_name:
+                        op._remove_attr(attr_name)
+
     def parallelize(self,
                     loss,
                     startup_program=None,
@@ -76,4 +87,8 @@ class AutoParallelizer:
         for process_group in all_process_groups:
             process_group.instantiate()
 
+        # The last step: remove all distributed attributes to be compatible
+        # with inference.
+        self._remove_distributed_attrs(partitioned_main_prog)
+
         return dist_optimize_ops, dist_params_grads, partitioned_startup_prog, partitioned_main_prog
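For context on the hunks above: the new `_remove_distributed_attrs` method strips every op attribute whose name carries the reserved auto-parallel suffix (obtained from `core.kAutoParallelSuffix()`), so the partitioned main program carries no distributed metadata when it reaches inference. Below is a minimal, self-contained sketch of that pattern; `Op`, `Block`, `Program`, and the `_DIST_SUFFIX` placeholder are hypothetical stand-ins for illustration, not Paddle's actual classes or suffix value.

# Minimal sketch of the suffix-based attribute stripping pattern.
# _DIST_SUFFIX, Op, Block, and Program are hypothetical stand-ins;
# the real code gets the suffix from core.kAutoParallelSuffix() and
# walks paddle.fluid Program/Block/Operator objects instead.

_DIST_SUFFIX = "@auto_parallel"  # assumed placeholder, not the real value


class Op:
    def __init__(self, attrs):
        self._attrs = dict(attrs)

    @property
    def attr_names(self):
        # Return a copy so attributes can be removed mid-iteration.
        return list(self._attrs)

    def _remove_attr(self, name):
        del self._attrs[name]


class Block:
    def __init__(self, ops):
        self.ops = ops


class Program:
    def __init__(self, blocks):
        self.blocks = blocks


def remove_distributed_attrs(main_program, suffix=_DIST_SUFFIX):
    """Mirror of the patch: drop every op attribute carrying the suffix."""
    for block in main_program.blocks:
        for op in block.ops:
            for attr_name in op.attr_names:
                if suffix in attr_name:
                    op._remove_attr(attr_name)


if __name__ == "__main__":
    prog = Program([Block([Op({"dtype": 5, "mesh" + _DIST_SUFFIX: [0, 1]})])])
    remove_distributed_attrs(prog)
    leftover = [n for b in prog.blocks for op in b.ops for n in op.attr_names
                if _DIST_SUFFIX in n]
    assert not leftover, leftover
    print("all distributed attributes removed")

Note that the sketch's `attr_names` yields a copy of the names, which keeps removal during iteration safe; the patch relies on the analogous behavior of Paddle's `op.attr_names`.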
python/paddle/fluid/tests/unittests/test_auto_parallel_parallelizer.py
@@ -30,6 +30,7 @@ from paddle.fluid import layers
 from paddle.distributed import fleet
 import paddle.distributed.auto_parallel as auto
 from paddle.distributed.auto_parallel.utils import print_program_with_distributed_attr
+import paddle.fluid.core as core
 
 paddle.enable_static()
 _global_parallel_strategy = None
@@ -83,6 +84,7 @@ def mlp_pretrain_forward(train_program, start_program):
             name="label", shape=[batch_size, sequence_len, 1], dtype='float32')
 
         auto.shard_tensor(input, _global_process_mesh, dim_mapping=[-1, -1, -1])
+        auto.set_pipeline_stage(1)
 
         mlp = MLPLayer(
             hidden_size=hidden_size,
@@ -129,6 +131,11 @@ class TestMLPAutoParallelizer(unittest.TestCase):
         optimizer = fleet.distributed_optimizer(optimizer)
         _, _, distributed_startup_program, distributed_main_program = optimizer.minimize(
             loss, start_program)
+        suffix = core.kAutoParallelSuffix()
+        for block in distributed_main_program.blocks:
+            for op in block.ops:
+                for attr_name in op.attr_names:
+                    self.assertTrue(suffix not in attr_name)
         # print_program_with_distributed_attr(distributed_main_program)
         self.assertIsNotNone(distributed_startup_program)
         self.assertIsNotNone(distributed_main_program)
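The test hunk asserts the post-condition from the consumer side: after `optimizer.minimize` returns, no op attribute in the distributed main program may still carry the suffix. Here is a correspondingly minimal unittest-style sketch of that assertion, again with hypothetical stand-in classes rather than the real program produced by `optimizer.minimize`.

# Self-contained unittest sketch of the new assertion: after the last
# parallelization stage, no op attribute may carry the distributed suffix.
# FakeOp/FakeBlock/FakeProgram and _DIST_SUFFIX are hypothetical stand-ins;
# the real test takes the suffix from core.kAutoParallelSuffix() and the
# program from optimizer.minimize().
import unittest

_DIST_SUFFIX = "@auto_parallel"  # assumed placeholder, not the real value


class FakeOp:
    def __init__(self, attr_names):
        self.attr_names = attr_names


class FakeBlock:
    def __init__(self, ops):
        self.ops = ops


class FakeProgram:
    def __init__(self, blocks):
        self.blocks = blocks


class TestNoDistributedAttrsRemain(unittest.TestCase):
    def test_no_suffixed_attr_names(self):
        # Simulates a main program after _remove_distributed_attrs ran:
        # only plain attribute names should remain on every op.
        program = FakeProgram([FakeBlock([FakeOp(["dtype", "axis"])])])
        for block in program.blocks:
            for op in block.ops:
                for attr_name in op.attr_names:
                    self.assertNotIn(_DIST_SUFFIX, attr_name)


if __name__ == "__main__":
    unittest.main()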