机器未来 / Paddle (forked from PaddlePaddle / Paddle, in sync with the fork source)
Commit e87f65c3 (unverified)
Authored Sep 28, 2022 by zhaoyingli; committed via GitHub on Sep 28, 2022
[AutoParallel] fix dist_split (#46505)
* [AutoParallel] fix dist_split
* add unittest
* update cmakelist
Parent: e12a905e
Showing 3 changed files with 76 additions and 2 deletions (+76 / -2)
python/paddle/distributed/auto_parallel/operators/dist_split.py    +6  -2
python/paddle/fluid/tests/unittests/auto_parallel/CMakeLists.txt   +1  -0
python/paddle/fluid/tests/unittests/auto_parallel/test_dist_split.py   +69  -0
python/paddle/distributed/auto_parallel/operators/dist_split.py
@@ -101,8 +101,12 @@ class DistributedSplitImpl(DistributedOperatorImpl):
         return changed
 
     def is_auto_compatible(self, dist_op):
-        raise NotImplementedError(
-            "Auto Search is not supported by dist split yet.")
+        if (not self.is_input_compatible(dist_op)) or \
+            (not self.is_output_compatible(dist_op)) or \
+            (not self.is_compatible(dist_op)):
+            return False
+
+        return True
 
     @staticmethod
     def forward(ctx, *args, **kwargs):
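Before this change, is_auto_compatible unconditionally raised NotImplementedError ("Auto Search is not supported by dist split yet."); it now answers by composing the three existing checks. As a rough sketch of how a search or completion pass could consume such a predicate (the pick_impl helper and candidate_impls argument are illustrative assumptions, not Paddle APIs):

# Illustrative sketch only; pick_impl and candidate_impls are assumed names.
def pick_impl(dist_op, candidate_impls):
    # Return the first registered implementation whose input/output sharding
    # mappings are compatible with dist_op's dist_attr.
    for impl in candidate_impls:
        if impl.is_auto_compatible(dist_op):  # no longer raises for split
            return impl
    return None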
python/paddle/fluid/tests/unittests/auto_parallel/CMakeLists.txt
@@ -78,6 +78,7 @@ if(WITH_DISTRIBUTE AND WITH_GPU)
   py_test_modules(test_dist_embedding MODULES test_dist_embedding ENVS ${dist_ENVS})
   py_test_modules(test_dist_slice MODULES test_dist_slice ENVS ${dist_ENVS})
+  py_test_modules(test_dist_split MODULES test_dist_split ENVS ${dist_ENVS})
   py_test_modules(test_cluster MODULES test_cluster ENVS ${dist_ENVS})
   py_test_modules(test_comm_cost MODULES test_comm_cost ENVS ${dist_ENVS})
   py_test_modules(test_comp_cost MODULES test_comp_cost ENVS ${dist_ENVS})
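The new entry registers test_dist_split with CTest through the repository's py_test_modules helper, passing the same ${dist_ENVS} environment as the neighbouring tests; it only takes effect in builds where the surrounding guard (WITH_DISTRIBUTE AND WITH_GPU) holds. In such a build it should be selectable with a standard CTest name filter, e.g. `ctest -R test_dist_split`.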
python/paddle/fluid/tests/unittests/auto_parallel/test_dist_split.py
0 → 100644
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
from paddle.distributed.fleet import auto
from paddle.fluid import program_guard
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr

paddle.enable_static()


def make_program_dp2():
    main_program = paddle.fluid.Program()
    start_program = paddle.fluid.Program()
    with paddle.static.program_guard(main_program, start_program):
        x = paddle.static.data(name='x', shape=[4, 12, 16], dtype='float32')
        x.stop_gradient = False
        auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["x"]),
                          ["x", None, None])
        out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
    return main_program, start_program


def parallelizer(program_func, rank):
    from paddle.distributed.auto_parallel.completion import Completer
    from paddle.distributed.auto_parallel.partitioner import Partitioner
    from paddle.distributed.auto_parallel.dist_context import DistributedContext

    main_program, start_program = program_func()

    dist_context = DistributedContext()
    completer = Completer(dist_context)
    completer.complete_forward_annotation(main_program)
    dist_context.block_state.parse_forward_blocks(main_program)

    partitioner = Partitioner(dist_context, rank)
    dist_main_prog, _, _ = partitioner.partition(main_program, start_program, [])

    return dist_main_prog, dist_context


class TestDistSplit(unittest.TestCase):

    def test_dist_split_dp2(self):
        for rank in range(2):
            dist_main_prog, dist_context = parallelizer(make_program_dp2, rank)
            ops = dist_main_prog.global_block().ops
            op_dist_attr = dist_context.get_op_dist_attr_for_program(ops[0])
            assert op_dist_attr.impl_type == "split"
            assert op_dist_attr.impl_idx == 0


if __name__ == "__main__":
    unittest.main()
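The test exercises both ranks of the two-card mesh in a single process by calling the Completer and Partitioner directly, so no distributed launcher is involved; the assertions verify that the partitioned split op is bound to the dedicated distributed implementation (impl_type == "split", impl_idx == 0). Since implementing is_auto_compatible is the only functional change in dist_split.py, that selection path is presumably where the NotImplementedError was previously hit.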