BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Unverified commit 6bdf1261, authored Nov 24, 2022 by zhaoyingli, committed by GitHub on Nov 24, 2022
[AutoParallel] dist_scale (#48295)
Parent: 4f975b41
Showing 3 changed files with 163 additions and 0 deletions (+163 −0)
python/paddle/distributed/auto_parallel/operators/__init__.py  +1 −0
python/paddle/distributed/auto_parallel/operators/dist_scale.py  +88 −0
python/paddle/fluid/tests/unittests/auto_parallel/test_dist_scale.py  +74 −0
python/paddle/distributed/auto_parallel/operators/__init__.py
@@ -35,3 +35,4 @@ from . import dist_fused_attention
 from . import dist_reduce_sum_p
 from . import dist_shape
 from . import dist_assign
+from . import dist_scale
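This one-line import is load-bearing: dist_scale.py (next) registers its container and implementation with the auto-parallel engine at module import time, via the register_distributed_operator_impl_container and register_distributed_operator_impl calls at its top level, so lookup of the op type "scale" only succeeds once this import exists.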
python/paddle/distributed/auto_parallel/operators/dist_scale.py
new file (mode 100644)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from .dist_default import DistributedDefaultImpl0
from ..utils import compute_compatible_and_update_dim_mapping


class DistributedScale(DistributedOperatorImplContainer):
    """Container holding the distributed implementations of the scale op."""

    def __init__(self, op_type):
        super().__init__(op_type)


register_distributed_operator_impl_container(DistributedScale("scale"))


class DistributedScaleImpl(DistributedOperatorImpl):
    """Distributed implementation of scale. Scale is elementwise, so any
    sharding of the input is valid as long as the output is sharded the
    same way; forward/backward fall through to the default impl."""

    def __init__(self, name):
        super().__init__(name)
        self._forward_implemented = True
        self._backward_implemented = True

    def is_input_compatible(self, dist_op):
        return True

    def is_output_compatible(self, dist_op):
        return True

    def is_auto_compatible(self, dist_op):
        if (not self.is_input_compatible(dist_op)) or (
            not self.is_output_compatible(dist_op)
        ):
            return False

        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        # X and Out must carry identical dims mappings.
        if x_dims_mapping != out_dims_mapping:
            return False

        return True

    def update_dims_mapping(self, dist_op):
        changed = False
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        # Unify the sharding of each dimension between X and Out.
        for i in range(len(x_dims_mapping)):
            dim_changed = compute_compatible_and_update_dim_mapping(
                [x_dims_mapping, out_dims_mapping], [i, i]
            )
            if dim_changed:
                changed = True

        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):
        DistributedDefaultImpl0.forward(ctx, *args, **kwargs)

    @staticmethod
    def backward(ctx, *args, **kwargs):
        DistributedDefaultImpl0.backward(ctx, *args, **kwargs)


register_distributed_operator_impl("scale", DistributedScaleImpl("scale"))
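For intuition about the rule update_dims_mapping enforces: scale is elementwise, so the i-th dimension of X and Out must be sharded the same way. The sketch below is a minimal conceptual illustration of that per-dimension unification, not Paddle's actual compute_compatible_and_update_dim_mapping. The convention that -1 means "replicated" and a non-negative entry names a process-mesh axis follows Paddle's dims_mapping; the function name unify_dim and the conflict handling are assumptions for illustration only.

# Conceptual sketch only (hypothetical helper, not a Paddle API): unify the
# sharding of one dimension between an input and an output dims_mapping.
# -1 means "replicated"; a non-negative value is the index of the
# process-mesh axis the dimension is sharded along.
def unify_dim(x_mapping, out_mapping, i):
    a, b = x_mapping[i], out_mapping[i]
    if a == b:
        return False  # already compatible, nothing to update
    if a == -1:
        x_mapping[i] = b  # X was replicated: adopt Out's sharding
    elif b == -1:
        out_mapping[i] = a  # Out was replicated: adopt X's sharding
    else:
        raise ValueError(f"dim {i} sharded on different mesh axes: {a} vs {b}")
    return True

# Mirrors the loop in update_dims_mapping: x sharded on mesh axis 0 at dim 1
# propagates to the (initially fully replicated) output. A list is used so
# every dimension is visited rather than short-circuiting.
x_mapping, out_mapping = [-1, 0, -1], [-1, -1, -1]
changed = any([unify_dim(x_mapping, out_mapping, i) for i in range(3)])
assert changed and out_mapping == [-1, 0, -1]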
python/paddle/fluid/tests/unittests/auto_parallel/test_dist_scale.py
new file (mode 100644)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import paddle
from paddle.distributed.fleet import auto

paddle.enable_static()


def make_program():
    main_program = paddle.fluid.Program()
    start_program = paddle.fluid.Program()
    with paddle.static.program_guard(main_program, start_program):
        x = paddle.static.data(name='x', shape=[4, 4, 8], dtype='float32')
        x.stop_gradient = False
        # Shard dim 1 of x across the 2-rank mesh axis "x".
        auto.shard_tensor(
            x, auto.ProcessMesh([0, 1], dim_names=["x"]), [None, "x", None]
        )
        res = paddle.scale(x, scale=2.0, bias=1.0)
    return main_program, start_program


def parallelizer(program_func, rank):
    from paddle.distributed.auto_parallel.completion import Completer
    from paddle.distributed.auto_parallel.partitioner import Partitioner
    from paddle.distributed.auto_parallel.dist_context import DistributedContext

    main_program, start_program = program_func()

    dist_context = DistributedContext()
    completer = Completer(dist_context)
    completer.complete_forward_annotation(main_program)
    dist_context.block_state.parse_forward_blocks(main_program)

    partitioner = Partitioner(dist_context, rank)
    dist_main_prog, _, _ = partitioner.partition(main_program, start_program, [])

    return dist_main_prog, dist_context


class TestDistScale(unittest.TestCase):
    def test_dist_scale(self):
        dist_main_prog, dist_context = parallelizer(make_program, 0)
        ops = dist_main_prog.global_block().ops
        scale_op = ops[0]
        dist_op = dist_context.get_dist_op_for_program(scale_op)
        # The scale op should be matched to the dedicated distributed impl.
        assert dist_op.dist_attr.impl_type == "scale"
        assert dist_op.dist_attr.impl_idx == 0

        in_name = scale_op.input_arg_names[0]
        out_name = scale_op.output_arg_names[0]
        in_dims_mapping = dist_op.dist_attr.get_input_dims_mapping(in_name)
        out_dims_mapping = dist_op.dist_attr.get_output_dims_mapping(out_name)
        assert in_dims_mapping == out_dims_mapping


if __name__ == "__main__":
    unittest.main()
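As a quick reminder of the op semantics being parallelized, and separate from the static-graph test above: with the default bias_after_scale=True, paddle.scale computes scale * x + bias elementwise, which is exactly why any sharding is legal as long as the input and output dims mappings agree.

# Dynamic-mode sanity check of the scale op's semantics.
import paddle

paddle.disable_static()
x = paddle.to_tensor([1.0, 2.0, 3.0])
print(paddle.scale(x, scale=2.0, bias=1.0))  # [3., 5., 7.]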
登录