Unverified commit c17e6af8, authored by Haohongxiang on Aug 16, 2022 and committed via GitHub on Aug 16, 2022.
[Fleet] Reconstruct of Fleet API in Dygraph Mode (#44922)
* reconstruct_of_fleet_api
* update
Parent: 6452ab3b
Showing 2 changed files with 27 additions and 18 deletions (+27, -18):

python/paddle/distributed/fleet/meta_parallel/parallel_layers/mp_layers.py  (+22, -16)
python/paddle/distributed/fleet/utils/hybrid_parallel_util.py  (+5, -2)
python/paddle/distributed/fleet/meta_parallel/parallel_layers/mp_layers.py
@@ -41,14 +41,16 @@ class VocabParallelEmbedding(Layer):
                  num_embeddings,
                  embedding_dim,
                  weight_attr=None,
+                 mp_group=None,
                  name=None):
         super(VocabParallelEmbedding, self).__init__()
 
         self.model_parallel_group = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group(
-        )
+        ) if mp_group is None else mp_group
         self.world_size = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size(
-        )
-        self.rank = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank()
+        ) if mp_group is None else mp_group.nranks
+        self.rank = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank(
+        ) if mp_group is None else mp_group.rank
 
         self.origin_num_embeddings = num_embeddings
         self.is_mp = (self.world_size > 1)
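Each constructor touched in this file follows the same fallback: when mp_group is None, read the group, its world size and its rank from the globally registered hybrid-parallel topology; otherwise take them from the group passed in (mp_group, mp_group.nranks, mp_group.rank). A rough distillation of that logic, for orientation only; the helper name _resolve_mp_group is invented here, the diff itself inlines the ternaries:

    # Illustrative only: mirrors the ternary fallback added in this commit.
    from paddle.distributed import fleet

    def _resolve_mp_group(mp_group=None):
        # No explicit group: read everything from fleet's hybrid communicate
        # group, which is what tp._HYBRID_PARALLEL_GROUP refers to once the
        # hybrid-parallel setup has run.
        if mp_group is None:
            hcg = fleet.get_hybrid_communicate_group()
            return (hcg.get_model_parallel_group(),
                    hcg.get_model_parallel_world_size(),
                    hcg.get_model_parallel_rank())
        # Explicit group: size and rank come straight from the Group object.
        return mp_group, mp_group.nranks, mp_group.rank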
@@ -108,14 +110,15 @@ class ColumnParallelLinear(Layer):
                  weight_attr=None,
                  has_bias=None,
                  gather_output=True,
-                 name=None,
-                 fuse_matmul_bias=False):
+                 fuse_matmul_bias=False,
+                 mp_group=None,
+                 name=None):
         super(ColumnParallelLinear, self).__init__()
 
         self.model_parallel_group = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group(
-        )
+        ) if mp_group is None else mp_group
         self.world_size = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size(
-        )
+        ) if mp_group is None else mp_group.nranks
         self._name = name
         self.is_mp = (self.world_size > 1)
@@ -197,8 +200,9 @@ class RowParallelLinear(Layer):
                  weight_attr=None,
                  has_bias=True,
                  input_is_parallel=False,
-                 name=None,
-                 fuse_matmul_bias=False):
+                 fuse_matmul_bias=False,
+                 mp_group=None,
+                 name=None):
         super(RowParallelLinear, self).__init__()
 
         self.in_features = in_features
@@ -209,10 +213,11 @@ class RowParallelLinear(Layer):
         self._name = name
 
         self.model_parallel_group = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group(
-        )
+        ) if mp_group is None else mp_group
         self.world_size = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size(
-        )
-        self.rank = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank()
+        ) if mp_group is None else mp_group.nranks
+        self.rank = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank(
+        ) if mp_group is None else mp_group.rank
         self.is_mp = (self.world_size > 1)
 
         assert in_features % self.world_size == 0, (
@@ -288,14 +293,15 @@ class RowParallelLinear(Layer):
 
 class ParallelCrossEntropy(Layer):
 
-    def __init__(self, name=None):
+    def __init__(self, mp_group=None, name=None):
         super(ParallelCrossEntropy, self).__init__()
         self.name = name
         self.model_parallel_group = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group(
-        )
+        ) if mp_group is None else mp_group
         self.world_size = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size(
-        )
-        self.rank = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank()
+        ) if mp_group is None else mp_group.nranks
+        self.rank = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank(
+        ) if mp_group is None else mp_group.rank
 
     def forward(self, input, label):
         loss = paddle.distributed.collective._c_softmax_with_cross_entropy(
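Taken together, the mp_layers.py changes add an optional mp_group argument to VocabParallelEmbedding, ColumnParallelLinear, RowParallelLinear and ParallelCrossEntropy, so a caller can supply its own tensor-parallel communication group instead of the one registered by the hybrid-parallel setup (and, because the ternary short-circuits, the global setup is not consulted when a group is given). A minimal usage sketch, assuming a two-GPU collective launch; the ranks list, layer sizes and import path through paddle.distributed.fleet.meta_parallel are illustrative assumptions, not part of the diff:

    import paddle.distributed as dist
    from paddle.distributed.fleet.meta_parallel import (ColumnParallelLinear,
                                                        RowParallelLinear,
                                                        VocabParallelEmbedding)

    dist.init_parallel_env()
    # Hypothetical 2-way tensor-parallel group; with mp_group=None the layers
    # keep their old behaviour and read the group from the hybrid setup.
    mp_group = dist.new_group(ranks=[0, 1])

    embedding = VocabParallelEmbedding(1024, 64, mp_group=mp_group)
    column_fc = ColumnParallelLinear(64, 256, has_bias=True,
                                     gather_output=False, mp_group=mp_group)
    row_fc = RowParallelLinear(256, 64, has_bias=True,
                               input_is_parallel=True, mp_group=mp_group)

ParallelCrossEntropy gains the same keyword, so the loss computation can be pinned to the same group as the layers above.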
python/paddle/distributed/fleet/utils/hybrid_parallel_util.py
@@ -109,16 +109,19 @@ def _broadcast_data_help(data, shape, dtype, hcg):
 
 def broadcast_input_data(hcg, *inputs, **kwargs):
+    cur_device = paddle.get_device()
     for v in inputs:
-        if isinstance(v, core.VarBase):
+        if isinstance(v, (core.VarBase, core.eager.Tensor)):
             with framework.no_grad():
+                v = v.cuda() if "gpu" in cur_device else v
                 _broadcast_data_help(v, v.shape, v.dtype, hcg)
         else:
             logger.error("it doesn't support data type {}".format(type(v)))
 
     for k, v in kwargs.items():
-        if isinstance(v, core.VarBase):
+        if isinstance(v, (core.VarBase, core.eager.Tensor)):
             with framework.no_grad():
+                v = v.cuda() if "gpu" in cur_device else v
                 _broadcast_data_help(v, v.shape, v.dtype, hcg)
                 kwargs[k] = v
         else:
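The hybrid_parallel_util.py change widens the type check so eager-mode tensors (core.eager.Tensor) pass through alongside the legacy core.VarBase, and moves inputs onto the GPU with v.cuda() before broadcasting when the current device is a GPU. A rough sketch of how broadcast_input_data is driven, under stated assumptions: a multi-GPU launch with a DistributedStrategy whose hybrid_configs enable model parallelism, and toy tensors standing in for a real micro-batch:

    import paddle
    from paddle.distributed import fleet
    from paddle.distributed.fleet.utils.hybrid_parallel_util import broadcast_input_data

    # Assumes `python -m paddle.distributed.launch ...` and that fleet was
    # initialized with hybrid_configs elsewhere in the script.
    fleet.init(is_collective=True)
    hcg = fleet.get_hybrid_communicate_group()

    # After this commit, plain eager Tensors are accepted by the isinstance
    # gate; CPU-resident inputs are moved to the GPU before the broadcast so
    # every model-parallel rank ends up with the same batch.
    tokens = paddle.to_tensor([[1, 2, 3, 4]])
    labels = paddle.to_tensor([[2, 3, 4, 5]])
    broadcast_input_data(hcg, tokens, labels)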