add comm cost (#42784)

Commit efaaf239 (unverified) in BaiXuePrincess/Paddle, a fork of PaddlePaddle/Paddle.
Authored on May 18, 2022 by caozhou; committed via GitHub on May 18, 2022.
Parent commit: 6e45a0fb
Showing 5 changed files with 880 additions and 6 deletions (+880, -6).
Changed files:

python/paddle/distributed/auto_parallel/cost/base_cost.py (+2, -3)
python/paddle/distributed/auto_parallel/cost/comp_op_cost.py (+623, -2)
python/paddle/fluid/tests/unittests/auto_parallel/CMakeLists.txt (+1, -0)
python/paddle/fluid/tests/unittests/auto_parallel/test_comm_cost.py (+1, -1)
python/paddle/fluid/tests/unittests/auto_parallel/test_comp_cost.py (+253, -0)
python/paddle/distributed/auto_parallel/cost/base_cost.py (+2, -3)

@@ -153,6 +153,7 @@ class CommContext:
             return
         self.beta = {}
         self.hops = {}
+        assert cluster is not None
         self.cluster = cluster
         # if cluster has no info about those vars, it will be set by default
         self.base_ring = None

@@ -338,8 +339,6 @@ class Cost:
 class OpCost:
     def __init__(self, op=None, op_desc=None):
-        assert (op is not None and op_desc is None) or (op is None and
-                                                        op_desc is not None)
         self._op = op
         self._op_desc = op_desc
         self._cost = None
...
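
The first hunk tightens CommContext's contract: a cluster must now be supplied at construction time. A minimal sketch of what that implies for callers, assuming CommContext takes the cluster directly in its constructor as the hunk shows (the JSON path below is hypothetical):

from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.cost.base_cost import CommContext

# Build a cluster description first; the new assert rejects cluster=None.
cluster = Cluster()
cluster.build_from_file("auto_parallel_cluster.json")  # hypothetical path
comm_context = CommContext(cluster)
# CommContext(None) would now fail at `assert cluster is not None`.

The second hunk drops the requirement that exactly one of op and op_desc be given, which is what lets the comp-op cost classes below be constructed from a cluster alone.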
python/paddle/distributed/auto_parallel/cost/comp_op_cost.py (+623, -2)

(This diff is collapsed in the original view.)
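
The collapsed diff carries most of the commit's +623 lines: one small cost class per operator, matching the names the new test below imports. Judging from the existing classes in comp_op_cost.py, each addition plausibly follows the same registration pattern; this is a sketch under that assumption, not the literal diff contents:

from paddle.distributed.auto_parallel.cost.base_cost import CompOpCost, register_op_cost

@register_op_cost
class AssignOpCost(CompOpCost):
    OP_TYPE = "assign"

    def __init__(self, op=None, op_desc=None, cluster=None):
        super(AssignOpCost, self).__init__(op=op, op_desc=op_desc, cluster=cluster)

    # Per-op calibration can be overridden later; the defaults keep cost at 0.
    def calc_flops(self):
        return 0

    def calc_time(self):
        return 0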
python/paddle/fluid/tests/unittests/auto_parallel/CMakeLists.txt (+1, -0)

@@ -30,4 +30,5 @@ if(WITH_DISTRIBUTE AND WITH_GPU)
   py_test_modules(test_dist_slice MODULES test_dist_slice ENVS ${dist_ENVS})
   py_test_modules(test_cluster MODULES test_cluster ENVS ${dist_ENVS})
   py_test_modules(test_comm_cost MODULES test_comm_cost ENVS ${dist_ENVS})
+  py_test_modules(test_comp_cost MODULES test_comp_cost ENVS ${dist_ENVS})
 endif()
python/paddle/fluid/tests/unittests/auto_parallel/test_comm_cost.py (+1, -1)

-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
python/paddle/fluid/tests/unittests/auto_parallel/test_comp_cost.py (new file, mode 100644) (+253, -0)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import os
import json

import paddle
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.cost.comp_op_cost import AssignOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import AssignValueOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import BeamSearchOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import BeamSearchDecodeOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import CastOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import ConcatOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseAddOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseAddGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseDivOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseDivGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseMulOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseMulGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseSubOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import EmbeddingOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import EmbeddingGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import FillConstantOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import FillConstantBatchSizeLikeOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import FillConstantBatchSizeLikeGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import GatherOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import GeluOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import GeluGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import GreaterEqualOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import IncrementOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import IsEmptyOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LayerNormOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LayerNormGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LessThanOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LogicalNotOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LogicalAndOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LodResetOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LogOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LookupTableV2OpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LookupTableV2GradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulV2OpCost

from test_cluster import cluster_json


class TestCompOpCost(unittest.TestCase):

    def test_comp_cost(self):
        # Build cluster
        file_dir = os.path.dirname(os.path.abspath(__file__))
        cluster_json_path = os.path.join(file_dir, "auto_parallel_cluster.json")
        cluster_json_object = json.loads(cluster_json)
        with open(cluster_json_path, "w") as cluster_json_file:
            json.dump(cluster_json_object, cluster_json_file)
        cluster = Cluster()
        cluster.build_from_file(cluster_json_path)

        op_cost = AssignOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = AssignValueOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = BeamSearchOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = BeamSearchDecodeOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = CastOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = ConcatOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = ElementwiseAddOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = ElementwiseAddGradOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = ElementwiseDivOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = ElementwiseDivGradOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = ElementwiseMulOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = ElementwiseMulGradOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = ElementwiseSubOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = EmbeddingOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = EmbeddingGradOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = FillConstantOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = FillConstantBatchSizeLikeOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = FillConstantBatchSizeLikeGradOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = GatherOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = GeluOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = GeluGradOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = GreaterEqualOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = IncrementOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = IsEmptyOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = LayerNormOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = LayerNormGradOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = LessThanOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = LogicalNotOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = LogicalAndOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = LodResetOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = LogOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = LookupTableV2OpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = LookupTableV2GradOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = MatmulOpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        op_cost = MatmulV2OpCost(cluster=cluster)
        self.assertTrue(op_cost.flops >= 0)
        self.assertTrue(op_cost.time >= 0)
        self.assertTrue(op_cost.memory >= 0)

        # Remove unnecessary files
        if os.path.exists(cluster_json_path):
            os.remove(cluster_json_path)


if __name__ == "__main__":
    unittest.main()
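
The assertion block above is identical for all 35 cost classes exercised, so the body of test_comp_cost is mechanical repetition. A table-driven form using unittest's subTest would check the same properties; a sketch of a possible refactor (not part of this commit), shown for three of the classes:

# Hypothetical compact form of the same checks, inside test_comp_cost,
# reusing the `cluster` built at the top of the method:
for op_cost_class in [AssignOpCost, AssignValueOpCost, CastOpCost]:
    with self.subTest(op_cost=op_cost_class.__name__):
        op_cost = op_cost_class(cluster=cluster)
        self.assertGreaterEqual(op_cost.flops, 0)
        self.assertGreaterEqual(op_cost.time, 0)
        self.assertGreaterEqual(op_cost.memory, 0)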