PaddlePaddle / Paddle
Commit f314b562
Authored May 23, 2023 by liangjianzhong
Merge remote-tracking branch 'zyc/develop' into semi-auto/rule-base
Parents: ecbb1ae6, 42a7b771
Showing 8 changed files with 204 additions and 1 deletion (+204, -1)
paddle/fluid/distributed/auto_parallel/CMakeLists.txt                   +4  -0
paddle/fluid/distributed/auto_parallel/spmd_rules/CMakeLists.txt        +4  -0
paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.cc   +72 -0
paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.h    +58 -0
paddle/phi/api/lib/CMakeLists.txt                                       +2  -1
paddle/phi/api/yaml/generator/api_base.py                               +13 -0
paddle/phi/api/yaml/generator/api_gen.py                                +2  -0
python/paddle/distributed/auto_parallel/utils.py                        +49 -0
paddle/fluid/distributed/auto_parallel/CMakeLists.txt
...
@@ -5,3 +5,7 @@ cc_library(
  phi_enforce)

add_subdirectory(test)
add_subdirectory(spmd_rules)

cc_library(auto_parallel DEPS device_mesh process_mesh dist_attr dist_mapper
                              dist_tensor_spec)
paddle/fluid/distributed/auto_parallel/spmd_rules/CMakeLists.txt
new file (mode 0 → 100644)
cc_library(
  dist_tensor_spec
  SRCS dist_tensor_spec.cc
  DEPS dist_attr)
paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.cc
new file (mode 0 → 100644)
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.h"
#include "paddle/fluid/distributed/auto_parallel/process_mesh.h"
namespace paddle {
namespace distributed {
namespace auto_parallel {

DistTensorSpec::DistTensorSpec(const std::vector<int64_t>& shape,
                               const TensorDistAttr& dist_attr) {
  shape_.assign(shape.begin(), shape.end());
  // we should merge the new distributed attributes with the original one
  // after inferencing, thus we get a copy of the original one
  dist_attr_.copy_from(dist_attr);
}

DistTensorSpec::~DistTensorSpec() {}

DistTensorSpec::DistTensorSpec(const Tensor& tensor) {
  shape_ = tensor.shape();

  std::vector<int64_t> pm_shape, pm_ids;
  pm_shape = {4};
  pm_ids = {0, 1, 2, 3};
  std::vector<std::string> dim_name = {"mp"};
  ProcessMesh pm(pm_shape, pm_ids, dim_name);

  std::vector<int64_t> dims_mapping = {-1, 0};

  TensorDistAttr dist_attr;
  dist_attr.set_process_mesh(pm);
  dist_attr.set_dims_mapping(dims_mapping);
  dist_attr_.copy_from(dist_attr);
  std::cout << dist_attr_;
}

const std::vector<int64_t>& DistTensorSpec::get_dims_mapping() {
  return dist_attr_.dims_mapping();
}

void DistTensorSpec::set_dims_mapping(
    const std::vector<int64_t>& dims_mapping) {
  dist_attr_.set_dims_mapping(dims_mapping);
}

const ProcessMesh& DistTensorSpec::get_process_mesh() {
  return dist_attr_.process_mesh();
}

void DistTensorSpec::set_process_mesh(const ProcessMesh& process_mesh) {
  dist_attr_.set_process_mesh(process_mesh);
}

const std::vector<int64_t>& DistTensorSpec::get_shape() { return shape_; }

}  // namespace auto_parallel
}  // namespace distributed
}  // namespace paddle
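Note that the Tensor-based constructor above fills in a hard-coded placeholder mesh and dims_mapping rather than deriving them from the tensor. A minimal sketch of what that placeholder describes, using only the ProcessMesh constructor already called above (the helper name make_placeholder_mesh is illustrative):

// Sketch: the placeholder layout hard-coded in DistTensorSpec(const Tensor&).
// The mesh is 1-D ("mp") over processes {0, 1, 2, 3}; dims_mapping = {-1, 0}
// marks the tensor's first dimension as replicated and its second dimension
// as sharded across mesh dimension 0, i.e. split into 4 shards.
#include <cstdint>
#include <string>
#include <vector>

#include "paddle/fluid/distributed/auto_parallel/process_mesh.h"

paddle::distributed::auto_parallel::ProcessMesh make_placeholder_mesh() {
  std::vector<int64_t> pm_shape = {4};          // one mesh axis of size 4
  std::vector<int64_t> pm_ids = {0, 1, 2, 3};   // participating process ranks
  std::vector<std::string> dim_names = {"mp"};  // model-parallel axis name
  return paddle::distributed::auto_parallel::ProcessMesh(pm_shape, pm_ids,
                                                         dim_names);
}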
paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.h
new file (mode 0 → 100644)
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/distributed/auto_parallel/dist_attr.h"
#include "paddle/phi/api/include/tensor.h"
namespace paddle {
namespace distributed {
namespace auto_parallel {

/**
 * A unified data class for inferring distributed attributes
 * in both dygraph mode and static mode
 */
class DistTensorSpec {
 public:
  DistTensorSpec(const std::vector<int64_t>& shape,
                 const TensorDistAttr& dist_attr);

  explicit DistTensorSpec(const Tensor& tensor);

  ~DistTensorSpec();

  // get dims_mapping from dist_attr_
  const std::vector<int64_t>& get_dims_mapping();

  // set dims_mapping in dist_attr_
  void set_dims_mapping(const std::vector<int64_t>& dims_mapping);

  // get process_mesh from dist_attr_
  const ProcessMesh& get_process_mesh();

  // set process_mesh in dist_attr_
  void set_process_mesh(const ProcessMesh& process_mesh);

  const std::vector<int64_t>& get_shape();

 private:
  std::vector<int64_t> shape_;

  // distributed attributes of the corresponding tensor
  TensorDistAttr dist_attr_;
};

}  // namespace auto_parallel
}  // namespace distributed
}  // namespace paddle
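For orientation, a minimal usage sketch of the class declared above (the shape and mapping values are illustrative; it relies only on the constructors and accessors in this header):

// Sketch: describe a [64, 32] tensor whose second dimension is sharded
// (dims_mapping {-1, 0}), then read the mapping back.
#include <cstdint>
#include <iostream>
#include <vector>

#include "paddle/fluid/distributed/auto_parallel/dist_attr.h"
#include "paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.h"

int main() {
  using namespace paddle::distributed::auto_parallel;

  TensorDistAttr dist_attr;
  dist_attr.set_dims_mapping({-1, 0});  // dim 0 replicated, dim 1 sharded

  DistTensorSpec spec({64, 32}, dist_attr);
  for (int64_t d : spec.get_dims_mapping()) {
    std::cout << d << " ";  // prints: -1 0
  }
  std::cout << std::endl;
  return 0;
}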
paddle/phi/api/lib/CMakeLists.txt
...
@@ -395,7 +395,8 @@ cc_library(
   phi_data_transform
   api_custom_impl
   api_tensor_utils
-  phi_profiler)
+  phi_profiler
+  dist_tensor_spec)

cc_library(
  phi_bw_function_api
  SRCS ${bw_api_source_file} ${fused_bw_api_source_file}
...
paddle/phi/api/yaml/generator/api_base.py
...
@@ -1278,6 +1278,17 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
        }}
"""

    def gen_dist_tensor_code(self):
        # define the DistTensorSpec vector for input and output tensors
        api_code = "\n    std::vector<paddle::distributed::auto_parallel::DistTensorSpec> input_specs;\n"

        # get DistTensorSpec for each input tensor
        for tensor_name in self.inputs['names']:
            api_code += f"    input_specs.emplace_back(paddle::distributed::auto_parallel::DistTensorSpec({tensor_name}));\n"

        api_code += "\n"
        return api_code

    def gene_base_api_code(self, inplace_flag=False):
        api_func_name = self.get_api_func_name()
        if inplace_flag and api_func_name[-1] != '_':
...
@@ -1286,6 +1297,8 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
PADDLE_API {self.get_return_type(inplace_flag)} {api_func_name}({self.get_define_args(inplace_flag)}) {{
{self.gene_kernel_select()}
"""

        if api_func_name == 'matmul':
            api_code += self.gen_dist_tensor_code()

        if len(self.kernel['func']) > 1:
            kernel_dispatch_code = ''
...
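Read off the strings emitted above, the block that gen_dist_tensor_code() splices into the generated matmul wrapper would look roughly like this (the input names x and y are illustrative):

// Sketch of the generated C++: one DistTensorSpec per input tensor of the API.
std::vector<paddle::distributed::auto_parallel::DistTensorSpec> input_specs;
input_specs.emplace_back(paddle::distributed::auto_parallel::DistTensorSpec(x));
input_specs.emplace_back(paddle::distributed::auto_parallel::DistTensorSpec(y));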
paddle/phi/api/yaml/generator/api_gen.py
...
@@ -379,6 +379,8 @@ def source_include(header_file_path):
#include "paddle/phi/api/profiler/event_tracing.h"
#include "paddle/phi/api/profiler/supplement_tracing.h"
#include "paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.h"
DECLARE_bool(conv2d_disable_cudnn);
DECLARE_int32(low_precision_op_list);
"""
...
python/paddle/distributed/auto_parallel/utils.py
...
@@ -2355,3 +2355,52 @@ def is_dep_skip_op(op):
            return True
    return False
# def wrap_data_for_completion(
#     dist_op: DistributedOperator,
#     input_names: list,
#     output_names: list,
#     attr_names: list
# ):
#     """
#     Get data used in inferring distributed attributes, including:
#       1. DistTensorSpec for each input and output tensor of this dist_op.
#       2. Operator attributes of this dist_op, e.g. transpose_x in matmul op.
#
#     Args:
#         dist_op: the DistributedOperator
#         input_names: list, name of the dist_op's input tensors
#         output_names: list, name of the dist_op's output tensors
#         attr_names: list, attribute name of the dist_op's corresponding serial op
#
#     Returns:
#         input_specs: list, DistTensorSpec for each input tensor of the dist_op
#         output_specs: list, DistTensorSpec for each output tensor of the dist_op
#         attrs: dict, attribute map of the dist op
#     """
#
#     input_specs = []
#     output_specs = []
#     attrs = {}
#
#     serial_op = dist_op.serial_op
#
#     # Construct each input tensor's DistTensorSpec with shape and dist_attr
#     for name in input_names:
#         tensor_dist_attr = dist_op.dist_attr.get_input_dist_attr(name)
#         var = serial_op.block._var_recursive(name)
#         tensor_shape = var.shape
#         dist_spec = DistTensorSpec(tensor_shape, tensor_dist_attr)
#         input_specs.append(dist_spec)
#
#     # Construct each output tensor's DistTensorSpec with shape and dist_attr
#     for name in output_names:
#         tensor_dist_attr = dist_op.dist_attr.get_output_dist_attr(name)
#         var = serial_op.block._var_recursive(name)
#         tensor_shape = var.shape
#         dist_spec = DistTensorSpec(tensor_shape, tensor_dist_attr)
#         output_specs.append(dist_spec)
#
#     for attr_name in attr_names:
#         attrs[attr_name] = serial_op.desc.attr(attr_name)