Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
e8e47581
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
e8e47581
编写于
12月 27, 2021
作者:
S
ShenLiang
提交者:
GitHub
12月 27, 2021
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
[BugFix]Fix bug in pfp16 in DataParallel (#38378)
* fix bug in pfp16 * fix hip * fix hip
上级
9cfdae91
变更
3
显示空白变更内容
内联
并排
Showing
3 changed files
with
52 additions
and
2 deletions
+52
-2
paddle/fluid/framework/data_type.h
paddle/fluid/framework/data_type.h
+30
-0
paddle/fluid/imperative/reducer.cc
paddle/fluid/imperative/reducer.cc
+11
-1
paddle/fluid/imperative/reducer.cu
paddle/fluid/imperative/reducer.cu
+11
-1
未找到文件。
paddle/fluid/framework/data_type.h
浏览文件 @
e8e47581
...
@@ -89,6 +89,22 @@ struct DataTypeTrait<void> {
...
@@ -89,6 +89,22 @@ struct DataTypeTrait<void> {
_ForEachDataTypeHelper_(callback, int, INT32); \
_ForEachDataTypeHelper_(callback, int, INT32); \
_ForEachDataTypeHelper_(callback, int64_t, INT64);
_ForEachDataTypeHelper_(callback, int64_t, INT64);
// Invokes `callback` once for each (cpp_type, proto_type) pair that
// DataParallel supports on HIP. BF16 is deliberately omitted because HIP
// does not support bfloat16; callers are expected to reject
// VarType_Type_BF16 before dispatching through this list.
#define _ForEachDataTypeForHIP_(callback)                                \
  _ForEachDataTypeHelper_(callback, float, FP32);                        \
  _ForEachDataTypeHelper_(callback, ::paddle::platform::float16, FP16);  \
  _ForEachDataTypeHelper_(callback, double, FP64);                       \
  _ForEachDataTypeHelper_(callback, int, INT32);                         \
  _ForEachDataTypeHelper_(callback, int64_t, INT64);                     \
  _ForEachDataTypeHelper_(callback, bool, BOOL);                         \
  _ForEachDataTypeHelper_(callback, uint8_t, UINT8);                     \
  _ForEachDataTypeHelper_(callback, int16_t, INT16);                     \
  _ForEachDataTypeHelper_(callback, int8_t, INT8);                       \
  _ForEachDataTypeHelper_(callback, ::paddle::platform::complex<float>,  \
                          COMPLEX64);                                    \
  _ForEachDataTypeHelper_(callback, ::paddle::platform::complex<double>, \
                          COMPLEX128);
#define DefineDataTypeTrait(cpp_type, proto_type) \
#define DefineDataTypeTrait(cpp_type, proto_type) \
template <> \
template <> \
struct DataTypeTrait<cpp_type> { \
struct DataTypeTrait<cpp_type> { \
...
@@ -147,6 +163,20 @@ inline void VisitDataTypeTiny(proto::VarType::Type type, Visitor visitor) {
...
@@ -147,6 +163,20 @@ inline void VisitDataTypeTiny(proto::VarType::Type type, Visitor visitor) {
#undef VisitDataTypeCallbackTiny
#undef VisitDataTypeCallbackTiny
}
}
// Runtime-to-compile-time dispatch for the HIP-safe type list: calls
// `visitor.template apply<T>()` where T is the C++ type whose proto tag
// equals `type`. Only the types enumerated by _ForEachDataTypeForHIP_ are
// handled (notably, no BF16); if `type` matches none of them, the function
// returns without invoking the visitor.
template <typename Visitor>
inline void VisitDataTypeForHIP(proto::VarType::Type type, Visitor visitor) {
// do { ... } while (0) keeps each macro expansion a single statement; the
// `return` exits this function as soon as the matching type is found.
#define VisitDataTypeCallbackHIP(cpp_type, proto_type) \
  do {                                                 \
    if (type == proto_type) {                          \
      visitor.template apply<cpp_type>();              \
      return;                                          \
    }                                                  \
  } while (0)
  _ForEachDataTypeForHIP_(VisitDataTypeCallbackHIP);
#undef VisitDataTypeCallbackHIP
}
extern
std
::
string
DataTypeToString
(
const
proto
::
VarType
::
Type
type
);
extern
std
::
string
DataTypeToString
(
const
proto
::
VarType
::
Type
type
);
extern
size_t
SizeOfType
(
proto
::
VarType
::
Type
type
);
extern
size_t
SizeOfType
(
proto
::
VarType
::
Type
type
);
inline
std
::
ostream
&
operator
<<
(
std
::
ostream
&
out
,
inline
std
::
ostream
&
operator
<<
(
std
::
ostream
&
out
,
...
...
paddle/fluid/imperative/reducer.cc
浏览文件 @
e8e47581
...
@@ -48,9 +48,19 @@ void Group::DivNRanks(const platform::DeviceContext &context, int64_t nranks) {
...
@@ -48,9 +48,19 @@ void Group::DivNRanks(const platform::DeviceContext &context, int64_t nranks) {
}
else
if
(
platform
::
is_cpu_place
(
tensor
->
place
()))
{
}
else
if
(
platform
::
is_cpu_place
(
tensor
->
place
()))
{
VLOG
(
4
)
<<
"before div 2"
<<
*
tensor
;
VLOG
(
4
)
<<
"before div 2"
<<
*
tensor
;
VLOG
(
4
)
<<
"NDiv for cpu devices : rank = "
<<
nranks
;
VLOG
(
4
)
<<
"NDiv for cpu devices : rank = "
<<
nranks
;
framework
::
VisitDataTypeSmall
(
#ifdef PADDLE_WITH_HIP
if
(
dtype_
==
paddle
::
framework
::
proto
::
VarType_Type_BF16
)
{
PADDLE_THROW
(
paddle
::
platform
::
errors
::
Fatal
(
"Unsupport BF16 in DataParallel for now"
));
}
framework
::
VisitDataTypeForHIP
(
dtype_
,
DivNRanksForAllReduce
<
platform
::
CPUDeviceContext
>
(
dtype_
,
DivNRanksForAllReduce
<
platform
::
CPUDeviceContext
>
(
tensor
,
nranks
,
context
));
tensor
,
nranks
,
context
));
#else
framework
::
VisitDataType
(
dtype_
,
DivNRanksForAllReduce
<
platform
::
CPUDeviceContext
>
(
tensor
,
nranks
,
context
));
#endif
VLOG
(
4
)
<<
"after div 2"
<<
*
tensor
;
VLOG
(
4
)
<<
"after div 2"
<<
*
tensor
;
}
else
if
(
platform
::
is_xpu_place
(
tensor
->
place
()))
{
}
else
if
(
platform
::
is_xpu_place
(
tensor
->
place
()))
{
#ifdef PADDLE_WITH_XPU_BKCL
#ifdef PADDLE_WITH_XPU_BKCL
...
...
paddle/fluid/imperative/reducer.cu
浏览文件 @
e8e47581
...
@@ -20,9 +20,19 @@ namespace imperative {
...
@@ -20,9 +20,19 @@ namespace imperative {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
// Dispatches a DivNRanksForAllReduce functor over this group's dtype_ —
// presumably scaling `tensor` by 1/nranks for gradient averaging in
// DataParallel (the functor body is defined elsewhere; confirm there).
void Group::DivNRanks(framework::Tensor *tensor, int64_t nranks,
                      const platform::DeviceContext &context) {
#ifdef PADDLE_WITH_HIP
  // HIP has no bfloat16 support, so BF16 must be rejected up front and the
  // dispatch restricted to the HIP-safe type list.
  if (dtype_ == paddle::framework::proto::VarType_Type_BF16) {
    PADDLE_THROW(paddle::platform::errors::Fatal(
        "Unsupport BF16 in DataParallel for now"));
  }
  framework::VisitDataTypeForHIP(
      dtype_, DivNRanksForAllReduce<platform::CUDADeviceContext>(
                  tensor, nranks, context));
#else
  // CUDA path: the full type set (including BF16) is available.
  framework::VisitDataType(
      dtype_, DivNRanksForAllReduce<platform::CUDADeviceContext>(
                  tensor, nranks, context));
#endif
}
#endif
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录