Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 6833ecfe (unverified)
Authored on Sep 14, 2022 by sneaxiy; committed via GitHub on Sep 14, 2022
Fix DistributedFusedLAMB NaN problem (#46011)

* fix distributed_fused_lamb nan
* remove CUDA_ASSERT
Parent: 65dd828e
Showing 1 changed file with 59 additions and 10 deletions (+59 −10).
paddle/fluid/operators/optimizers/distributed_fused_lamb_op.cu  (+59 −10)
@@ -1193,6 +1193,38 @@ static void PrintAllMinMaxRange(const framework::ExecutionContext &ctx,
  }
}

template <typename T>
static bool HasNanInf(const phi::GPUContext &dev_ctx, const T *x, int numel) {
  if (numel <= 0) return false;
  cub::TransformInputIterator<bool, IsNanInfFunctor<T>, const T *> iter(
      x, IsNanInfFunctor<T>());
  memory::Buffer buffer(dev_ctx.GetPlace());
  memory::Buffer out(dev_ctx.GetPlace());
  CubDeviceReduce(iter,
                  out.Alloc<bool>(1),
                  numel,
                  OrFunctor(),
                  false,
                  dev_ctx.stream(),
                  &buffer);
  bool flag;
#ifdef PADDLE_WITH_HIP
  PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(&flag,
                                            out.Get<bool>(),
                                            sizeof(flag),
                                            hipMemcpyDeviceToHost,
                                            dev_ctx.stream()));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(&flag,
                                             out.Get<bool>(),
                                             sizeof(flag),
                                             cudaMemcpyDeviceToHost,
                                             dev_ctx.stream()));
#endif
  dev_ctx.Wait();
  return flag;
}

static void CheckHasNanInfGrad(const float *fp32_grad,
                               int fp32_numel,
                               const platform::float16 *fp16_grad,
...
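The new `HasNanInf` helper relies on Paddle-internal pieces that are not part of this diff (`memory::Buffer`, `CubDeviceReduce`, `IsNanInfFunctor`, `OrFunctor`). As a rough, self-contained sketch of the same technique — mapping each element to a NaN/Inf predicate through `cub::TransformInputIterator` and folding the predicates with a logical-OR `cub::DeviceReduce::Reduce` — the following standalone CUDA program illustrates the idea. All names in it (`HasNanInfSketch`, `IsNanInf`, `LogicalOr`) are hypothetical; it is not the Paddle implementation.

```cpp
// Minimal sketch (not Paddle code): detect NaN/Inf in a device array with CUB.
#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cfloat>
#include <cmath>
#include <cstdio>
#include <vector>

struct IsNanInf {
  // True when x is NaN (x != x) or +/-infinity (outside the finite float range).
  __host__ __device__ bool operator()(float x) const {
    return (x != x) || (x > FLT_MAX) || (x < -FLT_MAX);
  }
};

struct LogicalOr {
  __host__ __device__ bool operator()(bool a, bool b) const { return a || b; }
};

static bool HasNanInfSketch(const float *d_x, int numel, cudaStream_t stream) {
  if (numel <= 0) return false;
  // Lazily map each element to its NaN/Inf predicate while CUB reduces it.
  cub::TransformInputIterator<bool, IsNanInf, const float *> iter(d_x, IsNanInf());

  bool *d_flag = nullptr;
  cudaMalloc(&d_flag, sizeof(bool));

  // Standard two-phase CUB call: query temporary-storage size, then reduce.
  void *d_temp = nullptr;
  size_t temp_bytes = 0;
  cub::DeviceReduce::Reduce(d_temp, temp_bytes, iter, d_flag, numel,
                            LogicalOr(), false, stream);
  cudaMalloc(&d_temp, temp_bytes);
  cub::DeviceReduce::Reduce(d_temp, temp_bytes, iter, d_flag, numel,
                            LogicalOr(), false, stream);

  bool flag = false;
  cudaMemcpyAsync(&flag, d_flag, sizeof(flag), cudaMemcpyDeviceToHost, stream);
  cudaStreamSynchronize(stream);

  cudaFree(d_temp);
  cudaFree(d_flag);
  return flag;
}

int main() {
  std::vector<float> h_x(1 << 20, 1.0f);
  h_x[12345] = NAN;  // inject a single bad value

  float *d_x = nullptr;
  cudaMalloc(&d_x, h_x.size() * sizeof(float));
  cudaMemcpy(d_x, h_x.data(), h_x.size() * sizeof(float), cudaMemcpyHostToDevice);

  std::printf("has nan/inf: %d\n",
              HasNanInfSketch(d_x, static_cast<int>(h_x.size()), nullptr) ? 1 : 0);
  cudaFree(d_x);
  return 0;
}
```

In the real helper, `CubDeviceReduce` and `memory::Buffer` presumably take care of this two-phase size-query/reduce pattern and of reusing temporary GPU storage, rather than allocating and freeing it per call as the sketch does.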
@@ -1830,17 +1862,11 @@ class DistributedFusedLambOpKernel<phi::GPUContext, T>
      } else {
        VLOG(1) << "Grad scale: " << FlattenToString(fp16_scale, 1, place);
      }
      if (nranks > 1) {
        PADDLE_ENFORCE_GPU_SUCCESS(
            platform::dynload::ncclAllReduce(fp32_square_grad_norm,
                                             fp32_square_grad_norm,
                                             1,
                                             ncclFloat32,
                                             ncclSum,
                                             global_comm,
                                             stream));
      }
      // (3) Do ReduceScatter with scale
      VLOG(1) << "FP32 HasNanInf before all reduce: "
              << HasNanInf(dev_ctx, fp32_grad, fp32_numel);
      VLOG(1) << "FP16 HasNanInf before all reduce: "
              << HasNanInf(dev_ctx, fp16_grad, fp16_numel);
      if (local_shard) {
        if (use_hierarchical_allreduce) {
          NCCLReduceScatterWithScale(
...
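Each `HasNanInf` probe added in this commit launches a device-wide reduction, but every probe is streamed into `VLOG(1)`. Assuming Paddle's `VLOG` follows standard glog semantics (the streamed operands are evaluated only when the requested verbosity level is enabled, e.g. with `GLOG_v=1`), the probes cost nothing in a normal run and only execute when verbose logging is switched on for debugging. A minimal sketch of that behaviour with plain glog, outside Paddle:

```cpp
// Minimal glog sketch (not Paddle code): the operands of VLOG(1) are evaluated
// lazily, so an expensive check behind it only runs when verbosity >= 1.
#include <glog/logging.h>

static bool ExpensiveCheck() {
  LOG(INFO) << "ExpensiveCheck actually ran";
  return true;
}

int main(int argc, char **argv) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;

  FLAGS_v = 0;                               // verbose logging disabled
  VLOG(1) << "check: " << ExpensiveCheck();  // ExpensiveCheck() is NOT called

  FLAGS_v = 1;                               // verbose logging enabled
  VLOG(1) << "check: " << ExpensiveCheck();  // ExpensiveCheck() runs and logs
  return 0;
}
```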
@@ -1916,6 +1942,29 @@ class DistributedFusedLambOpKernel<phi::GPUContext, T>
                                     dev_ctx,
                                     fp16_scale);
      }
      VLOG(1) << "FP32 HasNanInf after all reduce: "
              << HasNanInf(dev_ctx, fp32_sum_grad, fp32_numel_each_device);
      VLOG(1) << "FP16 HasNanInf after all reduce: "
              << HasNanInf(dev_ctx, fp16_sum_grad, fp16_numel_each_device);
      CheckHasNanInfGrad(fp32_sum_grad,
                         fp32_numel_each_device,
                         fp16_sum_grad,
                         fp16_numel_each_device,
                         fp32_square_grad_norm,
                         stream,
                         &cub_tmp_buffer);
      if (num_devices > 1) {
        PADDLE_ENFORCE_GPU_SUCCESS(
            platform::dynload::ncclAllReduce(fp32_square_grad_norm,
                                             fp32_square_grad_norm,
                                             1,
                                             ncclFloat32,
                                             ncclSum,
                                             local_comm,
                                             stream));
        VLOG(1) << "Grad square norm after all reduce: "
                << FlattenToString(fp32_square_grad_norm, 1, place);
      }
      // (4) mark max_global_grad_norm as 0, meaning that clip has been
      // already performed
      max_global_grad_norm = 0;
...
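The body of `CheckHasNanInfGrad` is outside this hunk; the diff only shows that it runs on the per-device shards and that the scalar `fp32_square_grad_norm` is then summed across devices with `ncclAllReduce(..., ncclSum, ...)`. One property worth noting, independent of the Paddle internals: a sum-reduction propagates any non-finite contribution, so if one device's squared gradient norm has already been tainted by NaN or Inf, every device observes a non-finite global norm after the all-reduce and can react to the bad step consistently. A tiny host-side illustration of that arithmetic (the rank values and names are made up; NCCL itself is not involved):

```cpp
// Illustration only: a plain sum over per-rank contributions behaves like the
// ncclSum all-reduce of fp32_square_grad_norm with respect to NaN/Inf.
#include <cmath>
#include <cstdio>

int main() {
  // Pretend these are the squared grad norms computed on four ranks;
  // rank 2 overflowed and produced a NaN.
  const float per_rank_sq_norm[4] = {1.5f, 2.0f, NAN, 0.25f};

  float global_sq_norm = 0.0f;
  for (float v : per_rank_sq_norm) global_sq_norm += v;  // the "all-reduce" (sum)

  // Every rank would see the same non-finite result.
  std::printf("global squared norm = %f, finite = %d\n",
              global_sq_norm, std::isfinite(global_sq_norm) ? 1 : 0);
  return 0;
}
```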