BaiXuePrincess / Paddle · forked from PaddlePaddle / Paddle
Commit 628ff34b (unverified)
Authored Sep 26, 2021 by whs; committed by GitHub on Sep 26, 2021
Fix FPE of label smooth op (#35861)
Parent: 7ff226f0

Showing 1 changed file with 23 additions and 20 deletions (+23 −20)
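Why this fix: when the input is an empty tensor, its trailing dimension (label_dim) is 0, and the unguarded kernel evaluates in_t->numel() / label_dim, an integer division by zero that raises SIGFPE (the "FPE" in the title) on most platforms. The commit wraps both kernels in a dimension check. The following standalone sketch (hypothetical names, not Paddle code) illustrates the failure mode and the guard pattern:

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for the kernel's shape arithmetic.
void label_smooth_shapes(const std::vector<int64_t>& dims) {
  int64_t numel = 1;
  for (int64_t d : dims) numel *= d;
  const int64_t label_dim = dims.back();  // 0 when the last axis is empty

  if (label_dim != 0) {  // the guard this commit introduces
    int64_t rows = numel / label_dim;  // safe: label_dim is nonzero here
    std::cout << "rows to smooth: " << rows << "\n";
  } else {
    std::cout << "empty tensor: nothing to do\n";  // previously: SIGFPE
  }
}

int main() {
  label_smooth_shapes({4, 10});  // normal case: rows = 4
  label_smooth_shapes({4, 0});   // empty case that used to crash
}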
paddle/fluid/operators/label_smooth_op.h (+23 −20)
@@ -29,20 +29,21 @@ class LabelSmoothKernel : public framework::OpKernel<T> {
     auto* dist_t = ctx.Input<framework::Tensor>("PriorDist");
     auto label_dim = in_t->dims()[in_t->dims().size() - 1];
     out_t->mutable_data<T>(ctx.GetPlace());
-    auto epsilon = ctx.Attr<float>("epsilon");
-    auto out = framework::EigenVector<T>::Flatten(*out_t);
-    auto in = framework::EigenVector<T>::Flatten(*in_t);
-    auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
-    if (dist_t) {
-      auto dist = framework::EigenVector<T>::Flatten(*dist_t);
-      out.device(dev) = static_cast<T>(1 - epsilon) * in +
-                        static_cast<T>(epsilon) *
-                            dist.broadcast(Eigen::DSizes<int, 1>(
-                                in_t->numel() / label_dim));
-    } else {
-      out.device(dev) = static_cast<T>(1 - epsilon) * in +
-                        static_cast<T>(epsilon / label_dim);
-    }
+    if (label_dim != 0) {
+      auto epsilon = ctx.Attr<float>("epsilon");
+      auto out = framework::EigenVector<T>::Flatten(*out_t);
+      auto in = framework::EigenVector<T>::Flatten(*in_t);
+      auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
+      if (dist_t) {
+        auto dist = framework::EigenVector<T>::Flatten(*dist_t);
+        out.device(dev) =
+            static_cast<T>(1 - epsilon) * in +
+            static_cast<T>(epsilon) * dist.broadcast(Eigen::DSizes<int, 1>(
+                                          in_t->numel() / label_dim));
+      } else {
+        out.device(dev) = static_cast<T>(1 - epsilon) * in +
+                          static_cast<T>(epsilon / label_dim);
+      }
+    }
   }
 };
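For context, the forward kernel computes standard label smoothing: out = (1 - epsilon) * in + epsilon / label_dim with a uniform prior, or out = (1 - epsilon) * in + epsilon * dist when a PriorDist tensor is supplied (broadcast across the batch rows). A minimal numeric sketch of the uniform case, assuming epsilon = 0.1 and 3 classes:

#include <array>
#include <cstdio>

int main() {
  const float eps = 0.1f;
  const int K = 3;                                  // label_dim
  std::array<float, 3> label = {1.0f, 0.0f, 0.0f};  // one-hot input row
  // out = (1 - eps) * in + eps / K for each class
  for (float& v : label) v = (1.0f - eps) * v + eps / K;
  for (float v : label) std::printf("%.3f ", v);    // 0.933 0.033 0.033
  std::printf("\n");
}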
@@ -54,13 +55,15 @@ class LabelSmoothGradKernel : public framework::OpKernel<T> {
     auto* d_out_t = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
     auto* d_in_t = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     d_in_t->mutable_data<T>(ctx.GetPlace());
-    auto d_out = framework::EigenVector<T>::Flatten(*d_out_t);
-    auto d_in = framework::EigenVector<T>::Flatten(*d_in_t);
-
-    auto epsilon = ctx.Attr<float>("epsilon");
-    auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
-    d_in.device(dev) = static_cast<T>(1 - epsilon) * d_out;
+    auto d_out_dim = d_out_t->dims()[d_out_t->dims().size() - 1];
+    if (d_out_dim != 0) {
+      auto d_out = framework::EigenVector<T>::Flatten(*d_out_t);
+      auto d_in = framework::EigenVector<T>::Flatten(*d_in_t);
+
+      auto epsilon = ctx.Attr<float>("epsilon");
+      auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
+      d_in.device(dev) = static_cast<T>(1 - epsilon) * d_out;
+    }
   }
 };
 }  // namespace operators
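The backward change mirrors the forward one. Since out = (1 - epsilon) * in + c, where c does not depend on in, the gradient is simply d_in = (1 - epsilon) * d_out; there is no division here, so the d_out_dim != 0 guard appears to exist for symmetry with the forward kernel, skipping work on empty tensors. A small sketch of that relation, in a hypothetical free-function form:

#include <cstdio>
#include <vector>

// Hypothetical form of the backward rule d_in = (1 - eps) * d_out.
std::vector<float> label_smooth_grad(const std::vector<float>& d_out,
                                     float eps, long last_dim) {
  std::vector<float> d_in(d_out.size(), 0.0f);
  if (last_dim != 0) {  // mirrors the commit's d_out_dim != 0 guard
    for (size_t i = 0; i < d_out.size(); ++i)
      d_in[i] = (1.0f - eps) * d_out[i];
  }
  return d_in;
}

int main() {
  auto g = label_smooth_grad({0.5f, -0.25f}, 0.1f, 2);
  std::printf("%.3f %.3f\n", g[0], g[1]);  // 0.450 -0.225
}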
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录