Commit c945ffa7
Authored Jan 14, 2019 by dengkaipeng

fix label_smooth and mixup score

Parent: 20200e12

Showing 2 changed files with 55 additions and 60 deletions (+55 -60)

paddle/fluid/operators/yolov3_loss_op.h  (+45 -53)
python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py  (+10 -7)

paddle/fluid/operators/yolov3_loss_op.h
@@ -156,47 +156,29 @@ static void CalcBoxLocationLossGrad(T* input_grad, const T loss, const T* input,
 template <typename T>
 static inline void CalcLabelLoss(T* loss, const T* input, const int index,
-                                 const int label, const T score,
-                                 const int class_num, const int stride,
-                                 const bool use_label_smooth) {
-  if (use_label_smooth) {
-    for (int i = 0; i < class_num; i++) {
-      T pred = input[index + i * stride] < -0.5 ? input[index + i * stride]
-                                                : 1.0 / class_num;
-      loss[0] += SCE<T>(pred, (i == label) ? score : 0.0);
-    }
-  } else {
-    for (int i = 0; i < class_num; i++) {
-      T pred = input[index + i * stride];
-      loss[0] += SCE<T>(pred, (i == label) ? score : 0.0);
-    }
+                                 const int label, const int class_num,
+                                 const int stride, const T pos, const T neg) {
+  for (int i = 0; i < class_num; i++) {
+    T pred = input[index + i * stride];
+    loss[0] += SCE<T>(pred, (i == label) ? pos : neg);
   }
 }
 template <typename T>
 static inline void CalcLabelLossGrad(T* input_grad, const T loss,
                                      const T* input, const int index,
-                                     const int label, const T score,
-                                     const int class_num, const int stride,
-                                     const bool use_label_smooth) {
-  if (use_label_smooth) {
-    for (int i = 0; i < class_num; i++) {
-      T pred = input[index + i * stride] < -0.5 ? input[index + i * stride]
-                                                : 1.0 / class_num;
-      input_grad[index + i * stride] =
-          SCEGrad<T>(pred, (i == label) ? score : 0.0) * loss;
-    }
-  } else {
-    for (int i = 0; i < class_num; i++) {
-      T pred = input[index + i * stride];
-      input_grad[index + i * stride] =
-          SCEGrad<T>(pred, (i == label) ? score : 0.0) * loss;
-    }
+                                     const int label, const int class_num,
+                                     const int stride, const T pos,
+                                     const T neg) {
+  for (int i = 0; i < class_num; i++) {
+    T pred = input[index + i * stride];
+    input_grad[index + i * stride] =
+        SCEGrad<T>(pred, (i == label) ? pos : neg) * loss;
   }
 }
 template <typename T>
-static inline void CalcObjnessLoss(T* loss, const T* input, const int* objness,
+static inline void CalcObjnessLoss(T* loss, const T* input, const T* objness,
                                    const int n, const int an_num, const int h,
                                    const int w, const int stride,
                                    const int an_stride) {
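The rewritten CalcLabelLoss and CalcLabelLossGrad drop the per-call use_label_smooth branch (and the odd pred clamp it contained) in favor of precomputed targets pos and neg, so smoothing is decided once per kernel launch. A minimal Python sketch of the resulting target construction, assuming SCE<T> is the usual numerically stable sigmoid cross-entropy (the helper names below are illustrative, not part of the patch):

import numpy as np

def sce(x, label):
    # Numerically stable sigmoid cross-entropy, the formulation assumed
    # here for the SCE<T> helper in yolov3_loss_op.h.
    return np.maximum(x, 0.0) - x * label + np.log1p(np.exp(-np.abs(x)))

def calc_label_loss(logits, label, pos, neg):
    # logits: raw class scores for one matched cell, shape (class_num,).
    # Every class gets the "neg" target except the ground-truth class,
    # which gets "pos", mirroring (i == label) ? pos : neg above.
    targets = np.full_like(logits, neg, dtype='float64')
    targets[label] = pos
    return sce(logits, targets).sum()

With label smoothing enabled the caller passes pos = 1 - 1/class_num and neg = 1/class_num; otherwise pos = 1.0 and neg = 0.0.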
@@ -204,9 +186,9 @@ static inline void CalcObjnessLoss(T* loss, const T* input, const int* objness,
     for (int j = 0; j < an_num; j++) {
       for (int k = 0; k < h; k++) {
         for (int l = 0; l < w; l++) {
-          int obj = objness[k * w + l];
-          if (obj >= 0) {
-            loss[i] += SCE<T>(input[k * w + l], static_cast<T>(obj));
+          T obj = objness[k * w + l];
+          if (obj > -0.5) {
+            loss[i] += SCE<T>(input[k * w + l], obj);
           }
         }
       }
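With the mask stored as T rather than int, each cell of objness now carries one of three values: the matched ground truth's mixup score (positive sample), 0 (negative sample), or -1 (ignored because its best IoU exceeds ignore_thresh), so the guard changes from obj >= 0 to obj > -0.5. A small Python sketch of that dispatch, with illustrative names only:

import numpy as np

def calc_objness_loss(pred_obj, objness):
    # pred_obj, objness: flattened (an_num * h * w,) arrays for one image.
    # objness holds the mixup score for positives, 0.0 for negatives and
    # -1.0 for cells suppressed by the ignore_thresh rule.
    loss = 0.0
    for x, obj in zip(pred_obj, objness):
        if obj > -0.5:  # skip only the ignored (-1) cells
            loss += np.maximum(x, 0.0) - x * obj + np.log1p(np.exp(-np.abs(x)))
    return loss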
@@ -218,7 +200,7 @@ static inline void CalcObjnessLoss(T* loss, const T* input, const int* objness,
 template <typename T>
 static inline void CalcObjnessLossGrad(T* input_grad, const T* loss,
-                                       const T* input, const int* objness,
+                                       const T* input, const T* objness,
                                        const int n, const int an_num,
                                        const int h, const int w,
                                        const int stride, const int an_stride) {
@@ -226,10 +208,9 @@ static inline void CalcObjnessLossGrad(T* input_grad, const T* loss,
     for (int j = 0; j < an_num; j++) {
       for (int k = 0; k < h; k++) {
         for (int l = 0; l < w; l++) {
-          int obj = objness[k * w + l];
-          if (obj >= 0) {
-            input_grad[k * w + l] =
-                SCEGrad<T>(input[k * w + l], static_cast<T>(obj)) * loss[i];
+          T obj = objness[k * w + l];
+          if (obj > -0.5) {
+            input_grad[k * w + l] =
+                SCEGrad<T>(input[k * w + l], obj) * loss[i];
           }
         }
       }
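The gradient path mirrors the forward pass: SCEGrad<T> is presumably sigmoid(x) minus the target, i.e. the derivative of the sigmoid cross-entropy above with respect to the logit, scaled by the incoming loss gradient. A quick finite-difference check of that assumed formulation:

import numpy as np

def sce(x, t):
    return np.maximum(x, 0.0) - x * t + np.log1p(np.exp(-np.abs(x)))

def sce_grad(x, t):
    # d/dx sce(x, t) = sigmoid(x) - t, the formulation assumed for SCEGrad<T>.
    return 1.0 / (1.0 + np.exp(-x)) - t

x, t, eps = 0.3, 0.85, 1e-6
numeric = (sce(x + eps, t) - sce(x - eps, t)) / (2 * eps)
assert abs(numeric - sce_grad(x, t)) < 1e-6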
@@ -285,15 +266,22 @@ class Yolov3LossKernel : public framework::OpKernel<T> {
     const int stride = h * w;
     const int an_stride = (class_num + 5) * stride;

+    T label_pos = 1.0;
+    T label_neg = 0.0;
+    if (use_label_smooth) {
+      label_pos = 1.0 - 1.0 / static_cast<T>(class_num);
+      label_neg = 1.0 / static_cast<T>(class_num);
+    }
+
     const T* input_data = input->data<T>();
     const T* gt_box_data = gt_box->data<T>();
     const int* gt_label_data = gt_label->data<int>();
     const T* gt_score_data = gt_score->data<T>();
     T* loss_data = loss->mutable_data<T>({n}, ctx.GetPlace());
     memset(loss_data, 0, loss->numel() * sizeof(T));
-    int* obj_mask_data =
-        objness_mask->mutable_data<int>({n, mask_num, h, w}, ctx.GetPlace());
-    memset(obj_mask_data, 0, objness_mask->numel() * sizeof(int));
+    T* obj_mask_data =
+        objness_mask->mutable_data<T>({n, mask_num, h, w}, ctx.GetPlace());
+    memset(obj_mask_data, 0, objness_mask->numel() * sizeof(T));
     int* gt_match_mask_data =
         gt_match_mask->mutable_data<int>({n, b}, ctx.GetPlace());
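Hoisting label_pos and label_neg into the kernel makes the smoothed targets explicit. For example, with class_num = 80 (as in COCO) and use_label_smooth on, label_pos = 0.9875 and label_neg = 0.0125; with smoothing off they stay at 1.0 and 0.0. A sketch of the same setup (the function name is illustrative):

def smooth_targets(class_num, use_label_smooth):
    # Mirrors the label_pos / label_neg block added to both kernels.
    if use_label_smooth:
        return 1.0 - 1.0 / class_num, 1.0 / class_num
    return 1.0, 0.0

print(smooth_targets(80, True))   # (0.9875, 0.0125)
print(smooth_targets(80, False))  # (1.0, 0.0)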
@@ -327,7 +315,7 @@ class Yolov3LossKernel : public framework::OpKernel<T> {
           if (best_iou > ignore_thresh) {
             int obj_idx = (i * mask_num + j) * stride + k * w + l;
-            obj_mask_data[obj_idx] = -1;
+            obj_mask_data[obj_idx] = static_cast<T>(-1.0);
           }
           // TODO(dengkaipeng): all losses should be calculated if best IoU
           // is bigger then truth thresh should be calculated here, but
@@ -374,15 +362,15 @@ class Yolov3LossKernel : public framework::OpKernel<T> {
           CalcBoxLocationLoss<T>(loss_data + i, input_data, gt, anchors,
                                  best_n, box_idx, gi, gj, h, input_size,
                                  stride);
+          T score = gt_score_data[i * b + t];
           int obj_idx = (i * mask_num + mask_idx) * stride + gj * w + gi;
-          obj_mask_data[obj_idx] = 1;
+          obj_mask_data[obj_idx] = score;
           int label = gt_label_data[i * b + t];
-          T score = gt_score_data[i * b + t];
           int label_idx = GetEntryIndex(i, mask_idx, gj * w + gi, mask_num,
                                         an_stride, stride, 5);
-          CalcLabelLoss<T>(loss_data + i, input_data, label_idx, label, score,
-                           class_num, stride, use_label_smooth);
+          CalcLabelLoss<T>(loss_data + i, input_data, label_idx, label,
+                           class_num, stride, label_pos, label_neg);
         }
       }
     }
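For each matched ground truth the mixup score now becomes the objectness target (obj_mask_data[obj_idx] = score) instead of weighting the class targets, while the class loss uses the fixed label_pos / label_neg values. A hedged sketch of how the targets for one matched cell would be assembled (names and shapes are illustrative):

import numpy as np

def targets_for_matched_cell(gt_label, gt_score, class_num, label_pos, label_neg):
    # Objectness target is the mixup score of the matched ground truth;
    # class targets use the (possibly smoothed) positive/negative values.
    obj_target = gt_score
    cls_targets = np.full(class_num, label_neg, dtype='float32')
    cls_targets[gt_label] = label_pos
    return obj_target, cls_targets

obj_t, cls_t = targets_for_matched_cell(gt_label=17, gt_score=0.85,
                                        class_num=80,
                                        label_pos=0.9875, label_neg=0.0125)
# obj_t is 0.85; cls_t[17] is label_pos, every other entry is label_neg.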
@@ -399,7 +387,6 @@ class Yolov3LossGradKernel : public framework::OpKernel<T> {
     auto* input = ctx.Input<Tensor>("X");
     auto* gt_box = ctx.Input<Tensor>("GTBox");
     auto* gt_label = ctx.Input<Tensor>("GTLabel");
-    auto* gt_score = ctx.Input<Tensor>("GTScore");
     auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto* loss_grad = ctx.Input<Tensor>(framework::GradVarName("Loss"));
     auto* objness_mask = ctx.Input<Tensor>("ObjectnessMask");
@@ -421,12 +408,18 @@ class Yolov3LossGradKernel : public framework::OpKernel<T> {
     const int stride = h * w;
     const int an_stride = (class_num + 5) * stride;

+    T label_pos = 1.0;
+    T label_neg = 0.0;
+    if (use_label_smooth) {
+      label_pos = 1.0 - 1.0 / static_cast<T>(class_num);
+      label_neg = 1.0 / static_cast<T>(class_num);
+    }
+
     const T* input_data = input->data<T>();
     const T* gt_box_data = gt_box->data<T>();
     const int* gt_label_data = gt_label->data<int>();
-    const T* gt_score_data = gt_score->data<T>();
     const T* loss_grad_data = loss_grad->data<T>();
-    const int* obj_mask_data = objness_mask->data<int>();
+    const T* obj_mask_data = objness_mask->data<T>();
     const int* gt_match_mask_data = gt_match_mask->data<int>();
     T* input_grad_data =
         input_grad->mutable_data<T>({n, c, h, w}, ctx.GetPlace());
@@ -447,12 +440,11 @@ class Yolov3LossGradKernel : public framework::OpKernel<T> {
               anchor_mask[mask_idx], box_idx, gi, gj, h, input_size, stride);
           int label = gt_label_data[i * b + t];
-          T score = gt_score_data[i * b + t];
           int label_idx = GetEntryIndex(i, mask_idx, gj * w + gi, mask_num,
                                         an_stride, stride, 5);
           CalcLabelLossGrad<T>(input_grad_data, loss_grad_data[i], input_data,
-                               label_idx, label, score, class_num, stride,
-                               use_label_smooth);
+                               label_idx, label, class_num, stride, label_pos,
+                               label_neg);
         }
       }
     }
python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py
@@ -81,6 +81,9 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs):
     x = x.reshape((n, mask_num, 5 + class_num, h, w)).transpose((0, 1, 3, 4, 2))
     loss = np.zeros((n)).astype('float32')

+    label_pos = 1.0 - 1.0 / class_num if use_label_smooth else 1.0
+    label_neg = 1.0 / class_num if use_label_smooth else 0.0
+
     pred_box = x[:, :, :, :, :4].copy()
     grid_x = np.tile(np.arange(w).reshape((1, w)), (h, 1))
     grid_y = np.tile(np.arange(h).reshape((h, 1)), (1, w))
@@ -103,7 +106,7 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs):
     pred_box = pred_box.reshape((n, -1, 4))
     pred_obj = x[:, :, :, :, 4].reshape((n, -1))
-    objness = np.zeros(pred_box.shape[:2])
+    objness = np.zeros(pred_box.shape[:2]).astype('float32')
     ious = batch_xywh_box_iou(pred_box, gtbox)
     ious_max = np.max(ious, axis=-1)
     objness = np.where(ious_max > ignore_thresh, -np.ones_like(objness),
@@ -145,17 +148,17 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs):
             loss[i] += l1loss(x[i, an_idx, gj, gi, 2], tw) * scale
             loss[i] += l1loss(x[i, an_idx, gj, gi, 3], th) * scale
-            objness[i, an_idx * h * w + gj * w + gi] = 1
+            objness[i, an_idx * h * w + gj * w + gi] = gtscore[i, j]
             for label_idx in range(class_num):
-                loss[i] += sce(x[i, an_idx, gj, gi, 5 + label_idx],
-                               int(label_idx == gtlabel[i, j]) * gtscore[i, j])
+                loss[i] += sce(x[i, an_idx, gj, gi, 5 + label_idx], label_pos
+                               if label_idx == gtlabel[i, j] else label_neg)

         for j in range(mask_num * h * w):
             if objness[i, j] >= 0:
                 loss[i] += sce(pred_obj[i, j], objness[i, j])

-    return (loss, objness.reshape((n, mask_num, h, w)).astype('int32'), \
+    return (loss, objness.reshape((n, mask_num, h, w)).astype('float32'), \
             gt_matches.astype('int32'))
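The numpy reference in the test changes in step with the operator: the class target no longer multiplies the one-hot label by gtscore, and instead picks label_pos or label_neg, while gtscore moves into the objness array. A before/after comparison of the per-class target for one ground truth (values illustrative):

gtscore, class_num = 0.85, 80
label_pos = 1.0 - 1.0 / class_num   # 0.9875 with label smoothing
label_neg = 1.0 / class_num         # 0.0125 with label smoothing

def old_target(label_idx, gtlabel):
    return int(label_idx == gtlabel) * gtscore                 # 0.85 or 0.0

def new_target(label_idx, gtlabel):
    return label_pos if label_idx == gtlabel else label_neg    # 0.9875 or 0.0125

print(old_target(17, 17), new_target(17, 17))   # 0.85 0.9875
print(old_target(3, 17), new_target(3, 17))     # 0 0.0125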
@@ -220,9 +223,9 @@ class TestYolov3LossOp(OpTest):
         self.use_label_smooth = True


-class TestYolov3LossWithLabelSmooth(TestYolov3LossOp):
+class TestYolov3LossWithoutLabelSmooth(TestYolov3LossOp):
     def set_label_smooth(self):
-        self.use_label_smooth = True
+        self.use_label_smooth = False


 if __name__ == "__main__":