PaddlePaddle / PaddleDetection
Commit 733bb82e
Authored Jan 29, 2019 by dengkaipeng
downsample -> downsample_ratio. test=develop
Parent: ae0b0d5f
Showing 4 changed files with 29 additions and 26 deletions (+29 / -26)
paddle/fluid/API.spec                      +1  -1
paddle/fluid/operators/yolov3_loss_op.cc   +1  -1
paddle/fluid/operators/yolov3_loss_op.h    +22 -19
python/paddle/fluid/layers/detection.py    +5  -5

paddle/fluid/API.spec
@@ -324,7 +324,7 @@ paddle.fluid.layers.generate_mask_labels ArgSpec(args=['im_info', 'gt_classes',
 paddle.fluid.layers.iou_similarity ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.box_coder ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name'], varargs=None, keywords=None, defaults=('encode_center_size', True, None))
 paddle.fluid.layers.polygon_box_transform ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.yolov3_loss ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'anchor_mask', 'class_num', 'ignore_thresh', 'downsample', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.yolov3_loss ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'anchor_mask', 'class_num', 'ignore_thresh', 'downsample_ratio', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.multiclass_nms ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'keep_top_k', 'nms_threshold', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(0.3, True, 1.0, 0, None))
 paddle.fluid.layers.accuracy ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None))
 paddle.fluid.layers.auc ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1))

paddle/fluid/operators/yolov3_loss_op.cc
@@ -135,7 +135,7 @@ class Yolov3LossOpMaker : public framework::OpProtoAndCheckerMaker {
                               "The mask index of anchors used in "
                               "current YOLOv3 loss calculation.")
        .SetDefault(std::vector<int>{});
-    AddAttr<int>("downsample",
+    AddAttr<int>("downsample_ratio",
                 "The downsample ratio from network input to YOLOv3 loss "
                 "input, so 32, 16, 8 should be set for the first, second, "
                 "and thrid YOLOv3 loss operators.")
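
Note on the renamed attribute: downsample_ratio ties the spatial size of the loss input back to the network input, which is why the kernels below recover input_size as downsample_ratio * h. A minimal Python sketch of that relation, with a 416 x 416 input assumed purely for illustration (not taken from this diff):

    # Illustrative only: grid size vs. downsample_ratio for the three YOLOv3 scales.
    input_size = 416  # assumed network input resolution
    for downsample_ratio, grid in ((32, 13), (16, 26), (8, 52)):
        assert input_size // downsample_ratio == grid
        assert downsample_ratio * grid == input_size  # what the kernels compute as input_size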

paddle/fluid/operators/yolov3_loss_op.h
@@ -32,7 +32,7 @@ static inline bool LessEqualZero(T x) {
 }
 
 template <typename T>
-static T SCE(T x, T label) {
+static T SigmoidCrossEntropy(T x, T label) {
   return (x > 0 ? x : 0.0) - x * label + std::log(1.0 + std::exp(-std::abs(x)));
 }
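
For readers unfamiliar with this form: the renamed helper (previously SCE) is the numerically stable sigmoid cross entropy on a raw logit. A small NumPy sketch of the same formula, added only as an illustration:

    import numpy as np

    def sigmoid_cross_entropy(x, label):
        # Stable form of -label*log(sigmoid(x)) - (1-label)*log(1-sigmoid(x)):
        # max(x, 0) - x*label + log(1 + exp(-|x|)), matching the C++ body above.
        return np.maximum(x, 0.0) - x * label + np.log1p(np.exp(-np.abs(x)))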

@@ -42,7 +42,7 @@ static T L2Loss(T x, T y) {
 }
 
 template <typename T>
-static T SCEGrad(T x, T label) {
+static T SigmoidCrossEntropyGrad(T x, T label) {
   return 1.0 / (1.0 + std::exp(-x)) - label;
 }

@@ -62,7 +62,7 @@ static int GetMaskIndex(std::vector<int> mask, int val) {
 template <typename T>
 struct Box {
-  float x, y, w, h;
+  T x, y, w, h;
 };
 
 template <typename T>
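
The companion gradient helper follows from differentiating the expression above: the derivative of the sigmoid cross entropy with respect to the logit is sigmoid(x) - label, which is exactly what SigmoidCrossEntropyGrad returns. A matching NumPy sketch, illustrative only:

    import numpy as np

    def sigmoid_cross_entropy_grad(x, label):
        # d/dx [max(x, 0) - x*label + log(1 + exp(-|x|))] = sigmoid(x) - label
        return 1.0 / (1.0 + np.exp(-x)) - label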

@@ -128,8 +128,8 @@ static void CalcBoxLocationLoss(T* loss, const T* input, Box<T> gt,
   T th = std::log(gt.h * input_size / anchors[2 * an_idx + 1]);
   T scale = (2.0 - gt.w * gt.h);
-  loss[0] += SCE<T>(input[box_idx], tx) * scale;
-  loss[0] += SCE<T>(input[box_idx + stride], ty) * scale;
+  loss[0] += SigmoidCrossEntropy<T>(input[box_idx], tx) * scale;
+  loss[0] += SigmoidCrossEntropy<T>(input[box_idx + stride], ty) * scale;
   loss[0] += L2Loss<T>(input[box_idx + 2 * stride], tw) * scale;
   loss[0] += L2Loss<T>(input[box_idx + 3 * stride], th) * scale;
 }

@@ -145,9 +145,10 @@ static void CalcBoxLocationLossGrad(T* input_grad, const T loss, const T* input,
   T th = std::log(gt.h * input_size / anchors[2 * an_idx + 1]);
   T scale = (2.0 - gt.w * gt.h);
-  input_grad[box_idx] = SCEGrad<T>(input[box_idx], tx) * scale * loss;
+  input_grad[box_idx] =
+      SigmoidCrossEntropyGrad<T>(input[box_idx], tx) * scale * loss;
   input_grad[box_idx + stride] =
-      SCEGrad<T>(input[box_idx + stride], ty) * scale * loss;
+      SigmoidCrossEntropyGrad<T>(input[box_idx + stride], ty) * scale * loss;
   input_grad[box_idx + 2 * stride] =
       L2LossGrad<T>(input[box_idx + 2 * stride], tw) * scale * loss;
   input_grad[box_idx + 3 * stride] =
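
For context, the tw/th targets and the scale weight visible in these hunks follow the standard YOLOv3 box encoding: width and height are log-ratios against the matched anchor, and scale = 2 - w*h up-weights small boxes. The tx/ty cell-offset form is not shown in this diff, so the sketch below fills it in from the usual YOLOv3 convention and should be read as an assumption:

    import numpy as np

    def yolo_box_targets(gx, gy, gw, gh, anchor_w, anchor_h,
                         grid_w, grid_h, input_size, gi, gj):
        # gx, gy, gw, gh: ground-truth box normalized to [0, 1] of the input image.
        tx = gx * grid_w - gi                    # offset inside grid cell gi (usual YOLOv3 form)
        ty = gy * grid_h - gj                    # offset inside grid cell gj (usual YOLOv3 form)
        tw = np.log(gw * input_size / anchor_w)  # matches the tw/th lines in the hunk above
        th = np.log(gh * input_size / anchor_h)
        scale = 2.0 - gw * gh                    # matches `T scale = (2.0 - gt.w * gt.h);`
        return tx, ty, tw, th, scale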

@@ -160,7 +161,7 @@ static inline void CalcLabelLoss(T* loss, const T* input, const int index,
                                  const int stride) {
   for (int i = 0; i < class_num; i++) {
     T pred = input[index + i * stride];
-    loss[0] += SCE<T>(pred, (i == label) ? 1.0 : 0.0);
+    loss[0] += SigmoidCrossEntropy<T>(pred, (i == label) ? 1.0 : 0.0);
   }
 }

@@ -172,7 +173,7 @@ static inline void CalcLabelLossGrad(T* input_grad, const T loss,
   for (int i = 0; i < class_num; i++) {
     T pred = input[index + i * stride];
     input_grad[index + i * stride] =
-        SCEGrad<T>(pred, (i == label) ? 1.0 : 0.0) * loss;
+        SigmoidCrossEntropyGrad<T>(pred, (i == label) ? 1.0 : 0.0) * loss;
   }
 }

@@ -187,11 +188,11 @@ static inline void CalcObjnessLoss(T* loss, const T* input, const T* objness,
       for (int l = 0; l < w; l++) {
         T obj = objness[k * w + l];
         if (obj > 1e-5) {
-          // positive sample: obj = mixup score
-          loss[i] += SCE<T>(input[k * w + l], 1.0);
+          // positive sample: obj = 1
+          loss[i] += SigmoidCrossEntropy<T>(input[k * w + l], 1.0);
         } else if (obj > -0.5) {
           // negetive sample: obj = 0
-          loss[i] += SCE<T>(input[k * w + l], 0.0);
+          loss[i] += SigmoidCrossEntropy<T>(input[k * w + l], 0.0);
         }
       }
     }
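
The objness branches encode three cases per grid cell: a positive sample (obj > 1e-5) trained toward 1.0, a negative sample (obj in (-0.5, 1e-5], i.e. 0) trained toward 0.0, and everything else ignored, which the kernel below uses for cells whose best IoU exceeds ignore_thresh. A compact, self-contained sketch; the negative marker value for ignored cells is an assumption, since the constant itself is not visible in this diff:

    import numpy as np

    def sigmoid_cross_entropy(x, label):
        return np.maximum(x, 0.0) - x * label + np.log1p(np.exp(-np.abs(x)))

    def objness_loss_contrib(obj, pred_logit):
        # Mirrors the branches in CalcObjnessLoss above.
        if obj > 1e-5:          # positive sample, target 1.0
            return sigmoid_cross_entropy(pred_logit, 1.0)
        elif obj > -0.5:        # negative sample (obj == 0), target 0.0
            return sigmoid_cross_entropy(pred_logit, 0.0)
        return 0.0              # ignored cell (marked with a negative objness value)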

@@ -213,9 +214,11 @@ static inline void CalcObjnessLossGrad(T* input_grad, const T* loss,
       for (int l = 0; l < w; l++) {
         T obj = objness[k * w + l];
         if (obj > 1e-5) {
-          input_grad[k * w + l] = SCEGrad<T>(input[k * w + l], 1.0) * loss[i];
+          input_grad[k * w + l] =
+              SigmoidCrossEntropyGrad<T>(input[k * w + l], 1.0) * loss[i];
         } else if (obj > -0.5) {
-          input_grad[k * w + l] = SCEGrad<T>(input[k * w + l], 0.0) * loss[i];
+          input_grad[k * w + l] =
+              SigmoidCrossEntropyGrad<T>(input[k * w + l], 0.0) * loss[i];
         }
       }
     }

@@ -256,7 +259,7 @@ class Yolov3LossKernel : public framework::OpKernel<T> {
     auto anchor_mask = ctx.Attr<std::vector<int>>("anchor_mask");
     int class_num = ctx.Attr<int>("class_num");
     float ignore_thresh = ctx.Attr<float>("ignore_thresh");
-    int downsample = ctx.Attr<int>("downsample");
+    int downsample_ratio = ctx.Attr<int>("downsample_ratio");
 
     const int n = input->dims()[0];
     const int h = input->dims()[2];

@@ -264,7 +267,7 @@ class Yolov3LossKernel : public framework::OpKernel<T> {
     const int an_num = anchors.size() / 2;
     const int mask_num = anchor_mask.size();
     const int b = gt_box->dims()[1];
-    int input_size = downsample * h;
+    int input_size = downsample_ratio * h;
 
     const int stride = h * w;
     const int an_stride = (class_num + 5) * stride;

@@ -308,7 +311,7 @@ class Yolov3LossKernel : public framework::OpKernel<T> {
            }
          }
-          // If best IoU is greater then ignore_thresh,
+          // If best IoU is bigger then ignore_thresh,
           // ignore the objectness loss.
           if (best_iou > ignore_thresh) {
             int obj_idx = (i * mask_num + j) * stride + k * w + l;

@@ -388,7 +391,7 @@ class Yolov3LossGradKernel : public framework::OpKernel<T> {
     auto anchors = ctx.Attr<std::vector<int>>("anchors");
     auto anchor_mask = ctx.Attr<std::vector<int>>("anchor_mask");
     int class_num = ctx.Attr<int>("class_num");
-    int downsample = ctx.Attr<int>("downsample");
+    int downsample_ratio = ctx.Attr<int>("downsample_ratio");
 
     const int n = input_grad->dims()[0];
     const int c = input_grad->dims()[1];

@@ -396,7 +399,7 @@ class Yolov3LossGradKernel : public framework::OpKernel<T> {
     const int w = input_grad->dims()[3];
     const int mask_num = anchor_mask.size();
     const int b = gt_match_mask->dims()[1];
-    int input_size = downsample * h;
+    int input_size = downsample_ratio * h;
 
     const int stride = h * w;
     const int an_stride = (class_num + 5) * stride;

python/paddle/fluid/layers/detection.py
@@ -416,7 +416,7 @@ def yolov3_loss(x,
                 anchor_mask,
                 class_num,
                 ignore_thresh,
-                downsample,
+                downsample_ratio,
                 name=None):
     """
     ${comment}

@@ -434,7 +434,7 @@ def yolov3_loss(x,
         anchor_mask (list|tuple): ${anchor_mask_comment}
         class_num (int): ${class_num_comment}
         ignore_thresh (float): ${ignore_thresh_comment}
-        downsample (int): ${downsample_comment}
+        downsample_ratio (int): ${downsample_ratio_comment}
         name (string): the name of yolov3 loss
 
     Returns:

@@ -456,8 +456,8 @@ def yolov3_loss(x,
         gtlabel = fluid.layers.data(name='gtlabel', shape=[6, 1], dtype='int32')
         anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
         anchors = [0, 1, 2]
-        loss = fluid.layers.yolov3_loss(x=x, gtbox=gtbox, class_num=80, anchors=anchors,
-                                        ignore_thresh=0.5)
+        loss = fluid.layers.yolov3_loss(x=x, gtbox=gtbox, class_num=80, anchors=anchors,
+                                        ignore_thresh=0.5, downsample_ratio=32)
     """
     helper = LayerHelper('yolov3_loss', **locals())
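
The docstring example above still omits the required gtlabel and anchor_mask arguments, and its second assignment to anchors presumably means the anchor mask. A complete call against the renamed signature might look like the following; this is a hedged sketch against the Fluid 1.x static-graph API of this era, not text from the commit:

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[255, 13, 13], dtype='float32')   # 255 = 3 * (80 + 5)
    gtbox = fluid.layers.data(name='gtbox', shape=[6, 4], dtype='float32')
    gtlabel = fluid.layers.data(name='gtlabel', shape=[6, 1], dtype='int32')
    anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
    anchor_mask = [0, 1, 2]
    loss = fluid.layers.yolov3_loss(x=x, gtbox=gtbox, gtlabel=gtlabel, anchors=anchors,
                                    anchor_mask=anchor_mask, class_num=80,
                                    ignore_thresh=0.5, downsample_ratio=32)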

@@ -491,7 +491,7 @@ def yolov3_loss(x,
             "anchor_mask": anchor_mask,
             "class_num": class_num,
             "ignore_thresh": ignore_thresh,
-            "downsample": downsample,
+            "downsample_ratio": downsample_ratio,
         }
 
     helper.append_op(