PaddlePaddle / PaddleDetection
Commit 2faa2b40

remove cu file. test=develop

Authored on Nov 13, 2018 by dengkaipeng
Parent: a0284f6f
Showing 7 changed files with 182 additions and 112 deletions.
paddle/fluid/API.spec (+1, -1)
paddle/fluid/operators/yolov3_loss_op.cc (+32, -4)
paddle/fluid/operators/yolov3_loss_op.cu (+0, -23)
paddle/fluid/operators/yolov3_loss_op.h (+30, -13)
python/paddle/fluid/layers/detection.py (+98, -0)
python/paddle/fluid/layers/nn.py (+0, -69)
python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py (+21, -2)
paddle/fluid/API.spec

```diff
@@ -183,7 +183,6 @@ paddle.fluid.layers.similarity_focus ArgSpec(args=['input', 'axis', 'indexes', '
 paddle.fluid.layers.hash ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None))
 paddle.fluid.layers.grid_sampler ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.log_loss ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None))
-paddle.fluid.layers.yolov3_loss ArgSpec(args=['x', 'gtbox', 'anchors', 'class_num', 'ignore_thresh', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.add_position_encoding ArgSpec(args=['input', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.bilinear_tensor_product ArgSpec(args=['x', 'y', 'size', 'act', 'name', 'param_attr', 'bias_attr'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
@@ -289,6 +288,7 @@ paddle.fluid.layers.generate_proposals ArgSpec(args=['scores', 'bbox_deltas', 'i
 paddle.fluid.layers.iou_similarity ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.box_coder ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name'], varargs=None, keywords=None, defaults=('encode_center_size', True, None))
 paddle.fluid.layers.polygon_box_transform ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.yolov3_loss ArgSpec(args=['x', 'gtbox', 'anchors', 'class_num', 'ignore_thresh', 'lambda_xy', 'lambda_wh', 'lambda_conf_obj', 'lambda_conf_noobj', 'lambda_class', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None))
 paddle.fluid.layers.accuracy ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None))
 paddle.fluid.layers.auc ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1))
 paddle.fluid.layers.exponential_decay ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,))
```
paddle/fluid/operators/yolov3_loss_op.cc

```diff
@@ -55,7 +55,8 @@ class Yolov3LossOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
     return framework::OpKernelType(
-        framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace());
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()),
+        platform::CPUPlace());
   }
 };
@@ -63,8 +64,11 @@ class Yolov3LossOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("X",
-             "The input tensor of bilinear interpolation, "
-             "This is a 4-D tensor with shape of [N, C, H, W]");
+             "The input tensor of YOLO v3 loss operator, "
+             "This is a 4-D tensor with shape of [N, C, H, W]."
+             "H and W should be same, and the second dimention(C) stores"
+             "box locations, confidence score and classification one-hot"
+             "key of each anchor box");
     AddInput("GTBox",
              "The input tensor of ground truth boxes, "
              "This is a 3-D tensor with shape of [N, max_box_num, 5], "
@@ -84,6 +88,20 @@ class Yolov3LossOpMaker : public framework::OpProtoAndCheckerMaker {
              "it will be parsed pair by pair.");
     AddAttr<float>("ignore_thresh",
                    "The ignore threshold to ignore confidence loss.");
+    AddAttr<float>("lambda_xy", "The weight of x, y location loss.")
+        .SetDefault(1.0);
+    AddAttr<float>("lambda_wh", "The weight of w, h location loss.")
+        .SetDefault(1.0);
+    AddAttr<float>(
+        "lambda_conf_obj",
+        "The weight of confidence score loss in locations with target object.")
+        .SetDefault(1.0);
+    AddAttr<float>(
+        "lambda_conf_noobj",
+        "The weight of confidence score loss in locations without "
+        "target object.")
+        .SetDefault(1.0);
+    AddAttr<float>("lambda_class", "The weight of classification loss.")
+        .SetDefault(1.0);
     AddComment(R"DOC(
          This operator generate yolov3 loss by given predict result and ground
          truth boxes.
@@ -119,6 +137,15 @@ class Yolov3LossOpMaker : public framework::OpProtoAndCheckerMaker {
          confidence score loss, and classification loss. The MSE loss is used for
          box location, and binary cross entropy loss is used for confidence score
          loss and classification loss.
+
+         Final loss will be represented as follow.
+
+         $$
+         loss = \lambda_{xy} * loss_{xy} + \lambda_{wh} * loss_{wh}
+              + \lambda_{conf_obj} * loss_{conf_obj}
+              + \lambda_{conf_noobj} * loss_{conf_noobj}
+              + \lambda_{class} * loss_{class}
+         $$
          )DOC");
   }
 };
@@ -140,7 +167,8 @@ class Yolov3LossOpGrad : public framework::OperatorWithKernel {
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
     return framework::OpKernelType(
-        framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace());
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()),
+        platform::CPUPlace());
   }
 };
```
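The DOC comment above defines the total loss as a lambda-weighted sum of five terms. A minimal NumPy sketch of that combination follows; the per-term loss values are illustrative and not taken from the operator, while the default weights of 1.0 mirror the SetDefault calls in the diff:

```python
import numpy as np

# Hypothetical per-term losses, e.g. produced by a YOLOv3-style head.
losses = {"xy": 1.8, "wh": 0.9, "conf_obj": 0.4, "conf_noobj": 2.1, "class": 1.3}

# Default weights match the operator's SetDefault(1.0) for every lambda_* attribute.
lambdas = {"xy": 1.0, "wh": 1.0, "conf_obj": 1.0, "conf_noobj": 1.0, "class": 1.0}

# loss = sum_k lambda_k * loss_k, as in the DOC comment's formula.
total = sum(lambdas[k] * losses[k] for k in losses)
print(np.float32(total))
```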
paddle/fluid/operators/yolov3_loss_op.cu (deleted, 100644 → 0)

```diff
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#define EIGEN_USE_GPU
-
-#include "paddle/fluid/operators/yolov3_loss_op.h"
-#include "paddle/fluid/platform/cuda_primitives.h"
-
-namespace ops = paddle::operators;
-REGISTER_OP_CUDA_KERNEL(
-    yolov3_loss,
-    ops::Yolov3LossKernel<paddle::platform::CUDADeviceContext, float>);
-REGISTER_OP_CUDA_KERNEL(
-    yolov3_loss_grad,
-    ops::Yolov3LossGradKernel<paddle::platform::CUDADeviceContext, float>);
```
paddle/fluid/operators/yolov3_loss_op.h

```diff
@@ -267,7 +267,9 @@ static void AddAllGradToInputGrad(
     const Tensor& pred_conf, const Tensor& pred_class, const Tensor& grad_x,
     const Tensor& grad_y, const Tensor& grad_w, const Tensor& grad_h,
     const Tensor& grad_conf_obj, const Tensor& grad_conf_noobj,
-    const Tensor& grad_class, const int class_num) {
+    const Tensor& grad_class, const int class_num, const float lambda_xy,
+    const float lambda_wh, const float lambda_conf_obj,
+    const float lambda_conf_noobj, const float lambda_class) {
   const int n = pred_x.dims()[0];
   const int an_num = pred_x.dims()[1];
   const int h = pred_x.dims()[2];
@@ -290,25 +292,27 @@ static void AddAllGradToInputGrad(
   for (int j = 0; j < an_num; j++) {
     for (int k = 0; k < h; k++) {
       for (int l = 0; l < w; l++) {
-        grad_t(i, j * attr_num, k, l) = grad_x_t(i, j, k, l) *
-                                        pred_x_t(i, j, k, l) *
-                                        (1.0 - pred_x_t(i, j, k, l)) * loss;
+        grad_t(i, j * attr_num, k, l) =
+            grad_x_t(i, j, k, l) * pred_x_t(i, j, k, l) *
+            (1.0 - pred_x_t(i, j, k, l)) * loss * lambda_xy;
         grad_t(i, j * attr_num + 1, k, l) =
             grad_y_t(i, j, k, l) * pred_y_t(i, j, k, l) *
-            (1.0 - pred_y_t(i, j, k, l)) * loss;
-        grad_t(i, j * attr_num + 2, k, l) = grad_w_t(i, j, k, l) * loss;
-        grad_t(i, j * attr_num + 3, k, l) = grad_h_t(i, j, k, l) * loss;
+            (1.0 - pred_y_t(i, j, k, l)) * loss * lambda_xy;
+        grad_t(i, j * attr_num + 2, k, l) =
+            grad_w_t(i, j, k, l) * loss * lambda_wh;
+        grad_t(i, j * attr_num + 3, k, l) =
+            grad_h_t(i, j, k, l) * loss * lambda_wh;
         grad_t(i, j * attr_num + 4, k, l) =
             grad_conf_obj_t(i, j, k, l) * pred_conf_t(i, j, k, l) *
-            (1.0 - pred_conf_t(i, j, k, l)) * loss;
+            (1.0 - pred_conf_t(i, j, k, l)) * loss * lambda_conf_obj;
         grad_t(i, j * attr_num + 4, k, l) +=
             grad_conf_noobj_t(i, j, k, l) * pred_conf_t(i, j, k, l) *
-            (1.0 - pred_conf_t(i, j, k, l)) * loss;
+            (1.0 - pred_conf_t(i, j, k, l)) * loss * lambda_conf_noobj;
         for (int c = 0; c < class_num; c++) {
           grad_t(i, j * attr_num + 5 + c, k, l) =
               grad_class_t(i, j, k, l, c) * pred_class_t(i, j, k, l, c) *
-              (1.0 - pred_class_t(i, j, k, l, c)) * loss;
+              (1.0 - pred_class_t(i, j, k, l, c)) * loss * lambda_class;
         }
       }
     }
   }
@@ -326,6 +330,11 @@ class Yolov3LossKernel : public framework::OpKernel<T> {
     auto anchors = ctx.Attr<std::vector<int>>("anchors");
     int class_num = ctx.Attr<int>("class_num");
     float ignore_thresh = ctx.Attr<float>("ignore_thresh");
+    float lambda_xy = ctx.Attr<float>("lambda_xy");
+    float lambda_wh = ctx.Attr<float>("lambda_wh");
+    float lambda_conf_obj = ctx.Attr<float>("lambda_conf_obj");
+    float lambda_conf_noobj = ctx.Attr<float>("lambda_conf_noobj");
+    float lambda_class = ctx.Attr<float>("lambda_class");

     const int n = input->dims()[0];
     const int h = input->dims()[2];
@@ -370,8 +379,10 @@ class Yolov3LossKernel : public framework::OpKernel<T> {
     T loss_class = CalcBCEWithMask<T>(pred_class, tclass, obj_mask_expand);

     auto* loss_data = loss->mutable_data<T>({1}, ctx.GetPlace());
-    loss_data[0] = loss_x + loss_y + loss_w + loss_h + loss_conf_obj +
-                   loss_conf_noobj + loss_class;
+    loss_data[0] =
+        lambda_xy * (loss_x + loss_y) + lambda_wh * (loss_w + loss_h) +
+        lambda_conf_obj * loss_conf_obj + lambda_conf_noobj * loss_conf_noobj +
+        lambda_class * loss_class;
   }
 };
@@ -387,6 +398,11 @@ class Yolov3LossGradKernel : public framework::OpKernel<T> {
     auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Loss"));
     const T loss = output_grad->data<T>()[0];
+    float lambda_xy = ctx.Attr<float>("lambda_xy");
+    float lambda_wh = ctx.Attr<float>("lambda_wh");
+    float lambda_conf_obj = ctx.Attr<float>("lambda_conf_obj");
+    float lambda_conf_noobj = ctx.Attr<float>("lambda_conf_noobj");
+    float lambda_class = ctx.Attr<float>("lambda_class");

     const int n = input->dims()[0];
     const int c = input->dims()[1];
@@ -448,7 +464,8 @@ class Yolov3LossGradKernel : public framework::OpKernel<T> {
     input_grad->mutable_data<T>({n, c, h, w}, ctx.GetPlace());
     AddAllGradToInputGrad<T>(
         input_grad, loss, pred_x, pred_y, pred_conf, pred_class, grad_x, grad_y,
-        grad_w, grad_h, grad_conf_obj, grad_conf_noobj, grad_class, class_num);
+        grad_w, grad_h, grad_conf_obj, grad_conf_noobj, grad_class, class_num,
+        lambda_xy, lambda_wh, lambda_conf_obj, lambda_conf_noobj, lambda_class);
   }
 };
```
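The new lambda_* values reach AddAllGradToInputGrad, where each element of the input gradient is the upstream gradient times the sigmoid derivative pred * (1 - pred), the incoming loss gradient, and the matching weight. A hedged NumPy sketch of that per-element rule; the function name, shapes, and values below are illustrative, not part of the operator:

```python
import numpy as np

def scale_grad_to_raw_input(grad_pred, pred, upstream_loss_grad, lam):
    """Gradient w.r.t. the raw (pre-sigmoid) activation for one loss term.

    Mirrors the pattern in the diff: grad * pred * (1 - pred) * loss * lambda.
    """
    return grad_pred * pred * (1.0 - pred) * upstream_loss_grad * lam

rng = np.random.default_rng(0)
pred_x = 1.0 / (1.0 + np.exp(-rng.normal(size=(2, 3, 7, 7))))  # sigmoid outputs
grad_x = rng.normal(size=pred_x.shape)                          # d(loss_x)/d(pred_x)
dX = scale_grad_to_raw_input(grad_x, pred_x, upstream_loss_grad=1.0, lam=2.5)
print(dX.shape)  # (2, 3, 7, 7)
```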
python/paddle/fluid/layers/detection.py

```diff
@@ -20,6 +20,7 @@ from __future__ import print_function
 from .layer_function_generator import generate_layer_fn
 from .layer_function_generator import autodoc, templatedoc
 from ..layer_helper import LayerHelper
+from ..framework import Variable
 from . import tensor
 from . import nn
 from . import ops
@@ -45,6 +46,7 @@ __all__ = [
     'iou_similarity',
     'box_coder',
     'polygon_box_transform',
+    'yolov3_loss',
 ]
@@ -404,6 +406,102 @@ def polygon_box_transform(input, name=None):
     return output


+@templatedoc(op_type="yolov3_loss")
+def yolov3_loss(x,
+                gtbox,
+                anchors,
+                class_num,
+                ignore_thresh,
+                lambda_xy=None,
+                lambda_wh=None,
+                lambda_conf_obj=None,
+                lambda_conf_noobj=None,
+                lambda_class=None,
+                name=None):
+    """
+    ${comment}
+
+    Args:
+        x (Variable): ${x_comment}
+        gtbox (Variable): groud truth boxes, shoulb be in shape of [N, B, 5],
+                          in the third dimenstion, class_id, x, y, w, h should
+                          be stored and x, y, w, h should be relative valud of
+                          input image.
+        anchors (list|tuple): ${anchors_comment}
+        class_num (int): ${class_num_comment}
+        ignore_thresh (float): ${ignore_thresh_comment}
+        lambda_xy (float|None): ${lambda_xy_comment}
+        lambda_wh (float|None): ${lambda_wh_comment}
+        lambda_conf_obj (float|None): ${lambda_conf_obj_comment}
+        lambda_conf_noobj (float|None): ${lambda_conf_noobj_comment}
+        lambda_class (float|None): ${lambda_class_comment}
+        name (string): the name of yolov3 loss
+
+    Returns:
+        Variable: A 1-D tensor with shape [1], the value of yolov3 loss
+
+    Raises:
+        TypeError: Input x of yolov3_loss must be Variable
+        TypeError: Input gtbox of yolov3_loss must be Variable"
+        TypeError: Attr anchors of yolov3_loss must be list or tuple
+        TypeError: Attr class_num of yolov3_loss must be an integer
+        TypeError: Attr ignore_thresh of yolov3_loss must be a float number
+
+    Examples:
+      .. code-block:: python
+
+        x = fluid.layers.data(name='x', shape=[10, 255, 13, 13], dtype='float32')
+        gtbox = fluid.layers.data(name='gtbox', shape=[10, 6, 5], dtype='float32')
+        anchors = [10, 13, 16, 30, 33, 23]
+        loss = fluid.layers.yolov3_loss(x=x, gtbox=gtbox, class_num=80
+                                        anchors=anchors, ignore_thresh=0.5)
+    """
+    helper = LayerHelper('yolov3_loss', **locals())
+
+    if not isinstance(x, Variable):
+        raise TypeError("Input x of yolov3_loss must be Variable")
+    if not isinstance(gtbox, Variable):
+        raise TypeError("Input gtbox of yolov3_loss must be Variable")
+    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
+        raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
+    if not isinstance(class_num, int):
+        raise TypeError("Attr class_num of yolov3_loss must be an integer")
+    if not isinstance(ignore_thresh, float):
+        raise TypeError(
+            "Attr ignore_thresh of yolov3_loss must be a float number")
+
+    if name is None:
+        loss = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        loss = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    attrs = {
+        "anchors": anchors,
+        "class_num": class_num,
+        "ignore_thresh": ignore_thresh,
+    }
+
+    if lambda_xy is not None and isinstance(lambda_xy, float):
+        self.attrs['lambda_xy'] = lambda_xy
+    if lambda_wh is not None and isinstance(lambda_wh, float):
+        self.attrs['lambda_wh'] = lambda_wh
+    if lambda_conf_obj is not None and isinstance(lambda_conf_obj, float):
+        self.attrs['lambda_conf_obj'] = lambda_conf_obj
+    if lambda_conf_noobj is not None and isinstance(lambda_conf_noobj, float):
+        self.attrs['lambda_conf_noobj'] = lambda_conf_noobj
+    if lambda_class is not None and isinstance(lambda_class, float):
+        self.attrs['lambda_class'] = lambda_class
+
+    helper.append_op(
+        type='yolov3_loss',
+        inputs={'X': x, "GTBox": gtbox},
+        outputs={'Loss': loss},
+        attrs=attrs)
+    return loss
+
+
 @templatedoc()
 def detection_map(detect_res,
                   label,
```
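A hedged usage sketch of the new fluid.layers.yolov3_loss layer, mirroring the docstring example above but adding the comma the example drops after class_num=80. Note that, as committed, the optional weights are routed through self.attrs inside a module-level function, so the lambda_* keyword shown here illustrates the intended call signature rather than guaranteed behavior:

```python
import paddle.fluid as fluid

# Shapes follow the docstring example above.
x = fluid.layers.data(name='x', shape=[10, 255, 13, 13], dtype='float32')
gtbox = fluid.layers.data(name='gtbox', shape=[10, 6, 5], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23]
loss = fluid.layers.yolov3_loss(
    x=x, gtbox=gtbox, anchors=anchors, class_num=80, ignore_thresh=0.5,
    lambda_conf_noobj=0.5)  # any lambda_* weight is optional and defaults to 1.0
```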
python/paddle/fluid/layers/nn.py

```diff
@@ -164,7 +164,6 @@ __all__ = [
     'hash',
     'grid_sampler',
     'log_loss',
-    'yolov3_loss',
     'add_position_encoding',
     'bilinear_tensor_product',
 ]
@@ -8244,74 +8243,6 @@ def log_loss(input, label, epsilon=1e-4, name=None):
     return loss


-@templatedoc(op_type="yolov3_loss")
-def yolov3_loss(x, gtbox, anchors, class_num, ignore_thresh, name=None):
-    """
-    ${comment}
-
-    Args:
-        x (Variable): ${x_comment}
-        gtbox (Variable): groud truth boxes, shoulb be in shape of [N, B, 5],
-                          in the third dimenstion, class_id, x, y, w, h should
-                          be stored and x, y, w, h should be relative valud of
-                          input image.
-        anchors (list|tuple): ${anchors_comment}
-        class_num (int): ${class_num_comment}
-        ignore_thresh (float): ${ignore_thresh_comment}
-        name (string): the name of yolov3 loss
-
-    Returns:
-        Variable: A 1-D tensor with shape [1], the value of yolov3 loss
-
-    Raises:
-        TypeError: Input x of yolov3_loss must be Variable
-        TypeError: Input gtbox of yolov3_loss must be Variable"
-        TypeError: Attr anchors of yolov3_loss must be list or tuple
-        TypeError: Attr class_num of yolov3_loss must be an integer
-        TypeError: Attr ignore_thresh of yolov3_loss must be a float number
-
-    Examples:
-      .. code-block:: python
-
-        x = fluid.layers.data(name='x', shape=[10, 255, 13, 13], dtype='float32')
-        gtbox = fluid.layers.data(name='gtbox', shape=[10, 6, 5], dtype='float32')
-        anchors = [10, 13, 16, 30, 33, 23]
-        loss = fluid.layers.yolov3_loss(x=x, gtbox=gtbox, class_num=80
-                                        anchors=anchors, ignore_thresh=0.5)
-    """
-    helper = LayerHelper('yolov3_loss', **locals())
-
-    if not isinstance(x, Variable):
-        raise TypeError("Input x of yolov3_loss must be Variable")
-    if not isinstance(gtbox, Variable):
-        raise TypeError("Input gtbox of yolov3_loss must be Variable")
-    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
-        raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
-    if not isinstance(class_num, int):
-        raise TypeError("Attr class_num of yolov3_loss must be an integer")
-    if not isinstance(ignore_thresh, float):
-        raise TypeError(
-            "Attr ignore_thresh of yolov3_loss must be a float number")
-
-    if name is None:
-        loss = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        loss = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
-
-    helper.append_op(
-        type='yolov3_loss',
-        inputs={'X': x, "GTBox": gtbox},
-        outputs={'Loss': loss},
-        attrs={
-            "anchors": anchors,
-            "class_num": class_num,
-            "ignore_thresh": ignore_thresh,
-        })
-    return loss
-
-
 def add_position_encoding(input, alpha, beta, name=None):
     """
     **Add Position Encoding Layer**
```
python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py

```diff
@@ -148,11 +148,20 @@ def YoloV3Loss(x, gtbox, attrs):
     loss_class = bce(pred_cls * obj_mask_expand, tcls * obj_mask_expand,
                      obj_mask_expand)
-    return loss_x + loss_y + loss_w + loss_h + loss_conf_obj + loss_conf_noobj + loss_class
+    return attrs['lambda_xy'] * (loss_x + loss_y) \
+            + attrs['lambda_wh'] * (loss_w + loss_h) \
+            + attrs['lambda_conf_obj'] * loss_conf_obj \
+            + attrs['lambda_conf_noobj'] * loss_conf_noobj \
+            + attrs['lambda_class'] * loss_class


 class TestYolov3LossOp(OpTest):
     def setUp(self):
+        self.lambda_xy = 1.0
+        self.lambda_wh = 1.0
+        self.lambda_conf_obj = 1.0
+        self.lambda_conf_noobj = 1.0
+        self.lambda_class = 1.0
         self.initTestCase()
         self.op_type = 'yolov3_loss'
         x = np.random.random(size=self.x_shape).astype('float32')
@@ -164,6 +173,11 @@ class TestYolov3LossOp(OpTest):
             "anchors": self.anchors,
             "class_num": self.class_num,
             "ignore_thresh": self.ignore_thresh,
+            "lambda_xy": self.lambda_xy,
+            "lambda_wh": self.lambda_wh,
+            "lambda_conf_obj": self.lambda_conf_obj,
+            "lambda_conf_noobj": self.lambda_conf_noobj,
+            "lambda_class": self.lambda_class,
         }

         self.inputs = {'X': x, 'GTBox': gtbox}
@@ -182,7 +196,7 @@ class TestYolov3LossOp(OpTest):
             place, ['X'],
             'Loss',
             no_grad_set=set("GTBox"),
-            max_relative_error=0.1)
+            max_relative_error=0.06)

     def initTestCase(self):
         self.anchors = [10, 13, 12, 12]
@@ -190,6 +204,11 @@ class TestYolov3LossOp(OpTest):
         self.ignore_thresh = 0.5
         self.x_shape = (5, len(self.anchors) // 2 * (5 + self.class_num), 7, 7)
         self.gtbox_shape = (5, 5, 5)
+        self.lambda_xy = 2.5
+        self.lambda_wh = 0.8
+        self.lambda_conf_obj = 1.5
+        self.lambda_conf_noobj = 0.5
+        self.lambda_class = 1.2


 if __name__ == "__main__":
```
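The x_shape used by the test reflects the channel layout described in the op's AddInput("X") comment: for every anchor box the channel axis packs x, y, w, h, a confidence score, and class_num class scores. A small sketch of that arithmetic, with a hypothetical class_num since the test's actual value is set outside this diff:

```python
# Channel layout assumed by the op: per anchor -> [x, y, w, h, conf] + class scores.
anchors = [10, 13, 12, 12]    # two (w, h) pairs, as in initTestCase
class_num = 10                # hypothetical value for illustration
an_num = len(anchors) // 2    # anchors are parsed pair by pair -> 2 anchor boxes
channels = an_num * (5 + class_num)
print(channels)               # 30 channels in the second dimension of X
x_shape = (5, channels, 7, 7) # matches the pattern used in the test
```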