Commit 15ce2c1b (unverified), authored by zhiboniu on Aug 03, 2022 and committed via GitHub on Aug 03, 2022.

transfer op multiclass_nms3 to phi (#44765)

* add cmake enforce
* transfer multiclass_nms3 to phi

Parent: 02414aac
Showing 16 changed files with 972 additions and 136 deletions (+972 −136).
paddle/fluid/operators/detection/CMakeLists.txt                +0    −4
paddle/fluid/operators/detection/multiclass_nms_op.cc          +8    −10
paddle/fluid/operators/detection/poly_util.cc                  +22   −17
paddle/fluid/operators/detection/poly_util.h                   +7    −4
paddle/phi/api/yaml/legacy_api.yaml                            +9    −0
paddle/phi/infermeta/ternary.cc                                +93   −0
paddle/phi/infermeta/ternary.h                                 +15   −0
paddle/phi/kernels/CMakeLists.txt                              +1    −0
paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc               +632  −0
paddle/phi/kernels/funcs/CMakeLists.txt                        +1    −0
paddle/phi/kernels/funcs/gpc.cc                                +20   −24
paddle/phi/kernels/funcs/gpc.h                                 +8    −6
paddle/phi/kernels/multiclass_nms3_kernel.h                    +37   −0
paddle/phi/ops/compat/multiclass_nms3_sig.cc                   +36   −0
python/paddle/fluid/tests/unittests/op_test.py                 +1    −0
python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py  +82   −71
paddle/fluid/operators/detection/CMakeLists.txt

@@ -123,9 +123,5 @@ cc_test(
   mask_util_test
   SRCS mask_util_test.cc
   DEPS memory mask_util)
-cc_library(
-  gpc
-  SRCS gpc.cc
-  DEPS op_registry)
 detection_library(generate_mask_labels_op SRCS generate_mask_labels_op.cc DEPS
                   mask_util)
paddle/fluid/operators/detection/multiclass_nms_op.cc

@@ -13,8 +13,10 @@ limitations under the License. */
 #include <glog/logging.h>

+#include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/detection/nms_util.h"
+#include "paddle/phi/infermeta/ternary.h"

 namespace paddle {
 namespace operators {
@@ -609,12 +611,6 @@ class MultiClassNMS3Op : public MultiClassNMS2Op {
                    const framework::VariableNameMap& outputs,
                    const framework::AttributeMap& attrs)
       : MultiClassNMS2Op(type, inputs, outputs, attrs) {}
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    MultiClassNMS2Op::InferShape(ctx);
-    ctx->SetOutputDim("NmsRoisNum", {-1});
-  }
-
 };

 class MultiClassNMS3OpMaker : public MultiClassNMS2OpMaker {
@@ -633,6 +629,10 @@ class MultiClassNMS3OpMaker : public MultiClassNMS2OpMaker {
 }  // namespace operators
 }  // namespace paddle

+DECLARE_INFER_SHAPE_FUNCTOR(multiclass_nms3,
+                            MultiClassNMSShapeFunctor,
+                            PD_INFER_META(phi::MultiClassNMSInferMeta));
+
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(
     multiclass_nms,
@@ -658,7 +658,5 @@ REGISTER_OPERATOR(
     ops::MultiClassNMS3Op,
     ops::MultiClassNMS3OpMaker,
     paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
-    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
-REGISTER_OP_CPU_KERNEL(multiclass_nms3,
-                       ops::MultiClassNMSKernel<float>,
-                       ops::MultiClassNMSKernel<double>);
+    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
+    MultiClassNMSShapeFunctor);
paddle/fluid/operators/detection/poly_util.cc

@@ -21,8 +21,8 @@ limitations under the License. */
 namespace paddle {
 namespace operators {

-using gpc::gpc_free_polygon;
-using gpc::gpc_polygon_clip;
+using phi::funcs::gpc_free_polygon;
+using phi::funcs::gpc_polygon_clip;

 template <class T>
 void Array2PointVec(const T* box,
@@ -37,15 +37,18 @@ void Array2PointVec(const T* box,
 }

 template <class T>
-void Array2Poly(const T* box, const size_t box_size, gpc::gpc_polygon* poly) {
+void Array2Poly(const T* box,
+                const size_t box_size,
+                phi::funcs::gpc_polygon* poly) {
   size_t pts_num = box_size / 2;
   (*poly).num_contours = 1;
   (*poly).hole = reinterpret_cast<int*>(malloc(sizeof(int)));
   (*poly).hole[0] = 0;
-  (*poly).contour = (gpc::gpc_vertex_list*)malloc(sizeof(gpc::gpc_vertex_list));
+  (*poly).contour = (phi::funcs::gpc_vertex_list*)malloc(
+      sizeof(phi::funcs::gpc_vertex_list));
   (*poly).contour->num_vertices = pts_num;
   (*poly).contour->vertex =
-      (gpc::gpc_vertex*)malloc(sizeof(gpc::gpc_vertex) * pts_num);
+      (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
   for (size_t i = 0; i < pts_num; ++i) {
     (*poly).contour->vertex[i].x = box[2 * i];
     (*poly).contour->vertex[i].y = box[2 * i + 1];
@@ -53,15 +56,17 @@ void Array2Poly(const T* box, const size_t box_size, gpc::gpc_polygon* poly) {
 }

 template <class T>
-void PointVec2Poly(const std::vector<Point_<T>>& vec, gpc::gpc_polygon* poly) {
+void PointVec2Poly(const std::vector<Point_<T>>& vec,
+                   phi::funcs::gpc_polygon* poly) {
   int pts_num = vec.size();
   (*poly).num_contours = 1;
   (*poly).hole = reinterpret_cast<int*>(malloc(sizeof(int)));
   (*poly).hole[0] = 0;
-  (*poly).contour = (gpc::gpc_vertex_list*)malloc(sizeof(gpc::gpc_vertex_list));
+  (*poly).contour = (phi::funcs::gpc_vertex_list*)malloc(
+      sizeof(phi::funcs::gpc_vertex_list));
   (*poly).contour->num_vertices = pts_num;
   (*poly).contour->vertex =
-      (gpc::gpc_vertex*)malloc(sizeof(gpc::gpc_vertex) * pts_num);
+      (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
   for (size_t i = 0; i < pts_num; ++i) {
     (*poly).contour->vertex[i].x = vec[i].x;
     (*poly).contour->vertex[i].y = vec[i].y;
@@ -69,7 +74,7 @@ void PointVec2Poly(const std::vector<Point_<T>>& vec, gpc::gpc_polygon* poly) {
 }

 template <class T>
-void Poly2PointVec(const gpc::gpc_vertex_list& contour,
+void Poly2PointVec(const phi::funcs::gpc_vertex_list& contour,
                    std::vector<Point_<T>>* vec) {
   int pts_num = contour.num_vertices;
   (*vec).resize(pts_num);
@@ -105,13 +110,13 @@ T PolyOverlapArea(const T* box1,
                   const T* box2,
                   const size_t box_size,
                   const bool normalized) {
-  gpc::gpc_polygon poly1;
-  gpc::gpc_polygon poly2;
+  phi::funcs::gpc_polygon poly1;
+  phi::funcs::gpc_polygon poly2;
   Array2Poly<T>(box1, box_size, &poly1);
   Array2Poly<T>(box2, box_size, &poly2);
-  gpc::gpc_polygon respoly;
-  gpc::gpc_op op = gpc::GPC_INT;
-  gpc::gpc_polygon_clip(op, &poly2, &poly1, &respoly);
+  phi::funcs::gpc_polygon respoly;
+  phi::funcs::gpc_op op = phi::funcs::GPC_INT;
+  phi::funcs::gpc_polygon_clip(op, &poly2, &poly1, &respoly);

   T inter_area = T(0.);
   int contour_num = respoly.num_contours;
@@ -123,9 +128,9 @@ T PolyOverlapArea(const T* box1,
     inter_area += GetContourArea<T>(resvec);
   }

-  gpc::gpc_free_polygon(&poly1);
-  gpc::gpc_free_polygon(&poly2);
-  gpc::gpc_free_polygon(&respoly);
+  phi::funcs::gpc_free_polygon(&poly1);
+  phi::funcs::gpc_free_polygon(&poly2);
+  phi::funcs::gpc_free_polygon(&respoly);
   return inter_area;
 }
paddle/fluid/operators/detection/poly_util.h

@@ -16,7 +16,7 @@ limitations under the License. */
 #include <vector>
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/operators/detection/gpc.h"
+#include "paddle/phi/kernels/funcs/gpc.h"

 namespace paddle {
 namespace operators {
@@ -47,13 +47,16 @@ void Array2PointVec(const T* box,
                     std::vector<Point_<T>>* vec);

 template <class T>
-void Array2Poly(const T* box, const size_t box_size, gpc::gpc_polygon* poly);
+void Array2Poly(const T* box,
+                const size_t box_size,
+                phi::funcs::gpc_polygon* poly);

 template <class T>
-void PointVec2Poly(const std::vector<Point_<T>>& vec, gpc::gpc_polygon* poly);
+void PointVec2Poly(const std::vector<Point_<T>>& vec,
+                   phi::funcs::gpc_polygon* poly);

 template <class T>
-void Poly2PointVec(const gpc::gpc_vertex_list& contour,
+void Poly2PointVec(const phi::funcs::gpc_vertex_list& contour,
                    std::vector<Point_<T>>* vec);

 template <class T>
paddle/phi/api/yaml/legacy_api.yaml

@@ -1670,6 +1670,15 @@
     func : multi_dot
   backward : multi_dot_grad

+- api : multiclass_nms3
+  args : (Tensor bboxes, Tensor scores, Tensor rois_num, float score_threshold, int nms_top_k, int keep_top_k, float nms_threshold=0.3, bool normalized=true, float nms_eta=1.0, int background_label=0)
+  output : Tensor(out), Tensor(index), Tensor(nms_rois_num)
+  infer_meta :
+    func : MultiClassNMSInferMeta
+  kernel :
+    func : multiclass_nms3
+  optional : rois_num
+
 # multinomial
 - api : multinomial
   args : (Tensor x, int num_samples, bool replacement)
paddle/phi/infermeta/ternary.cc

@@ -743,6 +743,99 @@ void LinspaceInferMeta(const MetaTensor& start,
   LinspaceRawInferMeta(start, stop, number, out);
 }

+void MultiClassNMSInferMeta(const MetaTensor& bboxes,
+                            const MetaTensor& scores,
+                            const MetaTensor& rois_num,
+                            float score_threshold,
+                            int nms_top_k,
+                            int keep_top_k,
+                            float nms_threshold,
+                            bool normalized,
+                            float nms_eta,
+                            int background_label,
+                            MetaTensor* out,
+                            MetaTensor* index,
+                            MetaTensor* nms_rois_num,
+                            MetaConfig config) {
+  auto box_dims = bboxes.dims();
+  auto score_dims = scores.dims();
+  auto score_size = score_dims.size();
+
+  if (config.is_runtime) {
+    PADDLE_ENFORCE_EQ(
+        score_size == 2 || score_size == 3,
+        true,
+        errors::InvalidArgument("The rank of Input(Scores) must be 2 or 3"
+                                ". But received rank = %d",
+                                score_size));
+    PADDLE_ENFORCE_EQ(
+        box_dims.size(),
+        3,
+        errors::InvalidArgument("The rank of Input(BBoxes) must be 3"
+                                ". But received rank = %d",
+                                box_dims.size()));
+    if (score_size == 3) {
+      PADDLE_ENFORCE_EQ(box_dims[2] == 4 || box_dims[2] == 8 ||
+                            box_dims[2] == 16 || box_dims[2] == 24 ||
+                            box_dims[2] == 32,
+                        true,
+                        errors::InvalidArgument(
+                            "The last dimension of Input"
+                            "(BBoxes) must be 4 or 8, "
+                            "represents the layout of coordinate "
+                            "[xmin, ymin, xmax, ymax] or "
+                            "4 points: [x1, y1, x2, y2, x3, y3, x4, y4] or "
+                            "8 points: [xi, yi] i= 1,2,...,8 or "
+                            "12 points: [xi, yi] i= 1,2,...,12 or "
+                            "16 points: [xi, yi] i= 1,2,...,16"));
+      PADDLE_ENFORCE_EQ(
+          box_dims[1],
+          score_dims[2],
+          errors::InvalidArgument(
+              "The 2nd dimension of Input(BBoxes) must be equal to "
+              "last dimension of Input(Scores), which represents the "
+              "predicted bboxes."
+              "But received box_dims[1](%s) != socre_dims[2](%s)",
+              box_dims[1],
+              score_dims[2]));
+    } else {
+      PADDLE_ENFORCE_EQ(
+          box_dims[2],
+          4,
+          errors::InvalidArgument(
+              "The last dimension of Input"
+              "(BBoxes) must be 4. But received dimension = %d",
+              box_dims[2]));
+      PADDLE_ENFORCE_EQ(
+          box_dims[1],
+          score_dims[1],
+          errors::InvalidArgument(
+              "The 2nd dimension of Input"
+              "(BBoxes) must be equal to the 2nd dimension of Input(Scores). "
+              "But received box dimension = %d, score dimension = %d",
+              box_dims[1],
+              score_dims[1]));
+    }
+  }
+  PADDLE_ENFORCE_NE(out,
+                    nullptr,
+                    errors::InvalidArgument(
+                        "The out in MultiClassNMSInferMeta can't be nullptr."));
+  PADDLE_ENFORCE_NE(
+      index,
+      nullptr,
+      errors::InvalidArgument(
+          "The index in MultiClassNMSInferMeta can't be nullptr."));
+  // Here the box_dims[0] is not the real dimension of output.
+  // It will be rewritten in the computing kernel.
+  out->set_dims(phi::make_ddim({-1, box_dims[2] + 2}));
+  out->set_dtype(bboxes.dtype());
+  index->set_dims(phi::make_ddim({-1, box_dims[2] + 2}));
+  index->set_dtype(DataType::INT32);
+  nms_rois_num->set_dims(phi::make_ddim({-1}));
+  nms_rois_num->set_dtype(DataType::INT32);
+}
+
 void NllLossRawInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
                          const MetaTensor& weight,
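Note on the shape inference above: only the column layout is fixed at compile time; the leading dimension stays -1 because the number of kept boxes is decided inside the kernel. Each output row is [label, score, box coordinates...], so its width is box_size + 2. A small Python sketch of just that arithmetic (an illustration, not part of the diff):

def multiclass_nms3_out_width(box_size):
    # Row layout is [label, score, coordinates...], hence box_size + 2 columns.
    assert box_size in (4, 8, 16, 24, 32)
    return box_size + 2

print(multiclass_nms3_out_width(4))  # 6 -> [label, score, xmin, ymin, xmax, ymax]
print(multiclass_nms3_out_width(8))  # 10 -> [label, score, x1, y1, ..., x4, y4]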
paddle/phi/infermeta/ternary.h

@@ -123,6 +123,21 @@ void LinspaceInferMeta(const MetaTensor& start,
                        DataType dtype,
                        MetaTensor* out);

+void MultiClassNMSInferMeta(const MetaTensor& bboxes,
+                            const MetaTensor& scores,
+                            const MetaTensor& rois_num,
+                            float score_threshold,
+                            int nms_top_k,
+                            int keep_top_k,
+                            float nms_threshold,
+                            bool normalized,
+                            float nms_eta,
+                            int background_label,
+                            MetaTensor* out,
+                            MetaTensor* index,
+                            MetaTensor* nms_rois_num,
+                            MetaConfig config = MetaConfig());
+
 void NllLossRawInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
                          const MetaTensor& weight,
paddle/phi/kernels/CMakeLists.txt

@@ -80,6 +80,7 @@ set(COMMON_KERNEL_DEPS
     lod_utils
     custom_kernel
     string_infermeta
+    gpc
     utf8proc)

 copy_if_different(${kernel_declare_file} ${kernel_declare_file_final})
paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc (new file, mode 100644)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/multiclass_nms3_kernel.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/gpc.h"
namespace phi {

using phi::funcs::gpc_free_polygon;
using phi::funcs::gpc_polygon_clip;

template <class T>
class Point_ {
 public:
  // default constructor
  Point_() {}
  Point_(T _x, T _y) {}
  Point_(const Point_& pt) {}

  Point_& operator=(const Point_& pt);
  // conversion to another data type
  // template<typename _T> operator Point_<_T>() const;
  // conversion to the old-style C structures
  // operator Vec<T, 2>() const;

  // checks whether the point is inside the specified rectangle
  // bool inside(const Rect_<T>& r) const;
  T x;  //!< x coordinate of the point
  T y;  //!< y coordinate of the point
};

template <class T>
void Array2PointVec(const T* box,
                    const size_t box_size,
                    std::vector<Point_<T>>* vec) {
  size_t pts_num = box_size / 2;
  (*vec).resize(pts_num);
  for (size_t i = 0; i < pts_num; i++) {
    (*vec).at(i).x = box[2 * i];
    (*vec).at(i).y = box[2 * i + 1];
  }
}

template <class T>
void Array2Poly(const T* box,
                const size_t box_size,
                phi::funcs::gpc_polygon* poly) {
  size_t pts_num = box_size / 2;
  (*poly).num_contours = 1;
  (*poly).hole = reinterpret_cast<int*>(malloc(sizeof(int)));
  (*poly).hole[0] = 0;
  (*poly).contour = (phi::funcs::gpc_vertex_list*)malloc(
      sizeof(phi::funcs::gpc_vertex_list));
  (*poly).contour->num_vertices = pts_num;
  (*poly).contour->vertex =
      (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
  for (size_t i = 0; i < pts_num; ++i) {
    (*poly).contour->vertex[i].x = box[2 * i];
    (*poly).contour->vertex[i].y = box[2 * i + 1];
  }
}

template <class T>
void PointVec2Poly(const std::vector<Point_<T>>& vec,
                   phi::funcs::gpc_polygon* poly) {
  int pts_num = vec.size();
  (*poly).num_contours = 1;
  (*poly).hole = reinterpret_cast<int*>(malloc(sizeof(int)));
  (*poly).hole[0] = 0;
  (*poly).contour = (phi::funcs::gpc_vertex_list*)malloc(
      sizeof(phi::funcs::gpc_vertex_list));
  (*poly).contour->num_vertices = pts_num;
  (*poly).contour->vertex =
      (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
  for (size_t i = 0; i < pts_num; ++i) {
    (*poly).contour->vertex[i].x = vec[i].x;
    (*poly).contour->vertex[i].y = vec[i].y;
  }
}

template <class T>
void Poly2PointVec(const phi::funcs::gpc_vertex_list& contour,
                   std::vector<Point_<T>>* vec) {
  int pts_num = contour.num_vertices;
  (*vec).resize(pts_num);
  for (int i = 0; i < pts_num; i++) {
    (*vec).at(i).x = contour.vertex[i].x;
    (*vec).at(i).y = contour.vertex[i].y;
  }
}

template <class T>
T GetContourArea(const std::vector<Point_<T>>& vec) {
  size_t pts_num = vec.size();
  if (pts_num < 3) return T(0.);
  T area = T(0.);
  for (size_t i = 0; i < pts_num; ++i) {
    area += vec[i].x * vec[(i + 1) % pts_num].y -
            vec[i].y * vec[(i + 1) % pts_num].x;
  }
  return std::fabs(area / 2.0);
}
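GetContourArea above is the shoelace formula: sum the cross products of consecutive vertices and take half the absolute value. A standalone Python check of the same computation (illustrative only, not part of the diff):

def contour_area(pts):
    # Shoelace formula: 0.5 * |sum_i (x_i * y_{i+1} - y_i * x_{i+1})|, indices taken cyclically.
    if len(pts) < 3:
        return 0.0
    n = len(pts)
    area = 0.0
    for i in range(n):
        x0, y0 = pts[i]
        x1, y1 = pts[(i + 1) % n]
        area += x0 * y1 - y0 * x1
    return abs(area) / 2.0

print(contour_area([(0, 0), (2, 0), (2, 1), (0, 1)]))  # 2.0 for a 2x1 rectangle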
template <class T>
T PolyArea(const T* box, const size_t box_size, const bool normalized) {
  // If coordinate values are is invalid
  // if area size <= 0, return 0.
  std::vector<Point_<T>> vec;
  Array2PointVec<T>(box, box_size, &vec);
  return GetContourArea<T>(vec);
}

template <class T>
T PolyOverlapArea(const T* box1,
                  const T* box2,
                  const size_t box_size,
                  const bool normalized) {
  phi::funcs::gpc_polygon poly1;
  phi::funcs::gpc_polygon poly2;
  Array2Poly<T>(box1, box_size, &poly1);
  Array2Poly<T>(box2, box_size, &poly2);
  phi::funcs::gpc_polygon respoly;
  phi::funcs::gpc_op op = phi::funcs::GPC_INT;
  phi::funcs::gpc_polygon_clip(op, &poly2, &poly1, &respoly);

  T inter_area = T(0.);
  int contour_num = respoly.num_contours;
  for (int i = 0; i < contour_num; ++i) {
    std::vector<Point_<T>> resvec;
    Poly2PointVec<T>(respoly.contour[i], &resvec);
    // inter_area += std::fabs(cv::contourArea(resvec)) + 0.5f *
    // (cv::arcLength(resvec, true));
    inter_area += GetContourArea<T>(resvec);
  }

  phi::funcs::gpc_free_polygon(&poly1);
  phi::funcs::gpc_free_polygon(&poly2);
  phi::funcs::gpc_free_polygon(&respoly);
  return inter_area;
}

template <class T>
bool SortScorePairDescend(const std::pair<float, T>& pair1,
                          const std::pair<float, T>& pair2) {
  return pair1.first > pair2.first;
}

template <class T>
static inline void GetMaxScoreIndex(
    const std::vector<T>& scores,
    const T threshold,
    int top_k,
    std::vector<std::pair<T, int>>* sorted_indices) {
  for (size_t i = 0; i < scores.size(); ++i) {
    if (scores[i] > threshold) {
      sorted_indices->push_back(std::make_pair(scores[i], i));
    }
  }
  // Sort the score pair according to the scores in descending order
  std::stable_sort(sorted_indices->begin(),
                   sorted_indices->end(),
                   SortScorePairDescend<int>);
  // Keep top_k scores if needed.
  if (top_k > -1 && top_k < static_cast<int>(sorted_indices->size())) {
    sorted_indices->resize(top_k);
  }
}

template <class T>
static inline T BBoxArea(const T* box, const bool normalized) {
  if (box[2] < box[0] || box[3] < box[1]) {
    // If coordinate values are is invalid
    // (e.g. xmax < xmin or ymax < ymin), return 0.
    return static_cast<T>(0.);
  } else {
    const T w = box[2] - box[0];
    const T h = box[3] - box[1];
    if (normalized) {
      return w * h;
    } else {
      // If coordinate values are not within range [0, 1].
      return (w + 1) * (h + 1);
    }
  }
}

template <class T>
static inline T JaccardOverlap(const T* box1,
                               const T* box2,
                               const bool normalized) {
  if (box2[0] > box1[2] || box2[2] < box1[0] || box2[1] > box1[3] ||
      box2[3] < box1[1]) {
    return static_cast<T>(0.);
  } else {
    const T inter_xmin = std::max(box1[0], box2[0]);
    const T inter_ymin = std::max(box1[1], box2[1]);
    const T inter_xmax = std::min(box1[2], box2[2]);
    const T inter_ymax = std::min(box1[3], box2[3]);
    T norm = normalized ? static_cast<T>(0.) : static_cast<T>(1.);
    T inter_w = inter_xmax - inter_xmin + norm;
    T inter_h = inter_ymax - inter_ymin + norm;
    const T inter_area = inter_w * inter_h;
    const T bbox1_area = BBoxArea<T>(box1, normalized);
    const T bbox2_area = BBoxArea<T>(box2, normalized);
    return inter_area / (bbox1_area + bbox2_area - inter_area);
  }
}
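JaccardOverlap is plain intersection-over-union on axis-aligned boxes; when normalized is false a +1 term is added to count integer pixel coordinates inclusively. A quick Python equivalent of the normalized case (illustrative only, not part of the diff):

def iou(b1, b2):
    # Boxes are [xmin, ymin, xmax, ymax] in normalized coordinates (no +1 term).
    iw = max(0.0, min(b1[2], b2[2]) - max(b1[0], b2[0]))
    ih = max(0.0, min(b1[3], b2[3]) - max(b1[1], b2[1]))
    inter = iw * ih
    a1 = (b1[2] - b1[0]) * (b1[3] - b1[1])
    a2 = (b2[2] - b2[0]) * (b2[3] - b2[1])
    return inter / (a1 + a2 - inter) if inter > 0 else 0.0

print(iou([0, 0, 1, 1], [0.5, 0, 1.5, 1]))  # 0.5 / 1.5 = 0.333...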
template <class T>
T PolyIoU(const T* box1,
          const T* box2,
          const size_t box_size,
          const bool normalized) {
  T bbox1_area = PolyArea<T>(box1, box_size, normalized);
  T bbox2_area = PolyArea<T>(box2, box_size, normalized);
  T inter_area = PolyOverlapArea<T>(box1, box2, box_size, normalized);
  if (bbox1_area == 0 || bbox2_area == 0 || inter_area == 0) {
    // If coordinate values are invalid
    // if area size <= 0, return 0.
    return T(0.);
  } else {
    return inter_area / (bbox1_area + bbox2_area - inter_area);
  }
}

inline std::vector<size_t> GetNmsLodFromRoisNum(const DenseTensor* rois_num) {
  std::vector<size_t> rois_lod;
  auto* rois_num_data = rois_num->data<int>();
  rois_lod.push_back(static_cast<size_t>(0));
  for (int i = 0; i < rois_num->numel(); ++i) {
    rois_lod.push_back(rois_lod.back() + static_cast<size_t>(rois_num_data[i]));
  }
  return rois_lod;
}
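GetNmsLodFromRoisNum turns the per-image box counts in RoisNum into cumulative offsets in LoD form, so image i owns rows [lod[i], lod[i+1]) of the flattened tensor. A Python sketch with made-up counts (illustrative only):

def rois_num_to_lod(rois_num):
    # Prefix sums with a leading 0: [2, 3, 1] -> [0, 2, 5, 6]
    lod = [0]
    for n in rois_num:
        lod.append(lod[-1] + n)
    return lod

print(rois_num_to_lod([2, 3, 1]))  # [0, 2, 5, 6]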
template <typename T, typename Context>
void SliceOneClass(const Context& ctx,
                   const DenseTensor& items,
                   const int class_id,
                   DenseTensor* one_class_item) {
  // T* item_data = one_class_item->mutable_data<T>(ctx.GetPlace());
  T* item_data = ctx.template Alloc<T>(one_class_item);
  const T* items_data = items.data<T>();
  const int64_t num_item = items.dims()[0];
  const int class_num = items.dims()[1];
  if (items.dims().size() == 3) {
    int item_size = items.dims()[2];
    for (int i = 0; i < num_item; ++i) {
      std::memcpy(item_data + i * item_size,
                  items_data + i * class_num * item_size + class_id * item_size,
                  sizeof(T) * item_size);
    }
  } else {
    for (int i = 0; i < num_item; ++i) {
      item_data[i] = items_data[i * class_num + class_id];
    }
  }
}

template <typename T>
void NMSFast(const DenseTensor& bbox,
             const DenseTensor& scores,
             const T score_threshold,
             const T nms_threshold,
             const T eta,
             const int64_t top_k,
             std::vector<int>* selected_indices,
             const bool normalized) {
  // The total boxes for each instance.
  int64_t num_boxes = bbox.dims()[0];
  // 4: [xmin ymin xmax ymax]
  // 8: [x1 y1 x2 y2 x3 y3 x4 y4]
  // 16, 24, or 32: [x1 y1 x2 y2 ...  xn yn], n = 8, 12 or 16
  int64_t box_size = bbox.dims()[1];

  std::vector<T> scores_data(num_boxes);
  std::copy_n(scores.data<T>(), num_boxes, scores_data.begin());
  std::vector<std::pair<T, int>> sorted_indices;
  GetMaxScoreIndex<T>(scores_data, score_threshold, top_k, &sorted_indices);
  selected_indices->clear();
  T adaptive_threshold = nms_threshold;
  const T* bbox_data = bbox.data<T>();

  while (sorted_indices.size() != 0) {
    const int idx = sorted_indices.front().second;
    bool keep = true;
    for (size_t k = 0; k < selected_indices->size(); ++k) {
      if (keep) {
        const int kept_idx = (*selected_indices)[k];
        T overlap = T(0.);
        // 4: [xmin ymin xmax ymax]
        if (box_size == 4) {
          overlap = JaccardOverlap<T>(bbox_data + idx * box_size,
                                      bbox_data + kept_idx * box_size,
                                      normalized);
        }
        // 8: [x1 y1 x2 y2 x3 y3 x4 y4] or 16, 24, 32
        if (box_size == 8 || box_size == 16 || box_size == 24 ||
            box_size == 32) {
          overlap = PolyIoU<T>(bbox_data + idx * box_size,
                               bbox_data + kept_idx * box_size,
                               box_size,
                               normalized);
        }
        keep = overlap <= adaptive_threshold;
      } else {
        break;
      }
    }
    if (keep) {
      selected_indices->push_back(idx);
    }
    sorted_indices.erase(sorted_indices.begin());
    if (keep && eta < 1 && adaptive_threshold > 0.5) {
      adaptive_threshold *= eta;
    }
  }
}
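The nms_eta attribute makes the suppression threshold adaptive in NMSFast: after each kept box, if eta < 1 and the current threshold is still above 0.5, the threshold is multiplied by eta, so later candidates are suppressed more aggressively. A compact sketch of just that decay rule, detached from the tensor plumbing (illustrative only):

def adaptive_thresholds(nms_threshold, eta, kept_boxes):
    # Threshold in effect when each successive box is kept, under the decay rule above.
    t, out = nms_threshold, []
    for _ in range(kept_boxes):
        out.append(t)
        if eta < 1 and t > 0.5:
            t *= eta
    return out

print(adaptive_thresholds(0.7, 0.9, 4))  # approx. [0.7, 0.63, 0.567, 0.51]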
template <typename T, typename Context>
void MultiClassNMS(const Context& ctx,
                   const DenseTensor& scores,
                   const DenseTensor& bboxes,
                   const int scores_size,
                   float scorethreshold,
                   int nms_top_k,
                   int keep_top_k,
                   float nmsthreshold,
                   bool normalized,
                   float nmseta,
                   int background_label,
                   std::map<int, std::vector<int>>* indices,
                   int* num_nmsed_out) {
  T nms_threshold = static_cast<T>(nmsthreshold);
  T nms_eta = static_cast<T>(nmseta);
  T score_threshold = static_cast<T>(scorethreshold);

  int num_det = 0;

  int64_t class_num = scores_size == 3 ? scores.dims()[0] : scores.dims()[1];
  DenseTensor bbox_slice, score_slice;
  for (int64_t c = 0; c < class_num; ++c) {
    if (c == background_label) continue;
    if (scores_size == 3) {
      score_slice = scores.Slice(c, c + 1);
      bbox_slice = bboxes;
    } else {
      score_slice.Resize({scores.dims()[0], 1});
      bbox_slice.Resize({scores.dims()[0], 4});
      SliceOneClass<T, Context>(ctx, scores, c, &score_slice);
      SliceOneClass<T, Context>(ctx, bboxes, c, &bbox_slice);
    }
    NMSFast<T>(bbox_slice,
               score_slice,
               score_threshold,
               nms_threshold,
               nms_eta,
               nms_top_k,
               &((*indices)[c]),
               normalized);
    if (scores_size == 2) {
      std::stable_sort((*indices)[c].begin(), (*indices)[c].end());
    }
    num_det += (*indices)[c].size();
  }

  *num_nmsed_out = num_det;
  const T* scores_data = scores.data<T>();
  if (keep_top_k > -1 && num_det > keep_top_k) {
    const T* sdata;
    std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
    for (const auto& it : *indices) {
      int label = it.first;
      if (scores_size == 3) {
        sdata = scores_data + label * scores.dims()[1];
      } else {
        score_slice.Resize({scores.dims()[0], 1});
        SliceOneClass<T, Context>(ctx, scores, label, &score_slice);
        sdata = score_slice.data<T>();
      }
      const std::vector<int>& label_indices = it.second;
      for (size_t j = 0; j < label_indices.size(); ++j) {
        int idx = label_indices[j];
        score_index_pairs.push_back(
            std::make_pair(sdata[idx], std::make_pair(label, idx)));
      }
    }
    // Keep top k results per image.
    std::stable_sort(score_index_pairs.begin(),
                     score_index_pairs.end(),
                     SortScorePairDescend<std::pair<int, int>>);
    score_index_pairs.resize(keep_top_k);

    // Store the new indices.
    std::map<int, std::vector<int>> new_indices;
    for (size_t j = 0; j < score_index_pairs.size(); ++j) {
      int label = score_index_pairs[j].second.first;
      int idx = score_index_pairs[j].second.second;
      new_indices[label].push_back(idx);
    }
    if (scores_size == 2) {
      for (const auto& it : new_indices) {
        int label = it.first;
        std::stable_sort(new_indices[label].begin(), new_indices[label].end());
      }
    }
    new_indices.swap(*indices);
    *num_nmsed_out = keep_top_k;
  }
}

template <typename T, typename Context>
void MultiClassOutput(const Context& ctx,
                      const DenseTensor& scores,
                      const DenseTensor& bboxes,
                      const std::map<int, std::vector<int>>& selected_indices,
                      const int scores_size,
                      DenseTensor* out,
                      int* oindices = nullptr,
                      const int offset = 0) {
  int64_t class_num = scores.dims()[1];
  int64_t predict_dim = scores.dims()[1];
  int64_t box_size = bboxes.dims()[1];
  if (scores_size == 2) {
    box_size = bboxes.dims()[2];
  }
  int64_t out_dim = box_size + 2;
  auto* scores_data = scores.data<T>();
  auto* bboxes_data = bboxes.data<T>();
  auto* odata = out->data<T>();
  const T* sdata;
  DenseTensor bbox;
  bbox.Resize({scores.dims()[0], box_size});
  int count = 0;
  for (const auto& it : selected_indices) {
    int label = it.first;
    const std::vector<int>& indices = it.second;
    if (scores_size == 2) {
      SliceOneClass<T, Context>(ctx, bboxes, label, &bbox);
    } else {
      sdata = scores_data + label * predict_dim;
    }
    for (size_t j = 0; j < indices.size(); ++j) {
      int idx = indices[j];
      odata[count * out_dim] = label;  // label
      const T* bdata;
      if (scores_size == 3) {
        bdata = bboxes_data + idx * box_size;
        odata[count * out_dim + 1] = sdata[idx];  // score
        if (oindices != nullptr) {
          oindices[count] = offset + idx;
        }
      } else {
        bdata = bbox.data<T>() + idx * box_size;
        odata[count * out_dim + 1] = *(scores_data + idx * class_num + label);
        if (oindices != nullptr) {
          oindices[count] = offset + idx * class_num + label;
        }
      }
      // xmin, ymin, xmax, ymax or multi-points coordinates
      std::memcpy(odata + count * out_dim + 2, bdata, box_size * sizeof(T));
      count++;
    }
  }
}

template <typename T, typename Context>
void MultiClassNMSKernel(const Context& ctx,
                         const DenseTensor& bboxes,
                         const DenseTensor& scores,
                         const paddle::optional<DenseTensor>& rois_num,
                         float score_threshold,
                         int nms_top_k,
                         int keep_top_k,
                         float nms_threshold,
                         bool normalized,
                         float nms_eta,
                         int background_label,
                         DenseTensor* out,
                         DenseTensor* index,
                         DenseTensor* nms_rois_num) {
  bool return_index = index != nullptr;
  bool has_roisnum = rois_num.get_ptr() != nullptr;
  auto score_dims = scores.dims();
  auto score_size = score_dims.size();

  std::vector<std::map<int, std::vector<int>>> all_indices;
  std::vector<size_t> batch_starts = {0};
  int64_t batch_size = score_dims[0];
  int64_t box_dim = bboxes.dims()[2];
  int64_t out_dim = box_dim + 2;
  int num_nmsed_out = 0;
  DenseTensor boxes_slice, scores_slice;
  int n = 0;
  if (has_roisnum) {
    n = score_size == 3 ? batch_size : rois_num.get_ptr()->numel();
  } else {
    n = score_size == 3 ? batch_size : bboxes.lod().back().size() - 1;
  }
  for (int i = 0; i < n; ++i) {
    std::map<int, std::vector<int>> indices;
    if (score_size == 3) {
      scores_slice = scores.Slice(i, i + 1);
      scores_slice.Resize({score_dims[1], score_dims[2]});
      boxes_slice = bboxes.Slice(i, i + 1);
      boxes_slice.Resize({score_dims[2], box_dim});
    } else {
      std::vector<size_t> boxes_lod;
      if (has_roisnum) {
        boxes_lod = GetNmsLodFromRoisNum(rois_num.get_ptr());
      } else {
        boxes_lod = bboxes.lod().back();
      }
      if (boxes_lod[i] == boxes_lod[i + 1]) {
        all_indices.push_back(indices);
        batch_starts.push_back(batch_starts.back());
        continue;
      }
      scores_slice = scores.Slice(boxes_lod[i], boxes_lod[i + 1]);
      boxes_slice = bboxes.Slice(boxes_lod[i], boxes_lod[i + 1]);
    }
    MultiClassNMS<T, Context>(ctx,
                              scores_slice,
                              boxes_slice,
                              score_size,
                              score_threshold,
                              nms_top_k,
                              keep_top_k,
                              nms_threshold,
                              normalized,
                              nms_eta,
                              background_label,
                              &indices,
                              &num_nmsed_out);
    all_indices.push_back(indices);
    batch_starts.push_back(batch_starts.back() + num_nmsed_out);
  }

  int num_kept = batch_starts.back();
  if (num_kept == 0) {
    if (return_index) {
      out->Resize({0, out_dim});
      ctx.template Alloc<T>(out);
      index->Resize({0, 1});
      ctx.template Alloc<int>(index);
    } else {
      out->Resize({1, 1});
      T* od = ctx.template Alloc<T>(out);
      od[0] = -1;
      batch_starts = {0, 1};
    }
  } else {
    out->Resize({num_kept, out_dim});
    ctx.template Alloc<T>(out);
    int offset = 0;
    int* oindices = nullptr;
    for (int i = 0; i < n; ++i) {
      if (score_size == 3) {
        scores_slice = scores.Slice(i, i + 1);
        boxes_slice = bboxes.Slice(i, i + 1);
        scores_slice.Resize({score_dims[1], score_dims[2]});
        boxes_slice.Resize({score_dims[2], box_dim});
        if (return_index) {
          offset = i * score_dims[2];
        }
      } else {
        std::vector<size_t> boxes_lod;
        if (has_roisnum) {
          boxes_lod = GetNmsLodFromRoisNum(rois_num.get_ptr());
        } else {
          boxes_lod = bboxes.lod().back();
        }
        if (boxes_lod[i] == boxes_lod[i + 1]) continue;
        scores_slice = scores.Slice(boxes_lod[i], boxes_lod[i + 1]);
        boxes_slice = bboxes.Slice(boxes_lod[i], boxes_lod[i + 1]);
        if (return_index) {
          offset = boxes_lod[i] * score_dims[1];
        }
      }
      int64_t s = batch_starts[i];
      int64_t e = batch_starts[i + 1];
      if (e > s) {
        DenseTensor nout = out->Slice(s, e);
        if (return_index) {
          index->Resize({num_kept, 1});
          int* output_idx = ctx.template Alloc<int>(index);
          oindices = output_idx + s;
        }
        MultiClassOutput<T, Context>(ctx,
                                     scores_slice,
                                     boxes_slice,
                                     all_indices[i],
                                     score_dims.size(),
                                     &nout,
                                     oindices,
                                     offset);
      }
    }
  }
  if (nms_rois_num != nullptr) {
    nms_rois_num->Resize({n});
    ctx.template Alloc<int>(nms_rois_num);
    int* num_data = nms_rois_num->data<int>();
    for (int i = 1; i <= n; i++) {
      num_data[i - 1] = batch_starts[i] - batch_starts[i - 1];
    }
    nms_rois_num->Resize({n});
  }
}

}  // namespace phi

PD_REGISTER_KERNEL(multiclass_nms3,
                   CPU,
                   ALL_LAYOUT,
                   phi::MultiClassNMSKernel,
                   float,
                   double) {}
paddle/phi/kernels/funcs/CMakeLists.txt

@@ -6,6 +6,7 @@ add_subdirectory(detail)
 math_library(deformable_conv_functor DEPS dense_tensor)
 math_library(concat_and_split_functor DEPS dense_tensor)
 math_library(fc_functor DEPS blas jit_kernel_helper)
+math_library(gpc DEPS phi_enforce)
 math_library(gru_compute DEPS activation_functions math_function)
 math_library(lstm_compute DEPS activation_functions)
 math_library(math_function DEPS blas dense_tensor tensor)
...
paddle/
fluid/operators/detection
/gpc.cc
→
paddle/
phi/kernels/funcs
/gpc.cc
浏览文件 @
15ce2c1b
...
...
@@ -23,11 +23,12 @@
* @date 2018/6/12
**/
#include "paddle/
fluid/operators/detection
/gpc.h"
#include "paddle/
phi/kernels/funcs
/gpc.h"
#include "paddle/
fluid/platform
/enforce.h"
#include "paddle/
phi/core
/enforce.h"
namespace
gpc
{
namespace
phi
{
namespace
funcs
{
typedef
struct
lmt_shape
{
/* Local minima table */
double
y
;
/* Y coordinate at local minimum */
...
...
@@ -541,9 +542,8 @@ static int count_contours(polygon_node *polygon) {
}
static
void
add_left
(
polygon_node
*
p
,
double
x
,
double
y
)
{
PADDLE_ENFORCE_NOT_NULL
(
p
,
paddle
::
platform
::
errors
::
InvalidArgument
(
"Input polygon node is nullptr."
));
PADDLE_ENFORCE_NOT_NULL
(
p
,
phi
::
errors
::
InvalidArgument
(
"Input polygon node is nullptr."
));
vertex_node
*
nv
=
NULL
;
/* Create a new vertex node and set its fields */
...
...
@@ -599,9 +599,8 @@ static void add_right(polygon_node *p, double x, double y) {
}
static
void
merge_right
(
polygon_node
*
p
,
polygon_node
*
q
,
polygon_node
*
list
)
{
PADDLE_ENFORCE_NOT_NULL
(
p
,
paddle
::
platform
::
errors
::
InvalidArgument
(
"Input polygon node is nullptr."
));
PADDLE_ENFORCE_NOT_NULL
(
p
,
phi
::
errors
::
InvalidArgument
(
"Input polygon node is nullptr."
));
polygon_node
*
target
=
NULL
;
/* Label contour as external */
...
...
@@ -681,8 +680,7 @@ void add_vertex(vertex_node **t, double x, double y) {
void
gpc_vertex_create
(
edge_node
*
e
,
int
p
,
int
s
,
double
x
,
double
y
)
{
PADDLE_ENFORCE_NOT_NULL
(
e
,
paddle
::
platform
::
errors
::
InvalidArgument
(
"Input edge node is nullptr."
));
e
,
phi
::
errors
::
InvalidArgument
(
"Input edge node is nullptr."
));
add_vertex
(
&
(
e
->
outp
[
p
]
->
v
[
s
]),
x
,
y
);
e
->
outp
[
p
]
->
active
++
;
}
...
...
@@ -715,9 +713,8 @@ static bbox *create_contour_bboxes(gpc_polygon *p) {
gpc_malloc
<
bbox
>
(
box
,
p
->
num_contours
*
sizeof
(
bbox
),
const_cast
<
char
*>
(
"Bounding box creation"
));
PADDLE_ENFORCE_NOT_NULL
(
box
,
paddle
::
platform
::
errors
::
ResourceExhausted
(
"Failed to malloc box memory."
));
PADDLE_ENFORCE_NOT_NULL
(
box
,
phi
::
errors
::
ResourceExhausted
(
"Failed to malloc box memory."
));
/* Construct contour bounding boxes */
for
(
c
=
0
;
c
<
p
->
num_contours
;
c
++
)
{
...
...
@@ -882,9 +879,9 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
gpc_malloc
<
int
>
(
extended_hole
,
(
p
->
num_contours
+
1
)
*
sizeof
(
int
),
const_cast
<
char
*>
(
"contour hole addition"
));
PADDLE_ENFORCE_NOT_NULL
(
extended_hole
,
paddle
::
platform
::
errors
::
ResourceExhausted
(
"Failed to malloc extended hole memory."
));
PADDLE_ENFORCE_NOT_NULL
(
extended_hole
,
phi
::
errors
::
ResourceExhausted
(
"Failed to malloc extended hole memory."
));
/* Create an extended contour array */
gpc_malloc
<
gpc_vertex_list
>
(
extended_contour
,
...
...
@@ -1005,7 +1002,7 @@ void gpc_polygon_clip(gpc_op op,
gpc_malloc
<
double
>
(
sbt
,
sbt_entries
*
sizeof
(
double
),
const_cast
<
char
*>
(
"sbt creation"
));
PADDLE_ENFORCE_NOT_NULL
(
sbt
,
p
addle
::
platform
::
errors
::
ResourceExhausted
(
p
hi
::
errors
::
ResourceExhausted
(
"Failed to malloc scanbeam table memory."
));
build_sbt
(
&
scanbeam
,
sbt
,
sbtree
);
...
...
@@ -1050,8 +1047,7 @@ void gpc_polygon_clip(gpc_op op,
e1
=
aet
;
/* Set up bundle fields of first edge */
PADDLE_ENFORCE_NOT_NULL
(
aet
,
paddle
::
platform
::
errors
::
InvalidArgument
(
"Edge node AET is nullptr."
));
aet
,
phi
::
errors
::
InvalidArgument
(
"Edge node AET is nullptr."
));
aet
->
bundle
[
ABOVE
][
aet
->
type
]
=
(
aet
->
top
.
y
!=
yb
);
aet
->
bundle
[
ABOVE
][
!
aet
->
type
]
=
0
;
...
...
@@ -1651,7 +1647,7 @@ void gpc_tristrip_clip(gpc_op op,
gpc_malloc
<
double
>
(
sbt
,
sbt_entries
*
sizeof
(
double
),
const_cast
<
char
*>
(
"sbt creation"
));
PADDLE_ENFORCE_NOT_NULL
(
sbt
,
p
addle
::
platform
::
errors
::
ResourceExhausted
(
p
hi
::
errors
::
ResourceExhausted
(
"Failed to malloc scanbeam table memory."
));
build_sbt
(
&
scanbeam
,
sbt
,
sbtree
);
scanbeam
=
0
;
...
...
@@ -1691,8 +1687,7 @@ void gpc_tristrip_clip(gpc_op op,
/* Set up bundle fields of first edge */
PADDLE_ENFORCE_NOT_NULL
(
aet
,
paddle
::
platform
::
errors
::
InvalidArgument
(
"Edge node AET is nullptr."
));
aet
,
phi
::
errors
::
InvalidArgument
(
"Edge node AET is nullptr."
));
aet
->
bundle
[
ABOVE
][
aet
->
type
]
=
(
aet
->
top
.
y
!=
yb
);
aet
->
bundle
[
ABOVE
][
!
aet
->
type
]
=
0
;
aet
->
bstate
[
ABOVE
]
=
UNBUNDLED
;
...
...
@@ -2248,6 +2243,7 @@ void gpc_tristrip_clip(gpc_op op,
gpc_free
<
double
>
(
sbt
);
}
// NOLINT
}
// namespace gpc
}
// namespace funcs
}
// namespace phi
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
paddle/fluid/operators/detection/gpc.h → paddle/phi/kernels/funcs/gpc.h

@@ -29,15 +29,16 @@
 * @date 2018/6/12
 **/

-#ifndef PADDLE_FLUID_OPERATORS_DETECTION_GPC_H_  // GPC_H_
-#define PADDLE_FLUID_OPERATORS_DETECTION_GPC_H_  // GPC_H_
+#ifndef PADDLE_PHI_KERNELS_FUNCS_GPC_H_  // GPC_H_
+#define PADDLE_PHI_KERNELS_FUNCS_GPC_H_  // GPC_H_

 #include <float.h>
 #include <math.h>
 #include <stdio.h>
 #include <stdlib.h>

-namespace gpc {
+namespace phi {
+namespace funcs {

 typedef enum {  // Set operation type
   GPC_DIFF,     // Difference
@@ -190,7 +191,7 @@ inline void gpc_n_edge(edge_node *d, edge_node *e, int p) {
 template <typename T>
 void gpc_malloc(T *&p, int b, char *s) {
   if (b > 0) {
-    p = (T *)malloc(b);
+    p = reinterpret_cast<T *>(malloc(b));
     if (!p) {
       fprintf(stderr, "gpc malloc failure: %s\n", s);
@@ -243,7 +244,8 @@ void gpc_free_polygon(gpc_polygon *polygon);
 void gpc_free_tristrip(gpc_tristrip *tristrip);

-}  // namespace gpc
+}  // namespace funcs
+}  // namespace phi

-#endif  // PADDLE_FLUID_OPERATORS_DETECTION_GPC_H_
+#endif  // PADDLE_PHI_KERNELS_FUNCS_GPC_H_

 /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
paddle/phi/kernels/multiclass_nms3_kernel.h (new file, mode 100644)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
namespace phi {

template <typename T, typename Context>
void MultiClassNMSKernel(const Context& ctx,
                         const DenseTensor& bboxes,
                         const DenseTensor& scores,
                         const paddle::optional<DenseTensor>& rois_num,
                         float score_threshold,
                         int nms_top_k,
                         int keep_top_k,
                         float nms_threshold,
                         bool normalized,
                         float nms_eta,
                         int background_label,
                         DenseTensor* out,
                         DenseTensor* index,
                         DenseTensor* nms_rois_num);

}  // namespace phi
paddle/phi/ops/compat/multiclass_nms3_sig.cc (new file, mode 100644)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {

KernelSignature MultiClassNMS3OpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("multiclass_nms3",
                         {"BBoxes", "Scores", "RoisNum"},
                         {"score_threshold",
                          "nms_top_k",
                          "keep_top_k",
                          "nms_threshold",
                          "normalized",
                          "nms_eta",
                          "background_label"},
                         {"Out", "Index", "NmsRoisNum"});
}

}  // namespace phi

PD_REGISTER_ARG_MAPPING_FN(multiclass_nms3,
                           phi::MultiClassNMS3OpArgumentMapping);
python/paddle/fluid/tests/unittests/op_test.py

@@ -1457,6 +1457,7 @@ class OpTest(unittest.TestCase):
            # see details: https://stackoverflow.com/questions/38331703/why-does-numpys-broadcasting-sometimes-allow-comparing-arrays-of-different-leng
             if expect_np.size == 0:
                 self.op_test.assertTrue(actual_np.size == 0)  # }}}
+            # print("actual_np, expect_np", actual_np, expect_np)
             self._compare_numpy(name, actual_np, expect_np)
             if isinstance(expect, tuple):
                 self._compare_list(name, actual, expect)
python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py

@@ -19,7 +19,81 @@ import copy
 from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid import Program, program_guard
+from paddle.fluid import Program, program_guard, in_dygraph_mode, _non_static_mode
+from paddle.fluid.layer_helper import LayerHelper
+from paddle import _C_ops
+
+
+def multiclass_nms3(bboxes,
+                    scores,
+                    rois_num=None,
+                    score_threshold=0.3,
+                    nms_top_k=1000,
+                    keep_top_k=100,
+                    nms_threshold=0.3,
+                    normalized=True,
+                    nms_eta=1.,
+                    background_label=-1,
+                    return_index=True,
+                    return_rois_num=True,
+                    name=None):
+
+    helper = LayerHelper('multiclass_nms3', **locals())
+
+    if in_dygraph_mode():
+        attrs = (score_threshold, nms_top_k, keep_top_k, nms_threshold,
+                 normalized, nms_eta, background_label)
+        output, index, nms_rois_num = _C_ops.final_state_multiclass_nms3(
+            bboxes, scores, rois_num, *attrs)
+        if not return_index:
+            index = None
+        return output, index, nms_rois_num
+    elif _non_static_mode():
+        attrs = ('background_label', background_label, 'score_threshold',
+                 score_threshold, 'nms_top_k', nms_top_k, 'nms_threshold',
+                 nms_threshold, 'keep_top_k', keep_top_k, 'nms_eta', nms_eta,
+                 'normalized', normalized)
+        output, index, nms_rois_num = _C_ops.multiclass_nms3(
+            bboxes, scores, rois_num, *attrs)
+        if not return_index:
+            index = None
+        return output, index, nms_rois_num
+    else:
+        output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
+        index = helper.create_variable_for_type_inference(dtype='int32')
+
+        inputs = {'BBoxes': bboxes, 'Scores': scores}
+        outputs = {'Out': output, 'Index': index}
+
+        if rois_num is not None:
+            inputs['RoisNum'] = rois_num
+
+        if return_rois_num:
+            nms_rois_num = helper.create_variable_for_type_inference(
+                dtype='int32')
+            outputs['NmsRoisNum'] = nms_rois_num
+
+        helper.append_op(type="multiclass_nms3",
+                         inputs=inputs,
+                         attrs={
+                             'background_label': background_label,
+                             'score_threshold': score_threshold,
+                             'nms_top_k': nms_top_k,
+                             'nms_threshold': nms_threshold,
+                             'keep_top_k': keep_top_k,
+                             'nms_eta': nms_eta,
+                             'normalized': normalized
+                         },
+                         outputs=outputs)
+        output.stop_gradient = True
+        index.stop_gradient = True
+        if not return_index:
+            index = None
+        if not return_rois_num:
+            nms_rois_num = None
+
+        return output, nms_rois_num, index


 def softmax(x):
@@ -541,8 +615,9 @@ class TestMulticlassNMS2LoDInput(TestMulticlassNMSLoDInput):
             'normalized': normalized,
         }

-    def test_check_output(self):
-        self.check_output()
+    def test_check_output(self):
+        self.check_output()
+

 class TestMulticlassNMS2LoDNoOutput(TestMulticlassNMS2LoDInput):
@@ -590,6 +665,7 @@ class TestMulticlassNMSError(unittest.TestCase):
 class TestMulticlassNMS3Op(TestMulticlassNMS2Op):

     def setUp(self):
+        self.python_api = multiclass_nms3
         self.set_argument()
         N = 7
         M = 1200
@@ -623,8 +699,8 @@ class TestMulticlassNMS3Op(TestMulticlassNMS2Op):
         self.op_type = 'multiclass_nms3'
         self.inputs = {'BBoxes': boxes, 'Scores': scores}
         self.outputs = {
-            'Out': (nmsed_outs, [lod]),
-            'Index': (index_outs, [lod]),
+            'Out': nmsed_outs,
+            'Index': index_outs,
             'NmsRoisNum': np.array(lod).astype('int32')
         }
         self.attrs = {
@@ -638,7 +714,7 @@ class TestMulticlassNMS3Op(TestMulticlassNMS2Op):
         }

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestMulticlassNMS3OpNoOutput(TestMulticlassNMS3Op):
@@ -649,71 +725,6 @@ class TestMulticlassNMS3OpNoOutput(TestMulticlassNMS3Op):
         self.score_threshold = 2.0


-class TestMulticlassNMS3LoDInput(TestMulticlassNMS2LoDInput):
-
-    def setUp(self):
-        self.set_argument()
-        M = 1200
-        C = 21
-        BOX_SIZE = 4
-        box_lod = [[1200]]
-        background = 0
-        nms_threshold = 0.3
-        nms_top_k = 400
-        keep_top_k = 200
-        score_threshold = self.score_threshold
-        normalized = False
-
-        scores = np.random.random((M, C)).astype('float32')
-        scores = np.apply_along_axis(softmax, 1, scores)
-
-        boxes = np.random.random((M, C, BOX_SIZE)).astype('float32')
-        boxes[:, :, 0] = boxes[:, :, 0] * 10
-        boxes[:, :, 1] = boxes[:, :, 1] * 10
-        boxes[:, :, 2] = boxes[:, :, 2] * 10 + 10
-        boxes[:, :, 3] = boxes[:, :, 3] * 10 + 10
-
-        det_outs, lod = lod_multiclass_nms(boxes, scores, background,
-                                           score_threshold, nms_threshold,
-                                           nms_top_k, keep_top_k, box_lod,
-                                           normalized)
-
-        det_outs = np.array(det_outs)
-        nmsed_outs = det_outs[:, :-1].astype('float32') if len(
-            det_outs) else det_outs
-        self.op_type = 'multiclass_nms3'
-        self.inputs = {
-            'BBoxes': (boxes, box_lod),
-            'Scores': (scores, box_lod),
-            'RoisNum': np.array(box_lod).astype('int32')
-        }
-        self.outputs = {
-            'Out': (nmsed_outs, [lod]),
-            'NmsRoisNum': np.array(lod).astype('int32')
-        }
-        self.attrs = {
-            'background_label': 0,
-            'nms_threshold': nms_threshold,
-            'nms_top_k': nms_top_k,
-            'keep_top_k': keep_top_k,
-            'score_threshold': score_threshold,
-            'nms_eta': 1.0,
-            'normalized': normalized,
-        }
-
-    def test_check_output(self):
-        self.check_output()
-
-
-class TestMulticlassNMS3LoDNoOutput(TestMulticlassNMS3LoDInput):
-
-    def set_argument(self):
-        # Here set 2.0 to test the case there is no outputs.
-        # In practical use, 0.0 < score_threshold < 1.0
-        self.score_threshold = 2.0
-
-
 if __name__ == '__main__':
     paddle.enable_static()
     unittest.main()