Commit 88ee56d0 authored by J jerrywgz

enhance nms for mask rcnn

Parent 3f815e07
......@@ -93,5 +93,25 @@ void BboxOverlaps(const framework::Tensor& r_boxes,
}
}
template <class T>
void SliceOneClass(const platform::DeviceContext& ctx,
const framework::Tensor& items, const int class_id,
framework::Tensor* one_class_item) {
T* item_data = one_class_item->mutable_data<T>(ctx.GetPlace());
const T* items_data = items.data<T>();
const int64_t num_item = items.dims()[0];
const int class_num = items.dims()[1];
int item_size = 1;
if (items.dims().size() == 3) {
item_size = items.dims()[2];
}
for (int i = 0; i < num_item; ++i) {
for (int j = 0; j < item_size; ++j) {
item_data[i * item_size + j] =
items_data[i * class_num * item_size + class_id * item_size + j];
}
}
}
} // namespace operators
} // namespace paddle
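For illustration only (not part of the patch), a minimal NumPy sketch of what the SliceOneClass helper added above extracts, assuming items is laid out as [num_item, class_num] or [num_item, class_num, item_size]:

```python
import numpy as np

def slice_one_class(items, class_id):
    """Pick one class's slice, mirroring SliceOneClass<T>."""
    items = np.asarray(items)
    if items.ndim == 2:                      # e.g. scores with shape [num_item, class_num]
        return items[:, class_id].copy()
    return items[:, class_id, :].copy()      # e.g. boxes with shape [num_item, class_num, 4]

# 3 boxes, 2 classes, 4 coordinates each
boxes = np.arange(24, dtype=np.float32).reshape(3, 2, 4)
print(slice_one_class(boxes, 1))             # rows [4..7], [12..15], [20..23]
```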
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <glog/logging.h>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/detection/bbox_util.h"
#include "paddle/fluid/operators/detection/poly_util.h"
namespace paddle {
......@@ -35,30 +33,45 @@ class MultiClassNMSOp : public framework::OperatorWithKernel {
auto box_dims = ctx->GetInputDim("BBoxes");
auto score_dims = ctx->GetInputDim("Scores");
auto score_size = score_dims.size();
if (ctx->IsRuntime()) {
PADDLE_ENFORCE(score_size == 2 || score_size == 3,
"The rank of Input(Scores) must be 2 or 3");
PADDLE_ENFORCE_EQ(box_dims.size(), 3,
"The rank of Input(BBoxes) must be 3.");
PADDLE_ENFORCE_EQ(score_dims.size(), 3,
"The rank of Input(Scores) must be 3.");
"The rank of Input(BBoxes) must be 3");
if (score_size == 3) {
PADDLE_ENFORCE(box_dims[2] == 4 || box_dims[2] == 8 ||
box_dims[2] == 16 || box_dims[2] == 24 ||
box_dims[2] == 32,
"The 2nd dimension of Input(BBoxes) must be 4 or 8, "
"The last dimension of Input(BBoxes) must be 4 or 8, "
"represents the layout of coordinate "
"[xmin, ymin, xmax, ymax] or "
"4 points: [x1, y1, x2, y2, x3, y3, x4, y4] or "
"8 points: [xi, yi] i= 1,2,...,8 or "
"12 points: [xi, yi] i= 1,2,...,12 or "
"16 points: [xi, yi] i= 1,2,...,16");
PADDLE_ENFORCE_EQ(box_dims[1], score_dims[2],
"The 1st dimensiong of Input(BBoxes) must be equal to "
"3rd dimension of Input(Scores), which represents the "
PADDLE_ENFORCE_EQ(
box_dims[1], score_dims[2],
"The 2nd dimension of Input(BBoxes) must be equal to "
"last dimension of Input(Scores), which represents the "
"predicted bboxes.");
} else {
PADDLE_ENFORCE(box_dims[2] == 4,
"The last dimension of Input(BBoxes) must be 4");
PADDLE_ENFORCE_EQ(box_dims[1], score_dims[1],
"The 2nd dimension of Input(BBoxes)"
"must be equal to the 2nd dimension"
" of Input(Scores)");
}
}
// Here the first dimension of Out is not the real number of detections;
// it will be rewritten in the computing kernel.
if (score_size == 3) {
ctx->SetOutputDim("Out", {box_dims[1], box_dims[2] + 2});
} else {
ctx->SetOutputDim("Out", {-1, box_dims[2] + 2});
}
}
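As a rough sketch (not part of the patch, shapes as documented above), the compile-time shape of Out follows this rule; the first dimension is only a placeholder that the kernel rewrites at runtime:

```python
def infer_out_shape(box_dims, score_size):
    """Compile-time shape of Out; the first dimension is rewritten at runtime."""
    if score_size == 3:
        return [box_dims[1], box_dims[2] + 2]   # e.g. [M, 6] for BBoxes [N, M, 4]
    return [-1, box_dims[2] + 2]                # e.g. [-1, 6] for BBoxes [M, C, 4]

print(infer_out_shape([8, 100, 4], 3))   # [100, 6]
print(infer_out_shape([100, 21, 4], 2))  # [-1, 6]
```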
protected:
......@@ -123,8 +136,12 @@ static inline T JaccardOverlap(const T* box1, const T* box2,
const T inter_ymin = std::max(box1[1], box2[1]);
const T inter_xmax = std::min(box1[2], box2[2]);
const T inter_ymax = std::min(box1[3], box2[3]);
const T inter_w = inter_xmax - inter_xmin;
const T inter_h = inter_ymax - inter_ymin;
T inter_w = inter_xmax - inter_xmin;
T inter_h = inter_ymax - inter_ymin;
if (!normalized) {
inter_w += 1;
inter_h += 1;
}
const T inter_area = inter_w * inter_h;
const T bbox1_area = BBoxArea<T>(box1, normalized);
const T bbox2_area = BBoxArea<T>(box2, normalized);
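A simplified Python sketch of the IoU computed above, including the +1 width/height adjustment used when boxes are in un-normalized (pixel) coordinates; the early-exit guard here is an approximation of the real function's check:

```python
def jaccard_overlap(box1, box2, normalized=True):
    """IoU of two [xmin, ymin, xmax, ymax] boxes, mirroring JaccardOverlap<T>."""
    norm = 0.0 if normalized else 1.0
    inter_w = min(box1[2], box2[2]) - max(box1[0], box2[0]) + norm
    inter_h = min(box1[3], box2[3]) - max(box1[1], box2[1]) + norm
    if inter_w <= 0 or inter_h <= 0:
        return 0.0
    inter = inter_w * inter_h
    area1 = (box1[2] - box1[0] + norm) * (box1[3] - box1[1] + norm)
    area2 = (box2[2] - box2[0] + norm) * (box2[3] - box2[1] + norm)
    return inter / (area1 + area2 - inter)

print(jaccard_overlap([4.0, 3.0, 7.0, 5.0], [3.0, 4.0, 6.0, 8.0]))  # 0.125 (= 2/16)
```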
......@@ -139,7 +156,7 @@ T PolyIoU(const T* box1, const T* box2, const size_t box_size,
T bbox2_area = PolyArea<T>(box2, box_size, normalized);
T inter_area = PolyOverlapArea<T>(box1, box2, box_size, normalized);
if (bbox1_area == 0 || bbox2_area == 0 || inter_area == 0) {
// If coordinate values are is invalid
// If coordinate values are invalid
// if area size <= 0, return 0.
return T(0.);
} else {
......@@ -152,7 +169,8 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
public:
void NMSFast(const Tensor& bbox, const Tensor& scores,
const T score_threshold, const T nms_threshold, const T eta,
const int64_t top_k, std::vector<int>* selected_indices) const {
const int64_t top_k, std::vector<int>* selected_indices,
const bool normalized) const {
// The total boxes for each instance.
int64_t num_boxes = bbox.dims()[0];
// 4: [xmin ymin xmax ymax]
......@@ -178,15 +196,16 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
T overlap = T(0.);
// 4: [xmin ymin xmax ymax]
if (box_size == 4) {
overlap = JaccardOverlap<T>(bbox_data + idx * box_size,
bbox_data + kept_idx * box_size, true);
overlap =
JaccardOverlap<T>(bbox_data + idx * box_size,
bbox_data + kept_idx * box_size, normalized);
}
// 8: [x1 y1 x2 y2 x3 y3 x4 y4] or 16, 24, 32
if (box_size == 8 || box_size == 16 || box_size == 24 ||
box_size == 32) {
overlap =
PolyIoU<T>(bbox_data + idx * box_size,
bbox_data + kept_idx * box_size, box_size, true);
overlap = PolyIoU<T>(bbox_data + idx * box_size,
bbox_data + kept_idx * box_size, box_size,
normalized);
}
keep = overlap <= adaptive_threshold;
} else {
......@@ -205,37 +224,66 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
void MultiClassNMS(const framework::ExecutionContext& ctx,
const Tensor& scores, const Tensor& bboxes,
const int scores_size,
std::map<int, std::vector<int>>* indices,
int* num_nmsed_out) const {
int64_t background_label = ctx.Attr<int>("background_label");
int64_t nms_top_k = ctx.Attr<int>("nms_top_k");
int64_t keep_top_k = ctx.Attr<int>("keep_top_k");
bool normalized = ctx.Attr<bool>("normalized");
T nms_threshold = static_cast<T>(ctx.Attr<float>("nms_threshold"));
T nms_eta = static_cast<T>(ctx.Attr<float>("nms_eta"));
T score_threshold = static_cast<T>(ctx.Attr<float>("score_threshold"));
auto& dev_ctx = ctx.template device_context<platform::CPUDeviceContext>();
int64_t class_num = scores.dims()[0];
int64_t predict_dim = scores.dims()[1];
int num_det = 0;
int64_t box_num = 0, class_num = 0, predict_dim = 0;
if (scores_size == 3) {
class_num = scores.dims()[0];
predict_dim = scores.dims()[1];
for (int64_t c = 0; c < class_num; ++c) {
if (c == background_label) continue;
Tensor score = scores.Slice(c, c + 1);
NMSFast(bboxes, score, score_threshold, nms_threshold, nms_eta, nms_top_k,
&((*indices)[c]));
NMSFast(bboxes, score, score_threshold, nms_threshold, nms_eta,
nms_top_k, &((*indices)[c]), normalized);
num_det += (*indices)[c].size();
}
} else {
box_num = scores.dims()[0];
class_num = scores.dims()[1];
Tensor score;
score.Resize({box_num, 1});
Tensor bbox;
bbox.Resize({box_num, 4});
for (int64_t c = 0; c < class_num; ++c) {
if (c == background_label) continue;
SliceOneClass<T>(dev_ctx, scores, c, &score);
SliceOneClass<T>(dev_ctx, bboxes, c, &bbox);
NMSFast(bbox, score, score_threshold, nms_threshold, nms_eta, nms_top_k,
&((*indices)[c]), normalized);
std::stable_sort((*indices)[c].begin(), (*indices)[c].end());
num_det += (*indices)[c].size();
}
}
*num_nmsed_out = num_det;
const T* scores_data = scores.data<T>();
if (keep_top_k > -1 && num_det > keep_top_k) {
const T* sdata;
std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
for (const auto& it : *indices) {
int label = it.first;
const T* sdata = scores_data + label * predict_dim;
if (scores_size == 3) {
sdata = scores_data + label * predict_dim;
} else {
Tensor score;
score.Resize({box_num, 1});
SliceOneClass<T>(dev_ctx, scores, label, &score);
sdata = score.data<T>();
}
const std::vector<int>& label_indices = it.second;
for (size_t j = 0; j < label_indices.size(); ++j) {
int idx = label_indices[j];
PADDLE_ENFORCE_LT(idx, predict_dim);
score_index_pairs.push_back(
std::make_pair(sdata[idx], std::make_pair(label, idx)));
}
......@@ -252,31 +300,55 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
int idx = score_index_pairs[j].second.second;
new_indices[label].push_back(idx);
}
if (scores_size == 2) {
for (const auto& it : new_indices) {
int label = it.first;
std::stable_sort(new_indices[label].begin(),
new_indices[label].end());
}
}
new_indices.swap(*indices);
*num_nmsed_out = keep_top_k;
}
}
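A hedged Python sketch (helper names are hypothetical) of the keep_top_k pruning performed above: gather every kept (score, label, index) triple, keep the keep_top_k highest scores, and restore per-class index order for the per-class score layout (scores_size == 2):

```python
def prune_to_keep_top_k(indices, score_of, keep_top_k, resort_per_class=False):
    """indices: {label: [box_idx, ...]}; score_of(label, idx) -> float."""
    pairs = [(score_of(label, idx), label, idx)
             for label, idxs in indices.items() for idx in idxs]
    if keep_top_k < 0 or len(pairs) <= keep_top_k:
        return indices
    pairs.sort(key=lambda t: t[0], reverse=True)   # highest scores first
    kept = {}
    for _, label, idx in pairs[:keep_top_k]:
        kept.setdefault(label, []).append(idx)
    if resort_per_class:                           # keep box indices sorted per class
        for label in kept:
            kept[label].sort()
    return kept
```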
void MultiClassOutput(const Tensor& scores, const Tensor& bboxes,
void MultiClassOutput(const platform::DeviceContext& ctx,
const Tensor& scores, const Tensor& bboxes,
const std::map<int, std::vector<int>>& selected_indices,
Tensor* outs) const {
const int scores_size, Tensor* outs) const {
int64_t class_num = scores.dims()[1];
int64_t predict_dim = scores.dims()[1];
int64_t box_size = bboxes.dims()[1];
int64_t out_dim = bboxes.dims()[1] + 2;
if (scores_size == 2) {
box_size = bboxes.dims()[2];
}
int64_t out_dim = box_size + 2;
auto* scores_data = scores.data<T>();
auto* bboxes_data = bboxes.data<T>();
auto* odata = outs->data<T>();
const T* sdata;
Tensor bbox;
bbox.Resize({scores.dims()[0], box_size});
int count = 0;
for (const auto& it : selected_indices) {
int label = it.first;
const T* sdata = scores_data + label * predict_dim;
const std::vector<int>& indices = it.second;
if (scores_size == 2) {
SliceOneClass<T>(ctx, bboxes, label, &bbox);
} else {
sdata = scores_data + label * predict_dim;
}
for (size_t j = 0; j < indices.size(); ++j) {
int idx = indices[j];
const T* bdata = bboxes_data + idx * box_size;
odata[count * out_dim] = label; // label
const T* bdata;
if (scores_size == 3) {
bdata = bboxes_data + idx * box_size;
odata[count * out_dim + 1] = sdata[idx]; // score
} else {
bdata = bbox.data<T>() + idx * box_size;
odata[count * out_dim + 1] = *(scores_data + idx * class_num + label);
}
// xmin, ymin, xmax, ymax or multi-points coordinates
std::memcpy(odata + count * out_dim + 2, bdata, box_size * sizeof(T));
count++;
......@@ -285,20 +357,23 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
}
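Each output row assembled above has out_dim = box_size + 2 values: the label, the score, then the box coordinates. A tiny sketch (not part of the patch) of that layout:

```python
def make_output_row(label, score, box):
    """One row of Out: [label, score, xmin, ymin, xmax, ymax] (or more points)."""
    return [float(label), float(score)] + [float(v) for v in box]

print(make_output_row(1, 0.92, [4.0, 3.0, 7.0, 5.0]))
# [1.0, 0.92, 4.0, 3.0, 7.0, 5.0]
```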
void Compute(const framework::ExecutionContext& ctx) const override {
auto* boxes = ctx.Input<Tensor>("BBoxes");
auto* scores = ctx.Input<Tensor>("Scores");
auto* boxes = ctx.Input<LoDTensor>("BBoxes");
auto* scores = ctx.Input<LoDTensor>("Scores");
auto* outs = ctx.Output<LoDTensor>("Out");
auto score_dims = scores->dims();
int64_t batch_size = score_dims[0];
int64_t class_num = score_dims[1];
int64_t predict_dim = score_dims[2];
int64_t box_dim = boxes->dims()[2];
int64_t out_dim = boxes->dims()[2] + 2;
auto& dev_ctx = ctx.template device_context<platform::CPUDeviceContext>();
std::vector<std::map<int, std::vector<int>>> all_indices;
std::vector<size_t> batch_starts = {0};
int64_t batch_size = score_dims[0];
int64_t predict_dim = 0;
int64_t box_dim = boxes->dims()[2];
int64_t out_dim = box_dim + 2;
int num_nmsed_out = 0;
if (score_dims.size() == 3) {
predict_dim = score_dims[2];
for (int64_t i = 0; i < batch_size; ++i) {
Tensor ins_score = scores->Slice(i, i + 1);
ins_score.Resize({class_num, predict_dim});
......@@ -307,18 +382,33 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
ins_boxes.Resize({predict_dim, box_dim});
std::map<int, std::vector<int>> indices;
int num_nmsed_out = 0;
MultiClassNMS(ctx, ins_score, ins_boxes, &indices, &num_nmsed_out);
MultiClassNMS(ctx, ins_score, ins_boxes, score_dims.size(), &indices,
&num_nmsed_out);
all_indices.push_back(indices);
batch_starts.push_back(batch_starts.back() + num_nmsed_out);
}
} else {
auto boxes_lod = boxes->lod().back();
int64_t n = static_cast<int64_t>(boxes_lod.size() - 1);
for (int i = 0; i < n; ++i) {
Tensor boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]);
Tensor scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]);
std::map<int, std::vector<int>> indices;
MultiClassNMS(ctx, scores_slice, boxes_slice, score_dims.size(),
&indices, &num_nmsed_out);
all_indices.push_back(indices);
batch_starts.push_back(batch_starts.back() + num_nmsed_out);
}
}
int num_kept = batch_starts.back();
if (num_kept == 0) {
T* od = outs->mutable_data<T>({1}, ctx.GetPlace());
T* od = outs->mutable_data<T>({1, 1}, ctx.GetPlace());
od[0] = -1;
batch_starts.back() = 1;
} else {
outs->mutable_data<T>({num_kept, out_dim}, ctx.GetPlace());
if (score_dims.size() == 3) {
for (int64_t i = 0; i < batch_size; ++i) {
Tensor ins_score = scores->Slice(i, i + 1);
ins_score.Resize({class_num, predict_dim});
......@@ -330,13 +420,30 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
int64_t e = batch_starts[i + 1];
if (e > s) {
Tensor out = outs->Slice(s, e);
MultiClassOutput(ins_score, ins_boxes, all_indices[i], &out);
MultiClassOutput(dev_ctx, ins_score, ins_boxes, all_indices[i],
score_dims.size(), &out);
}
}
} else {
auto boxes_lod = boxes->lod().back();
int64_t n = static_cast<int64_t>(boxes_lod.size() - 1);
for (int i = 0; i < n; ++i) {
Tensor boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]);
Tensor scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]);
int64_t s = batch_starts[i];
int64_t e = batch_starts[i + 1];
if (e > s) {
Tensor out = outs->Slice(s, e);
MultiClassOutput(dev_ctx, scores_slice, boxes_slice, all_indices[i],
score_dims.size(), &out);
}
}
}
}
framework::LoD lod;
lod.emplace_back(batch_starts);
LOG(ERROR) << "c++ lod: " << lod;
outs->set_lod(lod);
}
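The batch_starts offsets collected above become the LoD of Out: image i owns rows [batch_starts[i], batch_starts[i+1]). A small sketch with made-up offsets of how a caller could split the flat result back per image:

```python
def split_by_lod(out_rows, batch_starts):
    """Recover per-image detections from the flat Out tensor."""
    return [out_rows[s:e] for s, e in zip(batch_starts[:-1], batch_starts[1:])]

rows = [[0, 0.9, 1, 1, 2, 2], [1, 0.8, 3, 3, 4, 4], [2, 0.7, 5, 5, 6, 6]]
print(split_by_lod(rows, [0, 2, 3]))   # image 0 gets two rows, image 1 gets one
```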
......@@ -346,17 +453,23 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("BBoxes",
"(Tensor) A 3-D Tensor with shape "
"Two types of bboxes are supported:"
"1. (Tensor) A 3-D Tensor with shape "
"[N, M, 4 or 8 16 24 32] represents the "
"predicted locations of M bounding bboxes, N is the batch size. "
"Each bounding box has four coordinate values and the layout is "
"[xmin, ymin, xmax, ymax], when box size equals to 4.");
"[xmin, ymin, xmax, ymax], when box size equals to 4."
"2. (LoDTensor) A 3-D Tensor with shape [N, M, 4]");
AddInput("Scores",
"(Tensor) A 3-D Tensor with shape [N, C, M] represents the "
"Two types of scores are supported:"
"1. (Tensor) A 3-D Tensor with shape [N, C, M] represents the "
"predicted confidence predictions. N is the batch size, C is the "
"class number, M is number of bounding boxes. For each category "
"there are total M scores which corresponding M bounding boxes. "
" Please note, M is equal to the 1st dimension of BBoxes. ");
" Please note, M is equal to the 1st dimension of BBoxes. "
"2. (LoDTensor) A 2-D LoDTensor with shape"
"[N, num_class]. N is the number of bbox and"
"M represents the scores of bboxes in each class.");
AddAttr<int>(
"background_label",
"(int, defalut: 0) "
......@@ -384,6 +497,10 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker {
"(int64_t) "
"Number of total bboxes to be kept per image after NMS "
"step. -1 means keeping all bboxes after NMS step.");
AddAttr<bool>("normalized",
"(bool, default false) "
"Whether detections are normalized.")
.SetDefault(true);
AddOutput("Out",
"(LoDTensor) A 2-D LoDTensor with shape [No, 6] represents the "
"detections. Each row has 6 values: "
......@@ -399,17 +516,14 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
This operator performs multi-class non maximum suppression (NMS) on a batch
of boxes and scores.
In the NMS step, this operator greedily selects a subset of detection bounding
boxes that have scores larger than score_threshold, if this threshold is
provided, and then keeps at most nms_top_k of the highest-confidence boxes if
nms_top_k is larger than -1. It then prunes away boxes that have high IOU
(intersection over union) overlap with already selected boxes, using adaptive
threshold NMS based on the nms_threshold and nms_eta parameters.
After the NMS step, at most keep_top_k bboxes in total are kept
per image if keep_top_k is larger than -1.
This operator supports multi-class and batched inputs. It applies NMS
independently for each class. The output is a 2-D LoDTensor; for each
image, the offsets in the first dimension of the LoDTensor are called LoD, the number
......
......@@ -19,7 +19,7 @@ import copy
from op_test import OpTest
def iou(box_a, box_b):
def iou(box_a, box_b, normalized):
"""Apply intersection-over-union overlap between box_a and box_b
"""
xmin_a = min(box_a[0], box_a[2])
......@@ -32,8 +32,10 @@ def iou(box_a, box_b):
xmax_b = max(box_b[0], box_b[2])
ymax_b = max(box_b[1], box_b[3])
area_a = (ymax_a - ymin_a) * (xmax_a - xmin_a)
area_b = (ymax_b - ymin_b) * (xmax_b - xmin_b)
area_a = (ymax_a - ymin_a + (normalized == False)) * \
(xmax_a - xmin_a + (normalized == False))
area_b = (ymax_b - ymin_b + (normalized == False)) * \
(xmax_b - xmin_b + (normalized == False))
if area_a <= 0 and area_b <= 0:
return 0.0
......@@ -42,17 +44,21 @@ def iou(box_a, box_b):
xb = min(xmax_a, xmax_b)
yb = min(ymax_a, ymax_b)
inter_area = max(xb - xa, 0.0) * max(yb - ya, 0.0)
box_a_area = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
box_b_area = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
inter_area = max(xb - xa + (normalized == False), 0.0) * \
max(yb - ya + (normalized == False), 0.0)
iou_ratio = inter_area / (area_a + area_b - inter_area)
return iou_ratio
def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0):
def nms(boxes,
scores,
score_threshold,
nms_threshold,
top_k=200,
normalized=True,
eta=1.0):
"""Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
......@@ -87,7 +93,7 @@ def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0):
for k in range(len(selected_indices)):
if keep:
kept_idx = selected_indices[k]
overlap = iou(boxes[idx], boxes[kept_idx])
overlap = iou(boxes[idx], boxes[kept_idx], normalized)
keep = True if overlap <= adaptive_threshold else False
else:
break
......@@ -99,16 +105,24 @@ def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0):
def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold,
nms_top_k, keep_top_k):
nms_top_k, keep_top_k, normalized, shared):
if shared:
class_num = scores.shape[0]
priorbox_num = scores.shape[1]
else:
box_num = scores.shape[0]
class_num = scores.shape[1]
selected_indices = {}
num_det = 0
for c in range(class_num):
if c == background: continue
if shared:
indices = nms(boxes, scores[c], score_threshold, nms_threshold,
nms_top_k)
nms_top_k, normalized)
else:
indices = nms(boxes[:, c, :], scores[:, c], score_threshold,
nms_threshold, nms_top_k, normalized)
selected_indices[c] = indices
num_det += len(indices)
......@@ -116,7 +130,10 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold,
score_index = []
for c, indices in selected_indices.items():
for idx in indices:
if shared:
score_index.append((scores[c][idx], c, idx))
else:
score_index.append((scores[idx][c], c, idx))
sorted_score_index = sorted(
score_index, key=lambda tup: tup[0], reverse=True)
......@@ -127,24 +144,74 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold,
selected_indices[c] = []
for s, c, idx in sorted_score_index:
selected_indices[c].append(idx)
if not shared:
for labels in selected_indices:
selected_indices[labels].sort()
num_det = keep_top_k
return selected_indices, num_det
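A small usage sketch (made-up boxes and scores, assuming the helpers above are importable) of the two layouts the reference helper now supports: shared boxes with scores [C, M], and per-class boxes with scores [M, C]:

```python
import numpy as np

# Shared layout: one set of M boxes [M, 4], scores [C, M].
scores_shared = np.array([[0.9, 0.1], [0.2, 0.8]], dtype='float32')        # C=2, M=2
boxes_shared = np.array([[0, 0, 1, 1], [0, 0, 0.9, 0.9]], dtype='float32')

# Per-class layout (Mask R-CNN head): boxes [M, C, 4], scores [M, C].
scores_percls = scores_shared.T                                             # M=2, C=2
boxes_percls = np.tile(boxes_shared[:, None, :], (1, 2, 1))                 # [M, C, 4]

out_s, n_s = multiclass_nms(boxes_shared, scores_shared, 0, 0.01, 0.3,
                            400, 200, True, shared=True)
out_p, n_p = multiclass_nms(boxes_percls, scores_percls, 0, 0.01, 0.3,
                            400, 200, True, shared=False)
```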
def batched_multiclass_nms(boxes, scores, background, score_threshold,
nms_threshold, nms_top_k, keep_top_k):
def lod_multiclass_nms(boxes, scores, background, score_threshold,
nms_threshold, nms_top_k, keep_top_k, box_lod,
normalized):
det_outs = []
lod = []
head = 0
for n in range(len(box_lod[0])):
box = boxes[head:head + box_lod[0][n]]
score = scores[head:head + box_lod[0][n]]
head = head + box_lod[0][n]
nmsed_outs, nmsed_num = multiclass_nms(
box,
score,
background,
score_threshold,
nms_threshold,
nms_top_k,
keep_top_k,
normalized,
shared=False)
if nmsed_num == 0:
lod.append(1)
continue
lod.append(nmsed_num)
for c, indices in nmsed_outs.items():
for idx in indices:
xmin, ymin, xmax, ymax = box[idx, c, :]
det_outs.append([c, score[idx][c], xmin, ymin, xmax, ymax])
return det_outs, lod
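The box_lod bookkeeping above walks the flattened [M, C, 4] boxes image by image. A toy sketch (hypothetical lengths) of how the head offset slices out each image's rows:

```python
box_lod = [[2, 3]]        # image 0 owns 2 boxes, image 1 owns 3
head = 0
for n in range(len(box_lod[0])):
    length = box_lod[0][n]
    print('image', n, 'uses rows', head, 'to', head + length)
    head += length
```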
def batched_multiclass_nms(boxes,
scores,
background,
score_threshold,
nms_threshold,
nms_top_k,
keep_top_k,
normalized=True):
batch_size = scores.shape[0]
det_outs = []
lod = []
for n in range(batch_size):
nmsed_outs, nmsed_num = multiclass_nms(boxes[n], scores[n], background,
score_threshold, nms_threshold,
nms_top_k, keep_top_k)
lod.append(nmsed_num)
if nmsed_num == 0: continue
nmsed_outs, nmsed_num = multiclass_nms(
boxes[n],
scores[n],
background,
score_threshold,
nms_threshold,
nms_top_k,
keep_top_k,
normalized,
shared=True)
if nmsed_num == 0:
lod.append(1)
continue
lod.append(nmsed_num)
tmp_det_out = []
for c, indices in nmsed_outs.items():
for idx in indices:
......@@ -168,7 +235,6 @@ class TestMulticlassNMSOp(OpTest):
M = 1200
C = 21
BOX_SIZE = 4
background = 0
nms_threshold = 0.3
nms_top_k = 400
......@@ -193,6 +259,7 @@ class TestMulticlassNMSOp(OpTest):
nmsed_outs, lod = batched_multiclass_nms(boxes, scores, background,
score_threshold, nms_threshold,
nms_top_k, keep_top_k)
print('python lod: ', lod)
nmsed_outs = [-1] if not nmsed_outs else nmsed_outs
nmsed_outs = np.array(nmsed_outs).astype('float32')
......@@ -206,6 +273,7 @@ class TestMulticlassNMSOp(OpTest):
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
'normalized': True,
}
def test_check_output(self):
......@@ -219,13 +287,70 @@ class TestMulticlassNMSOpNoOutput(TestMulticlassNMSOp):
self.score_threshold = 2.0
class TestMulticlassNMSLoDInput(OpTest):
def set_argument(self):
self.score_threshold = 0.01
def setUp(self):
self.set_argument()
M = 1200
C = 21
BOX_SIZE = 4
box_lod = [[1200]]
background = 0
nms_threshold = 0.3
nms_top_k = 400
keep_top_k = 200
score_threshold = self.score_threshold
normalized = False
scores = np.random.random((M, C)).astype('float32')
def softmax(x):
shiftx = (x - np.max(x)).clip(-64.)
exps = np.exp(shiftx)
return exps / np.sum(exps)
scores = np.apply_along_axis(softmax, 1, scores)
boxes = np.random.random((M, C, BOX_SIZE)).astype('float32')
boxes[:, :, 0] = boxes[:, :, 0] * 10
boxes[:, :, 1] = boxes[:, :, 1] * 10
boxes[:, :, 2] = boxes[:, :, 2] * 10 + 10
boxes[:, :, 3] = boxes[:, :, 3] * 10 + 10
nmsed_outs, lod = lod_multiclass_nms(
boxes, scores, background, score_threshold, nms_threshold,
nms_top_k, keep_top_k, box_lod, normalized)
nmsed_outs = [-1] if not nmsed_outs else nmsed_outs
nmsed_outs = np.array(nmsed_outs).astype('float32')
self.op_type = 'multiclass_nms'
self.inputs = {
'BBoxes': (boxes, box_lod),
'Scores': (scores, box_lod),
}
self.outputs = {'Out': (nmsed_outs, [lod])}
self.attrs = {
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
'normalized': normalized,
}
def test_check_output(self):
self.check_output()
class TestIOU(unittest.TestCase):
def test_iou(self):
box1 = np.array([4.0, 3.0, 7.0, 5.0]).astype('float32')
box2 = np.array([3.0, 4.0, 6.0, 8.0]).astype('float32')
expt_output = np.array([2.0 / 16.0]).astype('float32')
calc_output = np.array([iou(box1, box2)]).astype('float32')
calc_output = np.array([iou(box1, box2, True)]).astype('float32')
self.assertTrue(np.allclose(calc_output, expt_output))
......