Unverified commit 7bc8481c, authored by J jerrywgz, committed by GitHub

Merge pull request #15418 from jerrywgz/refine_nms

Refine nms
@@ -325,6 +325,7 @@ paddle.fluid.layers.iou_similarity ArgSpec(args=['x', 'y', 'name'], varargs=None
 paddle.fluid.layers.box_coder ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name'], varargs=None, keywords=None, defaults=('encode_center_size', True, None))
 paddle.fluid.layers.polygon_box_transform ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.yolov3_loss ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'class_num', 'ignore_thresh', 'loss_weight_xy', 'loss_weight_wh', 'loss_weight_conf_target', 'loss_weight_conf_notarget', 'loss_weight_class', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None))
+paddle.fluid.layers.multiclass_nms ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'keep_top_k', 'nms_threshold', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(0.3, True, 1.0, 0, None))
 paddle.fluid.layers.accuracy ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None))
 paddle.fluid.layers.auc ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1))
 paddle.fluid.layers.exponential_decay ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,))
...
@@ -9,9 +9,9 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 limitations under the License. */
+#include <glog/logging.h>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/detection/poly_util.h"
@@ -35,30 +35,45 @@ class MultiClassNMSOp : public framework::OperatorWithKernel {
     auto box_dims = ctx->GetInputDim("BBoxes");
     auto score_dims = ctx->GetInputDim("Scores");
+    auto score_size = score_dims.size();
 
     if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE(score_size == 2 || score_size == 3,
+                     "The rank of Input(Scores) must be 2 or 3");
       PADDLE_ENFORCE_EQ(box_dims.size(), 3,
-                        "The rank of Input(BBoxes) must be 3.");
-      PADDLE_ENFORCE_EQ(score_dims.size(), 3,
-                        "The rank of Input(Scores) must be 3.");
-      PADDLE_ENFORCE(box_dims[2] == 4 || box_dims[2] == 8 ||
-                         box_dims[2] == 16 || box_dims[2] == 24 ||
-                         box_dims[2] == 32,
-                     "The 2nd dimension of Input(BBoxes) must be 4 or 8, "
-                     "represents the layout of coordinate "
-                     "[xmin, ymin, xmax, ymax] or "
-                     "4 points: [x1, y1, x2, y2, x3, y3, x4, y4] or "
-                     "8 points: [xi, yi] i= 1,2,...,8 or "
-                     "12 points: [xi, yi] i= 1,2,...,12 or "
-                     "16 points: [xi, yi] i= 1,2,...,16");
-      PADDLE_ENFORCE_EQ(box_dims[1], score_dims[2],
-                        "The 1st dimensiong of Input(BBoxes) must be equal to "
-                        "3rd dimension of Input(Scores), which represents the "
-                        "predicted bboxes.");
+                        "The rank of Input(BBoxes) must be 3");
+      if (score_size == 3) {
+        PADDLE_ENFORCE(box_dims[2] == 4 || box_dims[2] == 8 ||
+                           box_dims[2] == 16 || box_dims[2] == 24 ||
+                           box_dims[2] == 32,
+                       "The last dimension of Input(BBoxes) must be 4 or 8, "
+                       "represents the layout of coordinate "
+                       "[xmin, ymin, xmax, ymax] or "
+                       "4 points: [x1, y1, x2, y2, x3, y3, x4, y4] or "
+                       "8 points: [xi, yi] i= 1,2,...,8 or "
+                       "12 points: [xi, yi] i= 1,2,...,12 or "
+                       "16 points: [xi, yi] i= 1,2,...,16");
+        PADDLE_ENFORCE_EQ(
+            box_dims[1], score_dims[2],
+            "The 2nd dimension of Input(BBoxes) must be equal to the "
+            "last dimension of Input(Scores), which represents the "
+            "predicted bboxes.");
+      } else {
+        PADDLE_ENFORCE(box_dims[2] == 4,
+                       "The last dimension of Input(BBoxes) must be 4");
+        PADDLE_ENFORCE_EQ(box_dims[1], score_dims[1],
+                          "The 2nd dimension of Input(BBoxes) "
+                          "must be equal to the 2nd dimension "
+                          "of Input(Scores)");
+      }
     }
     // Here the box_dims[0] is not the real dimension of output.
     // It will be rewritten in the computing kernel.
-    ctx->SetOutputDim("Out", {box_dims[1], box_dims[2] + 2});
+    if (score_size == 3) {
+      ctx->SetOutputDim("Out", {box_dims[1], box_dims[2] + 2});
+    } else {
+      ctx->SetOutputDim("Out", {-1, box_dims[2] + 2});
+    }
   }
 
  protected:
@@ -123,8 +138,9 @@ static inline T JaccardOverlap(const T* box1, const T* box2,
     const T inter_ymin = std::max(box1[1], box2[1]);
     const T inter_xmax = std::min(box1[2], box2[2]);
     const T inter_ymax = std::min(box1[3], box2[3]);
-    const T inter_w = inter_xmax - inter_xmin;
-    const T inter_h = inter_ymax - inter_ymin;
+    T norm = normalized ? static_cast<T>(0.) : static_cast<T>(1.);
+    T inter_w = inter_xmax - inter_xmin + norm;
+    T inter_h = inter_ymax - inter_ymin + norm;
     const T inter_area = inter_w * inter_h;
     const T bbox1_area = BBoxArea<T>(box1, normalized);
     const T bbox2_area = BBoxArea<T>(box2, normalized);
@@ -139,7 +155,7 @@ T PolyIoU(const T* box1, const T* box2, const size_t box_size,
   T bbox2_area = PolyArea<T>(box2, box_size, normalized);
   T inter_area = PolyOverlapArea<T>(box1, box2, box_size, normalized);
   if (bbox1_area == 0 || bbox2_area == 0 || inter_area == 0) {
-    // If coordinate values are is invalid
+    // If coordinate values are invalid
     // if area size <= 0, return 0.
     return T(0.);
   } else {
@@ -147,12 +163,35 @@ T PolyIoU(const T* box1, const T* box2, const size_t box_size,
   }
 }
 
+template <class T>
+void SliceOneClass(const platform::DeviceContext& ctx,
+                   const framework::Tensor& items, const int class_id,
+                   framework::Tensor* one_class_item) {
+  T* item_data = one_class_item->mutable_data<T>(ctx.GetPlace());
+  const T* items_data = items.data<T>();
+  const int64_t num_item = items.dims()[0];
+  const int class_num = items.dims()[1];
+  if (items.dims().size() == 3) {
+    int item_size = items.dims()[2];
+    for (int i = 0; i < num_item; ++i) {
+      std::memcpy(item_data + i * item_size,
+                  items_data + i * class_num * item_size + class_id * item_size,
+                  sizeof(T) * item_size);
+    }
+  } else {
+    for (int i = 0; i < num_item; ++i) {
+      item_data[i] = items_data[i * class_num + class_id];
+    }
+  }
+}
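Note: the new SliceOneClass helper gathers the column belonging to one class out of a shared [M, C] score tensor or [M, C, box_size] box tensor. A minimal NumPy sketch of the same gather, with hypothetical array names (not the Paddle API), for readers who want the indexing spelled out:

import numpy as np

def slice_one_class(items, class_id):
    # items: [M, C] scores or [M, C, box_size] boxes; returns the slice
    # that belongs to class_id, mirroring SliceOneClass above.
    if items.ndim == 3:
        return items[:, class_id, :].copy()  # shape [M, box_size]
    return items[:, class_id].copy()         # shape [M]

scores = np.random.rand(5, 3).astype('float32')    # M = 5 boxes, C = 3 classes
boxes = np.random.rand(5, 3, 4).astype('float32')  # per-class box predictions
print(slice_one_class(scores, 1).shape)  # (5,)
print(slice_one_class(boxes, 1).shape)   # (5, 4)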
 
 template <typename T>
 class MultiClassNMSKernel : public framework::OpKernel<T> {
  public:
   void NMSFast(const Tensor& bbox, const Tensor& scores,
                const T score_threshold, const T nms_threshold, const T eta,
-               const int64_t top_k, std::vector<int>* selected_indices) const {
+               const int64_t top_k, std::vector<int>* selected_indices,
+               const bool normalized) const {
     // The total boxes for each instance.
     int64_t num_boxes = bbox.dims()[0];
     // 4: [xmin ymin xmax ymax]
@@ -178,15 +217,16 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
         T overlap = T(0.);
         // 4: [xmin ymin xmax ymax]
         if (box_size == 4) {
-          overlap = JaccardOverlap<T>(bbox_data + idx * box_size,
-                                      bbox_data + kept_idx * box_size, true);
+          overlap =
+              JaccardOverlap<T>(bbox_data + idx * box_size,
+                                bbox_data + kept_idx * box_size, normalized);
         }
         // 8: [x1 y1 x2 y2 x3 y3 x4 y4] or 16, 24, 32
         if (box_size == 8 || box_size == 16 || box_size == 24 ||
             box_size == 32) {
-          overlap =
-              PolyIoU<T>(bbox_data + idx * box_size,
-                         bbox_data + kept_idx * box_size, box_size, true);
+          overlap = PolyIoU<T>(bbox_data + idx * box_size,
+                               bbox_data + kept_idx * box_size, box_size,
+                               normalized);
         }
         keep = overlap <= adaptive_threshold;
       } else {
@@ -205,37 +245,58 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
   void MultiClassNMS(const framework::ExecutionContext& ctx,
                      const Tensor& scores, const Tensor& bboxes,
+                     const int scores_size,
                      std::map<int, std::vector<int>>* indices,
                      int* num_nmsed_out) const {
     int64_t background_label = ctx.Attr<int>("background_label");
     int64_t nms_top_k = ctx.Attr<int>("nms_top_k");
     int64_t keep_top_k = ctx.Attr<int>("keep_top_k");
+    bool normalized = ctx.Attr<bool>("normalized");
     T nms_threshold = static_cast<T>(ctx.Attr<float>("nms_threshold"));
     T nms_eta = static_cast<T>(ctx.Attr<float>("nms_eta"));
     T score_threshold = static_cast<T>(ctx.Attr<float>("score_threshold"));
+    auto& dev_ctx = ctx.template device_context<platform::CPUDeviceContext>();
 
-    int64_t class_num = scores.dims()[0];
-    int64_t predict_dim = scores.dims()[1];
     int num_det = 0;
+
+    int64_t class_num = scores_size == 3 ? scores.dims()[0] : scores.dims()[1];
+    Tensor bbox_slice, score_slice;
     for (int64_t c = 0; c < class_num; ++c) {
       if (c == background_label) continue;
-      Tensor score = scores.Slice(c, c + 1);
-      NMSFast(bboxes, score, score_threshold, nms_threshold, nms_eta, nms_top_k,
-              &((*indices)[c]));
+      if (scores_size == 3) {
+        score_slice = scores.Slice(c, c + 1);
+        bbox_slice = bboxes;
+      } else {
+        score_slice.Resize({scores.dims()[0], 1});
+        bbox_slice.Resize({scores.dims()[0], 4});
+        SliceOneClass<T>(dev_ctx, scores, c, &score_slice);
+        SliceOneClass<T>(dev_ctx, bboxes, c, &bbox_slice);
+      }
+      NMSFast(bbox_slice, score_slice, score_threshold, nms_threshold, nms_eta,
+              nms_top_k, &((*indices)[c]), normalized);
+      if (scores_size == 2) {
+        std::stable_sort((*indices)[c].begin(), (*indices)[c].end());
+      }
       num_det += (*indices)[c].size();
     }
 
     *num_nmsed_out = num_det;
     const T* scores_data = scores.data<T>();
     if (keep_top_k > -1 && num_det > keep_top_k) {
+      const T* sdata;
       std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
       for (const auto& it : *indices) {
         int label = it.first;
-        const T* sdata = scores_data + label * predict_dim;
+        if (scores_size == 3) {
+          sdata = scores_data + label * scores.dims()[1];
+        } else {
+          score_slice.Resize({scores.dims()[0], 1});
+          SliceOneClass<T>(dev_ctx, scores, label, &score_slice);
+          sdata = score_slice.data<T>();
+        }
         const std::vector<int>& label_indices = it.second;
         for (size_t j = 0; j < label_indices.size(); ++j) {
           int idx = label_indices[j];
-          PADDLE_ENFORCE_LT(idx, predict_dim);
           score_index_pairs.push_back(
               std::make_pair(sdata[idx], std::make_pair(label, idx)));
         }
@@ -252,31 +313,55 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
         int idx = score_index_pairs[j].second.second;
         new_indices[label].push_back(idx);
       }
+      if (scores_size == 2) {
+        for (const auto& it : new_indices) {
+          int label = it.first;
+          std::stable_sort(new_indices[label].begin(),
+                           new_indices[label].end());
+        }
+      }
       new_indices.swap(*indices);
       *num_nmsed_out = keep_top_k;
     }
   }
 
-  void MultiClassOutput(const Tensor& scores, const Tensor& bboxes,
+  void MultiClassOutput(const platform::DeviceContext& ctx,
+                        const Tensor& scores, const Tensor& bboxes,
                         const std::map<int, std::vector<int>>& selected_indices,
-                        Tensor* outs) const {
+                        const int scores_size, Tensor* outs) const {
+    int64_t class_num = scores.dims()[1];
     int64_t predict_dim = scores.dims()[1];
     int64_t box_size = bboxes.dims()[1];
-    int64_t out_dim = bboxes.dims()[1] + 2;
+    if (scores_size == 2) {
+      box_size = bboxes.dims()[2];
+    }
+    int64_t out_dim = box_size + 2;
     auto* scores_data = scores.data<T>();
     auto* bboxes_data = bboxes.data<T>();
     auto* odata = outs->data<T>();
+    const T* sdata;
+    Tensor bbox;
+    bbox.Resize({scores.dims()[0], box_size});
     int count = 0;
     for (const auto& it : selected_indices) {
       int label = it.first;
-      const T* sdata = scores_data + label * predict_dim;
       const std::vector<int>& indices = it.second;
+      if (scores_size == 2) {
+        SliceOneClass<T>(ctx, bboxes, label, &bbox);
+      } else {
+        sdata = scores_data + label * predict_dim;
+      }
       for (size_t j = 0; j < indices.size(); ++j) {
         int idx = indices[j];
-        const T* bdata = bboxes_data + idx * box_size;
-        odata[count * out_dim] = label;           // label
-        odata[count * out_dim + 1] = sdata[idx];  // score
+        odata[count * out_dim] = label;  // label
+        const T* bdata;
+        if (scores_size == 3) {
+          bdata = bboxes_data + idx * box_size;
+          odata[count * out_dim + 1] = sdata[idx];  // score
+        } else {
+          bdata = bbox.data<T>() + idx * box_size;
+          odata[count * out_dim + 1] = *(scores_data + idx * class_num + label);
+        }
        // xmin, ymin, xmax, ymax or multi-points coordinates
        std::memcpy(odata + count * out_dim + 2, bdata, box_size * sizeof(T));
        count++;
@@ -285,52 +370,64 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
   }
 
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* boxes = ctx.Input<Tensor>("BBoxes");
-    auto* scores = ctx.Input<Tensor>("Scores");
+    auto* boxes = ctx.Input<LoDTensor>("BBoxes");
+    auto* scores = ctx.Input<LoDTensor>("Scores");
     auto* outs = ctx.Output<LoDTensor>("Out");
     auto score_dims = scores->dims();
+    auto score_size = score_dims.size();
+    auto& dev_ctx = ctx.template device_context<platform::CPUDeviceContext>();
 
-    int64_t batch_size = score_dims[0];
-    int64_t class_num = score_dims[1];
-    int64_t predict_dim = score_dims[2];
-    int64_t box_dim = boxes->dims()[2];
-    int64_t out_dim = boxes->dims()[2] + 2;
     std::vector<std::map<int, std::vector<int>>> all_indices;
     std::vector<size_t> batch_starts = {0};
-    for (int64_t i = 0; i < batch_size; ++i) {
-      Tensor ins_score = scores->Slice(i, i + 1);
-      ins_score.Resize({class_num, predict_dim});
-      Tensor ins_boxes = boxes->Slice(i, i + 1);
-      ins_boxes.Resize({predict_dim, box_dim});
+    int64_t batch_size = score_dims[0];
+    int64_t box_dim = boxes->dims()[2];
+    int64_t out_dim = box_dim + 2;
+    int num_nmsed_out = 0;
+    Tensor boxes_slice, scores_slice;
+    int n = score_size == 3 ? batch_size : boxes->lod().back().size() - 1;
+    for (int i = 0; i < n; ++i) {
+      if (score_size == 3) {
+        scores_slice = scores->Slice(i, i + 1);
+        scores_slice.Resize({score_dims[1], score_dims[2]});
+        boxes_slice = boxes->Slice(i, i + 1);
+        boxes_slice.Resize({score_dims[2], box_dim});
+      } else {
+        auto boxes_lod = boxes->lod().back();
+        scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]);
+        boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]);
+      }
       std::map<int, std::vector<int>> indices;
-      int num_nmsed_out = 0;
-      MultiClassNMS(ctx, ins_score, ins_boxes, &indices, &num_nmsed_out);
+      MultiClassNMS(ctx, scores_slice, boxes_slice, score_size, &indices,
+                    &num_nmsed_out);
       all_indices.push_back(indices);
       batch_starts.push_back(batch_starts.back() + num_nmsed_out);
     }
 
     int num_kept = batch_starts.back();
     if (num_kept == 0) {
-      T* od = outs->mutable_data<T>({1}, ctx.GetPlace());
+      T* od = outs->mutable_data<T>({1, 1}, ctx.GetPlace());
       od[0] = -1;
+      batch_starts = {0, 1};
    } else {
      outs->mutable_data<T>({num_kept, out_dim}, ctx.GetPlace());
-      for (int64_t i = 0; i < batch_size; ++i) {
-        Tensor ins_score = scores->Slice(i, i + 1);
-        ins_score.Resize({class_num, predict_dim});
-        Tensor ins_boxes = boxes->Slice(i, i + 1);
-        ins_boxes.Resize({predict_dim, box_dim});
+      for (int i = 0; i < n; ++i) {
+        if (score_size == 3) {
+          scores_slice = scores->Slice(i, i + 1);
+          boxes_slice = boxes->Slice(i, i + 1);
+          scores_slice.Resize({score_dims[1], score_dims[2]});
+          boxes_slice.Resize({score_dims[2], box_dim});
+        } else {
+          auto boxes_lod = boxes->lod().back();
+          scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]);
+          boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]);
+        }
        int64_t s = batch_starts[i];
        int64_t e = batch_starts[i + 1];
        if (e > s) {
          Tensor out = outs->Slice(s, e);
-          MultiClassOutput(ins_score, ins_boxes, all_indices[i], &out);
+          MultiClassOutput(dev_ctx, scores_slice, boxes_slice, all_indices[i],
+                           score_dims.size(), &out);
        }
      }
    }
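Note: Compute now accepts two input layouts. With the plain 3-D tensors the i-th image is addressed by its batch index (Slice(i, i + 1) followed by a reshape), while with the LoDTensor inputs the i-th image owns the row range [lod[i], lod[i + 1]). A rough NumPy sketch of the two slicing schemes, with hypothetical names and a single LoD level assumed:

import numpy as np

def image_slices(scores, boxes, lod_offsets=None):
    # Tensor case: scores [N, C, M], boxes [N, M, box_size] -> slice by batch index.
    if lod_offsets is None:
        for i in range(scores.shape[0]):
            yield scores[i], boxes[i]
    # LoDTensor case: scores [M, C], boxes [M, C, 4] -> slice by LoD offsets.
    else:
        for i in range(len(lod_offsets) - 1):
            s, e = lod_offsets[i], lod_offsets[i + 1]
            yield scores[s:e], boxes[s:e]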
@@ -346,17 +443,24 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("BBoxes",
-             "(Tensor) A 3-D Tensor with shape "
+             "Two types of bboxes are supported:"
+             "1. (Tensor) A 3-D Tensor with shape "
              "[N, M, 4 or 8 16 24 32] represents the "
             "predicted locations of M bounding bboxes, N is the batch size. "
             "Each bounding box has four coordinate values and the layout is "
-             "[xmin, ymin, xmax, ymax], when box size equals to 4.");
+             "[xmin, ymin, xmax, ymax], when box size equals to 4. "
+             "2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]. "
+             "M is the number of bounding boxes, C is the class number.");
     AddInput("Scores",
-             "(Tensor) A 3-D Tensor with shape [N, C, M] represents the "
+             "Two types of scores are supported:"
+             "1. (Tensor) A 3-D Tensor with shape [N, C, M] represents the "
             "predicted confidence predictions. N is the batch size, C is the "
             "class number, M is number of bounding boxes. For each category "
             "there are in total M scores corresponding to M bounding boxes. "
-             " Please note, M is equal to the 1st dimension of BBoxes. ");
+             "Please note, M is equal to the 2nd dimension of BBoxes. "
+             "2. (LoDTensor) A 2-D LoDTensor with shape [M, C]. "
+             "M is the number of bbox, C is the class number. In this case, "
+             "Input BBoxes should be the second case with shape [M, C, 4].");
     AddAttr<int>(
         "background_label",
         "(int, default: 0) "
@@ -384,6 +488,10 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker {
              "(int64_t) "
             "Number of total bboxes to be kept per image after NMS "
             "step. -1 means keeping all bboxes after NMS step.");
+    AddAttr<bool>("normalized",
+                  "(bool, default true) "
+                  "Whether detections are normalized.")
+        .SetDefault(true);
     AddOutput("Out",
               "(LoDTensor) A 2-D LoDTensor with shape [No, 6] represents the "
               "detections. Each row has 6 values: "
@@ -399,24 +507,21 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 This operator does multi-class non maximum suppression (NMS) on a batch
 of boxes and scores.
 In the NMS step, this operator greedily selects a subset of detection bounding
 boxes that have high scores larger than score_threshold, if providing this
 threshold, then selects the largest nms_top_k confidence scores if nms_top_k
 is larger than -1. Then this operator prunes away boxes that have high IOU
 (intersection over union) overlap with already selected boxes by adaptive
 threshold NMS based on parameters of nms_threshold and nms_eta.
 After the NMS step, at most keep_top_k number of total bboxes are to be kept
 per image if keep_top_k is larger than -1.
 This operator supports multi-class and batched inputs. It applies NMS
 independently for each class. The output is a 2-D LoDTensor; for each
 image, the offsets in the first dimension of the LoDTensor are called LoD, the
 number of offsets is N + 1, where N is the batch size. If LoD[i + 1] - LoD[i] == 0,
-means there is no detected bbox for this image. If there is no detected boxes
-for all images, all the elements in LoD are 0, and the Out only contains one
-value which is -1.
+there is no detected bbox for this image. If no boxes are detected for
+any image, all the elements in LoD are set to {1}, and the Out only
+contains one value which is -1.
 )DOC");
   }
 };
...
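Note: the DOC block above describes the whole pipeline: keep boxes whose score exceeds score_threshold, optionally cut to the nms_top_k best, then greedily suppress boxes whose IoU with an already selected box exceeds an adaptive threshold. A compact Python sketch of that loop, illustrative only (the authoritative reference is the nms helper in the unit test further down, and the exact adaptive-threshold guard in the op may differ):

def greedy_nms(boxes, scores, score_threshold, nms_threshold, nms_top_k,
               iou_fn, eta=1.0):
    # Candidates above score_threshold, best score first, at most nms_top_k.
    order = sorted((i for i, s in enumerate(scores) if s > score_threshold),
                   key=lambda i: scores[i], reverse=True)
    if nms_top_k > -1:
        order = order[:nms_top_k]
    selected, threshold = [], nms_threshold
    for idx in order:
        # Keep idx only if it does not overlap any kept box too much.
        if all(iou_fn(boxes[idx], boxes[kept]) <= threshold for kept in selected):
            selected.append(idx)
            if eta < 1:
                threshold *= eta  # adaptive threshold driven by nms_eta (assumed form)
    return selected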
@@ -49,6 +49,7 @@ __all__ = [
     'box_coder',
     'polygon_box_transform',
     'yolov3_loss',
+    'multiclass_nms',
 ]
@@ -262,8 +263,10 @@ def detection_output(loc,
             number is N + 1, N is the batch size. The i-th image has
             `LoD[i + 1] - LoD[i]` detected results, if it is 0, the i-th image
             has no detected results. If no image has any detected results,
-            all the elements in LoD are 0, and output tensor only contains one
+            LoD will be set to {1}, and the output tensor only contains one
             value, which is -1.
+            (After version 1.3, when no boxes are detected, the lod is changed
+            from {0} to {1}.)
 
     Examples:
         .. code-block:: python
@@ -1960,3 +1963,119 @@ def generate_proposals(scores,
     rpn_roi_probs.stop_gradient = True
     return rpn_rois, rpn_roi_probs
+
+
+def multiclass_nms(bboxes,
+                   scores,
+                   score_threshold,
+                   nms_top_k,
+                   keep_top_k,
+                   nms_threshold=0.3,
+                   normalized=True,
+                   nms_eta=1.,
+                   background_label=0,
+                   name=None):
+    """
+    **Multiclass NMS**
+
+    This operator is to do multi-class non maximum suppression (NMS) on
+    boxes and scores.
+
+    In the NMS step, this operator greedily selects a subset of detection bounding
+    boxes that have high scores larger than score_threshold, if providing this
+    threshold, then selects the largest nms_top_k confidence scores if nms_top_k
+    is larger than -1. Then this operator prunes away boxes that have high IOU
+    (intersection over union) overlap with already selected boxes by adaptive
+    threshold NMS based on parameters of nms_threshold and nms_eta.
+
+    After the NMS step, at most keep_top_k number of total bboxes are to be kept
+    per image if keep_top_k is larger than -1.
+
+    Args:
+        bboxes (Variable): Two types of bboxes are supported:
+                           1. (Tensor) A 3-D Tensor with shape
+                           [N, M, 4 or 8 16 24 32] represents the
+                           predicted locations of M bounding bboxes,
+                           N is the batch size. Each bounding box has four
+                           coordinate values and the layout is
+                           [xmin, ymin, xmax, ymax], when box size equals to 4.
+                           2. (LoDTensor) A 3-D Tensor with shape [M, C, 4].
+                           M is the number of bounding boxes, C is the
+                           class number.
+        scores (Variable): Two types of scores are supported:
+                           1. (Tensor) A 3-D Tensor with shape [N, C, M]
+                           represents the predicted confidence predictions.
+                           N is the batch size, C is the class number, M is
+                           number of bounding boxes. For each category there
+                           are in total M scores corresponding to M bounding
+                           boxes. Please note, M is equal to the 2nd dimension
+                           of BBoxes.
+                           2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
+                           M is the number of bbox, C is the class number.
+                           In this case, input BBoxes should be the second
+                           case with shape [M, C, 4].
+        background_label (int): The index of background label, the background
+                                label will be ignored. If set to -1, then all
+                                categories will be considered. Default: 0
+        score_threshold (float): Threshold to filter out bounding boxes with
+                                 low confidence score. If not provided,
+                                 consider all boxes.
+        nms_top_k (int): Maximum number of detections to be kept according to
+                         the confidences after filtering detections based
+                         on score_threshold.
+        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
+        nms_eta (float): The threshold to be used in NMS. Default: 1.0
+        keep_top_k (int): Number of total bboxes to be kept per image after NMS
+                          step. -1 means keeping all bboxes after NMS step.
+        normalized (bool): Whether detections are normalized. Default: True
+        name(str): Name of the multiclass nms op. Default: None.
+
+    Returns:
+        Out: A 2-D LoDTensor with shape [No, 6] represents the detections.
+             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax],
+             or a 2-D LoDTensor with shape [No, 10] represents the detections.
+             Each row has 10 values:
+             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
+             total number of detections. If there are no detected boxes for all
+             images, lod will be set to {1} and Out only contains one value
+             which is -1.
+             (After version 1.3, when no boxes are detected, the lod is changed
+             from {0} to {1}.)
+
+    Examples:
+        .. code-block:: python
+
+            boxes = fluid.layers.data(name='bboxes', shape=[81, 4],
+                                      dtype='float32', lod_level=1)
+            scores = fluid.layers.data(name='scores', shape=[81],
+                                       dtype='float32', lod_level=1)
+            out = fluid.layers.multiclass_nms(bboxes=boxes,
+                                              scores=scores,
+                                              background_label=0,
+                                              score_threshold=0.5,
+                                              nms_top_k=400,
+                                              nms_threshold=0.3,
+                                              keep_top_k=200,
+                                              normalized=False)
+    """
+    helper = LayerHelper('multiclass_nms', **locals())
+    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
+    helper.append_op(
+        type="multiclass_nms",
+        inputs={'BBoxes': bboxes,
+                'Scores': scores},
+        attrs={
+            'background_label': background_label,
+            'score_threshold': score_threshold,
+            'nms_top_k': nms_top_k,
+            'nms_threshold': nms_threshold,
+            'keep_top_k': keep_top_k,
+            'nms_eta': nms_eta,
+            'normalized': normalized
+        },
+        outputs={'Out': output})
+    output.stop_gradient = True
+
+    return output
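Note: the returned LoDTensor packs the detections of all images into one [No, 6] (or [No, 10]) array; the LoD offsets say how many rows belong to each image. A minimal sketch for splitting a fetched result back into per-image detections (plain NumPy; out_np and lod_offsets are placeholders for whatever your fetch returns):

import numpy as np

def split_detections(out_np, lod_offsets):
    # out_np: [No, 6] rows of [label, confidence, xmin, ymin, xmax, ymax].
    # lod_offsets: e.g. [0, 3, 3, 7] -> image 0 has 3 boxes, image 1 has 0, image 2 has 4.
    return [out_np[lod_offsets[i]:lod_offsets[i + 1]]
            for i in range(len(lod_offsets) - 1)]

When nothing at all is detected, Out is a single row holding -1 and, from version 1.3 on, the LoD becomes {1}, so that sentinel row should be checked for before treating the split as real detections.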
@@ -469,5 +469,16 @@ class TestYoloDetection(unittest.TestCase):
             self.assertIsNotNone(loss)
 
 
+class TestMulticlassNMS(unittest.TestCase):
+    def test_multiclass_nms(self):
+        program = Program()
+        with program_guard(program):
+            bboxes = layers.data(
+                name='bboxes', shape=[-1, 10, 4], dtype='float32')
+            scores = layers.data(name='scores', shape=[-1, 10], dtype='float32')
+            output = layers.multiclass_nms(bboxes, scores, 0.3, 400, 200, 0.7)
+            self.assertIsNotNone(output)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -19,7 +19,7 @@ import copy
 from op_test import OpTest
 
 
-def iou(box_a, box_b):
+def iou(box_a, box_b, norm):
     """Apply intersection-over-union overlap between box_a and box_b
     """
     xmin_a = min(box_a[0], box_a[2])
@@ -32,8 +32,10 @@ def iou(box_a, box_b):
     xmax_b = max(box_b[0], box_b[2])
     ymax_b = max(box_b[1], box_b[3])
 
-    area_a = (ymax_a - ymin_a) * (xmax_a - xmin_a)
-    area_b = (ymax_b - ymin_b) * (xmax_b - xmin_b)
+    area_a = (ymax_a - ymin_a + (norm == False)) * (xmax_a - xmin_a +
+                                                    (norm == False))
+    area_b = (ymax_b - ymin_b + (norm == False)) * (xmax_b - xmin_b +
+                                                    (norm == False))
     if area_a <= 0 and area_b <= 0:
         return 0.0
@@ -42,17 +44,21 @@ def iou(box_a, box_b):
     xb = min(xmax_a, xmax_b)
     yb = min(ymax_a, ymax_b)
 
-    inter_area = max(xb - xa, 0.0) * max(yb - ya, 0.0)
-
-    box_a_area = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
-    box_b_area = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
+    inter_area = max(xb - xa + (norm == False),
+                     0.0) * max(yb - ya + (norm == False), 0.0)
 
     iou_ratio = inter_area / (area_a + area_b - inter_area)
 
     return iou_ratio
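Note: the norm flag adds one unit to widths and heights when the boxes are in integer pixel coordinates, mirroring the + norm term added to JaccardOverlap in the C++ kernel. A quick hand-checked comparison using the iou helper above (the box values are made up for illustration):

box_a = [4.0, 3.0, 7.0, 5.0]
box_b = [5.0, 3.0, 8.0, 5.0]

# normalized: intersection 2*2 = 4, each area 3*2 = 6, IoU = 4 / 8 = 0.5
print(iou(box_a, box_b, True))
# unnormalized: intersection 3*3 = 9, each area 4*3 = 12, IoU = 9 / 15 = 0.6
print(iou(box_a, box_b, False))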
 
 
-def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0):
+def nms(boxes,
+        scores,
+        score_threshold,
+        nms_threshold,
+        top_k=200,
+        normalized=True,
+        eta=1.0):
     """Apply non-maximum suppression at test time to avoid detecting too many
     overlapping bounding boxes for a given object.
     Args:
@@ -87,7 +93,7 @@ def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0):
         for k in range(len(selected_indices)):
             if keep:
                 kept_idx = selected_indices[k]
-                overlap = iou(boxes[idx], boxes[kept_idx])
+                overlap = iou(boxes[idx], boxes[kept_idx], normalized)
                 keep = True if overlap <= adaptive_threshold else False
             else:
                 break
@@ -99,16 +105,24 @@ def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0):
 def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold,
-                   nms_top_k, keep_top_k):
-    class_num = scores.shape[0]
-    priorbox_num = scores.shape[1]
+                   nms_top_k, keep_top_k, normalized, shared):
+    if shared:
+        class_num = scores.shape[0]
+        priorbox_num = scores.shape[1]
+    else:
+        box_num = scores.shape[0]
+        class_num = scores.shape[1]
 
     selected_indices = {}
     num_det = 0
     for c in range(class_num):
         if c == background: continue
-        indices = nms(boxes, scores[c], score_threshold, nms_threshold,
-                      nms_top_k)
+        if shared:
+            indices = nms(boxes, scores[c], score_threshold, nms_threshold,
+                          nms_top_k, normalized)
+        else:
+            indices = nms(boxes[:, c, :], scores[:, c], score_threshold,
+                          nms_threshold, nms_top_k, normalized)
         selected_indices[c] = indices
         num_det += len(indices)
@@ -116,7 +130,10 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold,
     score_index = []
     for c, indices in selected_indices.items():
         for idx in indices:
-            score_index.append((scores[c][idx], c, idx))
+            if shared:
+                score_index.append((scores[c][idx], c, idx))
+            else:
+                score_index.append((scores[idx][c], c, idx))
 
     sorted_score_index = sorted(
         score_index, key=lambda tup: tup[0], reverse=True)
@@ -127,24 +144,75 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold,
             selected_indices[c] = []
         for s, c, idx in sorted_score_index:
             selected_indices[c].append(idx)
+        if not shared:
+            for labels in selected_indices:
+                selected_indices[labels].sort()
         num_det = keep_top_k
 
     return selected_indices, num_det
 
 
-def batched_multiclass_nms(boxes, scores, background, score_threshold,
-                           nms_threshold, nms_top_k, keep_top_k):
+def lod_multiclass_nms(boxes, scores, background, score_threshold,
+                       nms_threshold, nms_top_k, keep_top_k, box_lod,
+                       normalized):
+    det_outs = []
+    lod = []
+    head = 0
+    for n in range(len(box_lod[0])):
+        box = boxes[head:head + box_lod[0][n]]
+        score = scores[head:head + box_lod[0][n]]
+        head = head + box_lod[0][n]
+        nmsed_outs, nmsed_num = multiclass_nms(
+            box,
+            score,
+            background,
+            score_threshold,
+            nms_threshold,
+            nms_top_k,
+            keep_top_k,
+            normalized,
+            shared=False)
+        if nmsed_num == 0:
+            #lod.append(1)
+            continue
+        lod.append(nmsed_num)
+        for c, indices in nmsed_outs.items():
+            for idx in indices:
+                xmin, ymin, xmax, ymax = box[idx, c, :]
+                det_outs.append([c, score[idx][c], xmin, ymin, xmax, ymax])
+    if len(lod) == 0:
+        lod.append(1)
+
+    return det_outs, lod
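Note: box_lod[0] in the reference above holds per-image lengths rather than offsets. With box_lod = [[2, 3]], the first image owns rows 0-1 and the second rows 2-4 of the [M, C, 4] boxes and [M, C] scores. A tiny NumPy illustration of that slicing, with hypothetical shapes:

import numpy as np

box_lod = [[2, 3]]                  # two images: 2 boxes, then 3 boxes
boxes = np.random.rand(5, 21, 4)    # [M, C, 4] per-class box predictions
scores = np.random.rand(5, 21)      # [M, C] per-class scores

head = 0
for n in range(len(box_lod[0])):
    box = boxes[head:head + box_lod[0][n]]
    score = scores[head:head + box_lod[0][n]]
    head += box_lod[0][n]
    print(box.shape, score.shape)   # (2, 21, 4) (2, 21), then (3, 21, 4) (3, 21)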
+
+
+def batched_multiclass_nms(boxes,
+                           scores,
+                           background,
+                           score_threshold,
+                           nms_threshold,
+                           nms_top_k,
+                           keep_top_k,
+                           normalized=True):
     batch_size = scores.shape[0]
 
     det_outs = []
     lod = []
     for n in range(batch_size):
-        nmsed_outs, nmsed_num = multiclass_nms(boxes[n], scores[n], background,
-                                               score_threshold, nms_threshold,
-                                               nms_top_k, keep_top_k)
-        lod.append(nmsed_num)
-        if nmsed_num == 0: continue
+        nmsed_outs, nmsed_num = multiclass_nms(
+            boxes[n],
+            scores[n],
+            background,
+            score_threshold,
+            nms_threshold,
+            nms_top_k,
+            keep_top_k,
+            normalized,
+            shared=True)
+        if nmsed_num == 0:
+            continue
+        lod.append(nmsed_num)
 
         tmp_det_out = []
         for c, indices in nmsed_outs.items():
             for idx in indices:
@@ -154,7 +222,8 @@ def batched_multiclass_nms(boxes, scores, background, score_threshold,
         sorted_det_out = sorted(
             tmp_det_out, key=lambda tup: tup[0], reverse=False)
         det_outs.extend(sorted_det_out)
+    if len(lod) == 0:
+        lod += [1]
 
     return det_outs, lod
@@ -168,7 +237,6 @@ class TestMulticlassNMSOp(OpTest):
         M = 1200
         C = 21
         BOX_SIZE = 4
         background = 0
         nms_threshold = 0.3
         nms_top_k = 400
@@ -206,6 +274,7 @@ class TestMulticlassNMSOp(OpTest):
             'keep_top_k': keep_top_k,
             'score_threshold': score_threshold,
             'nms_eta': 1.0,
+            'normalized': True,
         }
 
     def test_check_output(self):
@@ -219,13 +288,70 @@ class TestMulticlassNMSOpNoOutput(TestMulticlassNMSOp):
         self.score_threshold = 2.0
 
 
+class TestMulticlassNMSLoDInput(OpTest):
+    def set_argument(self):
+        self.score_threshold = 0.01
+
+    def setUp(self):
+        self.set_argument()
+        M = 1200
+        C = 21
+        BOX_SIZE = 4
+        box_lod = [[1200]]
+        background = 0
+        nms_threshold = 0.3
+        nms_top_k = 400
+        keep_top_k = 200
+        score_threshold = self.score_threshold
+        normalized = False
+
+        scores = np.random.random((M, C)).astype('float32')
+
+        def softmax(x):
+            shiftx = x - np.max(x).clip(-64.)
+            exps = np.exp(shiftx)
+            return exps / np.sum(exps)
+
+        scores = np.apply_along_axis(softmax, 1, scores)
+
+        boxes = np.random.random((M, C, BOX_SIZE)).astype('float32')
+        boxes[:, :, 0] = boxes[:, :, 0] * 10
+        boxes[:, :, 1] = boxes[:, :, 1] * 10
+        boxes[:, :, 2] = boxes[:, :, 2] * 10 + 10
+        boxes[:, :, 3] = boxes[:, :, 3] * 10 + 10
+
+        nmsed_outs, lod = lod_multiclass_nms(
+            boxes, scores, background, score_threshold, nms_threshold,
+            nms_top_k, keep_top_k, box_lod, normalized)
+        nmsed_outs = [-1] if not nmsed_outs else nmsed_outs
+        nmsed_outs = np.array(nmsed_outs).astype('float32')
+        self.op_type = 'multiclass_nms'
+        self.inputs = {
+            'BBoxes': (boxes, box_lod),
+            'Scores': (scores, box_lod),
+        }
+        self.outputs = {'Out': (nmsed_outs, [lod])}
+        self.attrs = {
+            'background_label': 0,
+            'nms_threshold': nms_threshold,
+            'nms_top_k': nms_top_k,
+            'keep_top_k': keep_top_k,
+            'score_threshold': score_threshold,
+            'nms_eta': 1.0,
+            'normalized': normalized,
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+
 class TestIOU(unittest.TestCase):
     def test_iou(self):
         box1 = np.array([4.0, 3.0, 7.0, 5.0]).astype('float32')
         box2 = np.array([3.0, 4.0, 6.0, 8.0]).astype('float32')
         expt_output = np.array([2.0 / 16.0]).astype('float32')
-        calc_output = np.array([iou(box1, box2)]).astype('float32')
+        calc_output = np.array([iou(box1, box2, True)]).astype('float32')
         self.assertTrue(np.allclose(calc_output, expt_output))
...