diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 39db2579ecb2323acc47aad72c56cdd4d3fe203b..2fdb32644adde6a2c5b369041431f606b229bad9 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -2035,8 +2035,8 @@ void NMSInferMeta(const MetaTensor& x, float threshold, MetaTensor* out) {
                         "whose shape must be [N, 4] "
                         "N is the number of boxes "
                         "in last dimension in format [x1, x2, y1, y2]. "));
-  auto num_boxes = boxes_dim[0];
-  out->set_dims(phi::make_ddim({num_boxes}));
+  out->set_dims(phi::make_ddim({-1}));
+  out->set_dtype(DataType::INT64);
 }
 
 void NormInferMeta(const MetaTensor& x,
diff --git a/paddle/phi/kernels/cpu/nms_kernel.cc b/paddle/phi/kernels/cpu/nms_kernel.cc
index 7e656b14f1fc538d5206a7d802f6f976fab33beb..4b56f6bb9510502b4f47a5d4603626a62398d21f 100644
--- a/paddle/phi/kernels/cpu/nms_kernel.cc
+++ b/paddle/phi/kernels/cpu/nms_kernel.cc
@@ -16,16 +16,17 @@
 
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/diagonal.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 
 namespace phi {
 
 template <typename T>
-static void NMS(const T* boxes_data,
-                int64_t* output_data,
-                float threshold,
-                int64_t num_boxes) {
+static int64_t NMS(const T* boxes_data,
+                   int64_t* output_data,
+                   float threshold,
+                   int64_t num_boxes) {
   auto num_masks = CeilDivide(num_boxes, 64);
   std::vector<uint64_t> masks(num_masks, 0);
 
@@ -54,9 +55,13 @@ static void NMS(const T* boxes_data,
     output_data[output_data_idx++] = i;
   }
 
+  int64_t num_keep_boxes = output_data_idx;
+
   for (; output_data_idx < num_boxes; ++output_data_idx) {
     output_data[output_data_idx] = 0;
   }
+
+  return num_keep_boxes;
 }
 
 template <typename T, typename Context>
@@ -64,8 +69,15 @@ void NMSKernel(const Context& dev_ctx,
                const DenseTensor& boxes,
                float threshold,
                DenseTensor* output) {
-  auto output_data = dev_ctx.template Alloc<int64_t>(output);
-  NMS<T>(boxes.data<T>(), output_data, threshold, boxes.dims()[0]);
+  int64_t num_boxes = boxes.dims()[0];
+  DenseTensor output_tmp;
+  output_tmp.Resize(phi::make_ddim({num_boxes}));
+  auto output_tmp_data = dev_ctx.template Alloc<int64_t>(&output_tmp);
+
+  int64_t num_keep_boxes =
+      NMS<T>(boxes.data<T>(), output_tmp_data, threshold, num_boxes);
+  auto slice_out = output_tmp.Slice(0, num_keep_boxes);
+  phi::Copy(dev_ctx, slice_out, dev_ctx.GetPlace(), false, output);
 }
 
 }  // namespace phi
diff --git a/paddle/phi/kernels/gpu/nms_kernel.cu b/paddle/phi/kernels/gpu/nms_kernel.cu
index 490753f1313655c6f6653e31c0faec290c24f1a5..dcc6d6e2b45f00f8dc0fcc1edc13016861843d51 100644
--- a/paddle/phi/kernels/gpu/nms_kernel.cu
+++ b/paddle/phi/kernels/gpu/nms_kernel.cu
@@ -59,7 +59,6 @@ void NMSKernel(const Context& dev_ctx,
                const DenseTensor& boxes,
                float threshold,
                DenseTensor* output) {
-  auto* output_data = dev_ctx.template Alloc<int64_t>(output);
   const int64_t num_boxes = boxes.dims()[0];
   const auto blocks_per_line = CeilDivide(num_boxes, threadsPerBlock);
   dim3 block(threadsPerBlock);
@@ -93,11 +92,13 @@ void NMSKernel(const Context& dev_ctx,
       }
     }
   }
+  output->Resize(phi::make_ddim({last_box_num}));
+  auto* output_data = dev_ctx.template Alloc<int64_t>(output);
   paddle::memory::Copy(dev_ctx.GetPlace(),
                        output_data,
                        phi::CPUPlace(),
                        output_host,
-                       sizeof(int64_t) * num_boxes,
+                       sizeof(int64_t) * last_box_num,
                        dev_ctx.stream());
 }
 }  // namespace phi
diff --git a/python/paddle/fluid/tests/unittests/test_nms_op.py b/python/paddle/fluid/tests/unittests/test_nms_op.py
index a81a46e1140e8dfc636421ebe869056bf904f9e0..cbd24d4ddf22e92a62252990bdaca7e3d81792b4 100755
--- a/python/paddle/fluid/tests/unittests/test_nms_op.py
+++ b/python/paddle/fluid/tests/unittests/test_nms_op.py
@@ -65,7 +65,7 @@ def nms(boxes, nms_threshold):
         else:
             continue
 
-    return selected_indices
+    return selected_indices[:cnt]
 
 
 class TestNMSOp(OpTest):
diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py
index 032fe4bd356072da5b66fdfea34a7a41443ae111..e42c505cb0af394e064687836acd045899910d50 100755
--- a/python/paddle/vision/ops.py
+++ b/python/paddle/vision/ops.py
@@ -1611,7 +1611,9 @@ def nms(boxes,
     import paddle
     if category_idxs is None:
         sorted_global_indices = paddle.argsort(scores, descending=True)
-        return _nms(boxes[sorted_global_indices], iou_threshold)
+        sorted_keep_boxes_indices = _nms(boxes[sorted_global_indices],
+                                         iou_threshold)
+        return sorted_global_indices[sorted_keep_boxes_indices]
 
     if top_k is not None:
         assert top_k <= scores.shape[
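
For reference, a minimal usage sketch (not part of the patch) of the behaviour the python/paddle/vision/ops.py hunk is meant to guarantee: when `scores` is given, the kernel runs on score-sorted boxes, so the indices it returns refer to the sorted order and must be mapped back through the argsort permutation, and after this change the result has length equal to the number of kept boxes instead of being zero-padded to N. The box/score values are illustrative only, and boxes are assumed to be in [x1, y1, x2, y2] form.

import paddle

# Three boxes; the first two overlap heavily, the third is isolated.
boxes = paddle.to_tensor([[0.0, 0.0, 10.0, 10.0],
                          [1.0, 1.0, 11.0, 11.0],
                          [50.0, 50.0, 60.0, 60.0]], dtype='float32')
scores = paddle.to_tensor([0.5, 0.9, 0.8], dtype='float32')

keep = paddle.vision.ops.nms(boxes, iou_threshold=0.5, scores=scores)

# With the fix, `keep` holds indices into the ORIGINAL `boxes` tensor,
# ordered by descending score, and its length equals the number of kept
# boxes (here 2: the higher-scoring box of the overlapping pair plus the
# isolated box) rather than a zero-padded length-N tensor.
print(keep)  # expected: [1, 2]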