diff --git a/deploy/cpp/include/config_parser.h b/deploy/cpp/include/config_parser.h
index 82d103723aa134ff72449a2d0ca3735b68c86fee..1f2e381c5284bb7ce16a6b06f858a32e83290f98 100644
--- a/deploy/cpp/include/config_parser.h
+++ b/deploy/cpp/include/config_parser.h
@@ -120,6 +120,10 @@ class ConfigPaser {
       }
     }
 
+    if (config["mask"].IsDefined()) {
+      mask_ = config["mask"].as<bool>();
+    }
+
     return true;
   }
   std::string mode_;
@@ -132,6 +136,7 @@ class ConfigPaser {
   std::vector<int> fpn_stride_;
   bool use_dynamic_shape_;
   float conf_thresh_;
+  bool mask_ = false;
 };
 
 }  // namespace PaddleDetection
diff --git a/deploy/cpp/include/object_detector.h b/deploy/cpp/include/object_detector.h
index 0a336c33401d5d7d3e4e27a22862fd666da1a36e..30dd09ab7ef808314a353c72660f78b368004d25 100644
--- a/deploy/cpp/include/object_detector.h
+++ b/deploy/cpp/include/object_detector.h
@@ -114,6 +114,7 @@ class ObjectDetector {
                    std::vector<PaddleDetection::ObjectResult>* result,
                    std::vector<int> bbox_num,
                    std::vector<float> output_data_,
+                   std::vector<int> output_mask_data_,
                    bool is_rbox);
 
   std::shared_ptr<Predictor> predictor_;
diff --git a/deploy/cpp/include/utils.h b/deploy/cpp/include/utils.h
index 3802e1267176a050402d1fdf742e54a79f33ffb9..b41db0dacff17339ffcac591b7825cec09d3663d 100644
--- a/deploy/cpp/include/utils.h
+++ b/deploy/cpp/include/utils.h
@@ -14,13 +14,13 @@
 
 #pragma once
 
-#include
-#include
-#include
-#include
+#include
 #include
+#include
 #include
-#include
+#include
+#include
+#include
 
 namespace PaddleDetection {
 
@@ -32,8 +32,10 @@ struct ObjectResult {
   int class_id;
   // Confidence of detected object
   float confidence;
+  // Mask of detected object
+  std::vector<int> mask;
 };
 
 void nms(std::vector<ObjectResult> &input_boxes, float nms_threshold);
 
-}  // namespace PaddleDetection
\ No newline at end of file
+}  // namespace PaddleDetection
diff --git a/deploy/cpp/src/object_detector.cc b/deploy/cpp/src/object_detector.cc
index a99fcd515337e72ff59a09c7eeaa12072a774cc1..38914b4b5d27e9eb2aa137f273cde148d3fb9ad7 100644
--- a/deploy/cpp/src/object_detector.cc
+++ b/deploy/cpp/src/object_detector.cc
@@ -103,6 +103,8 @@ cv::Mat VisualizeResult(
     const std::vector<int>& colormap,
     const bool is_rbox = false) {
   cv::Mat vis_img = img.clone();
+  int img_h = vis_img.rows;
+  int img_w = vis_img.cols;
   for (int i = 0; i < results.size(); ++i) {
     // Configure color and text size
     std::ostringstream oss;
@@ -136,6 +138,33 @@
       cv::Rect roi = cv::Rect(results[i].rect[0], results[i].rect[1], w, h);
       // Draw roi object, text, and background
      cv::rectangle(vis_img, roi, roi_color, 2);
+
+      // Draw mask
+      std::vector<int> mask_v = results[i].mask;
+      if (mask_v.size() > 0) {
+        cv::Mat mask = cv::Mat(img_h, img_w, CV_32S);
+        std::memcpy(mask.data, mask_v.data(), mask_v.size() * sizeof(int));
+
+        cv::Mat colored_img = vis_img.clone();
+
+        std::vector<cv::Mat> contours;
+        cv::Mat hierarchy;
+        mask.convertTo(mask, CV_8U);
+        cv::findContours(
+            mask, contours, hierarchy, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);
+        cv::drawContours(colored_img,
+                         contours,
+                         -1,
+                         roi_color,
+                         -1,
+                         cv::LINE_8,
+                         hierarchy,
+                         100);
+
+        cv::Mat debug_roi = vis_img;
+        colored_img = 0.4 * colored_img + 0.6 * vis_img;
+        colored_img.copyTo(vis_img, mask);
+      }
     }
 
     origin.x = results[i].rect[0];
@@ -171,9 +200,16 @@ void ObjectDetector::Postprocess(
     std::vector<PaddleDetection::ObjectResult>* result,
     std::vector<int> bbox_num,
     std::vector<float> output_data_,
+    std::vector<int> output_mask_data_,
     bool is_rbox = false) {
   result->clear();
   int start_idx = 0;
+  int total_num = std::accumulate(bbox_num.begin(), bbox_num.end(), 0);
+  int out_mask_dim = -1;
+  if (config_.mask_) {
+    out_mask_dim = output_mask_data_.size() / total_num;
+  }
+
   for (int im_id = 0; im_id < mats.size(); im_id++) {
     cv::Mat raw_mat = mats[im_id];
     int rh = 1;
@@ -218,6 +254,17 @@
       result_item.rect = {xmin, ymin, xmax, ymax};
       result_item.class_id = class_id;
       result_item.confidence = score;
+
+      if (config_.mask_) {
+        std::vector<int> mask;
+        for (int k = 0; k < out_mask_dim; ++k) {
+          if (output_mask_data_[k + j * out_mask_dim] > -1) {
+            mask.push_back(output_mask_data_[k + j * out_mask_dim]);
+          }
+        }
+        result_item.mask = mask;
+      }
+
       result->push_back(result_item);
     }
   }
@@ -241,6 +288,7 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,
   std::vector<float> scale_factor_all(batch_size * 2);
   std::vector<const float *> output_data_list_;
   std::vector<int> out_bbox_num_data_;
+  std::vector<int> out_mask_data_;
 
   // in_net img for each batch
   std::vector<cv::Mat> in_net_img_all(batch_size);
@@ -322,7 +370,10 @@
       std::vector<int> output_shape = output_tensor->shape();
       int out_num = std::accumulate(
           output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
-      if (output_tensor->type() == paddle_infer::DataType::INT32) {
+      if (config_.mask_ && (j == 2)) {
+        out_mask_data_.resize(out_num);
+        output_tensor->CopyToCpu(out_mask_data_.data());
+      } else if (output_tensor->type() == paddle_infer::DataType::INT32) {
         out_bbox_num_data_.resize(out_num);
         output_tensor->CopyToCpu(out_bbox_num_data_.data());
       } else {
@@ -347,7 +398,10 @@
       int out_num = std::accumulate(
           output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
       output_shape_list.push_back(output_shape);
-      if (output_tensor->type() == paddle_infer::DataType::INT32) {
+      if (config_.mask_ && (j == 2)) {
+        out_mask_data_.resize(out_num);
+        output_tensor->CopyToCpu(out_mask_data_.data());
+      } else if (output_tensor->type() == paddle_infer::DataType::INT32) {
         out_bbox_num_data_.resize(out_num);
         output_tensor->CopyToCpu(out_bbox_num_data_.data());
       } else {
@@ -390,7 +444,12 @@
     bbox_num->push_back(result->size());
   } else {
     is_rbox = output_shape_list[0][output_shape_list[0].size() - 1] % 10 == 0;
-    Postprocess(imgs, result, out_bbox_num_data_, out_tensor_list[0], is_rbox);
+    Postprocess(imgs,
+                result,
+                out_bbox_num_data_,
+                out_tensor_list[0],
+                out_mask_data_,
+                is_rbox);
     for (int k = 0; k < out_bbox_num_data_.size(); k++) {
       int tmp = out_bbox_num_data_[k];
       bbox_num->push_back(tmp);
diff --git a/deploy/cpp/src/preprocess_op.cc b/deploy/cpp/src/preprocess_op.cc
index 4ac3daa304e933e307596423442502a5bfc06da5..d4a1fb4191b449267581c71e94b5ff959ac886c3 100644
--- a/deploy/cpp/src/preprocess_op.cc
+++ b/deploy/cpp/src/preprocess_op.cc
@@ -60,12 +60,11 @@ void Permute::Run(cv::Mat* im, ImageBlob* data) {
 
 void Resize::Run(cv::Mat* im, ImageBlob* data) {
   auto resize_scale = GenerateScale(*im);
-  data->im_shape_ = {static_cast<float>(im->cols * resize_scale.first),
-                     static_cast<float>(im->rows * resize_scale.second)};
-  data->in_net_shape_ = {static_cast<float>(im->cols * resize_scale.first),
-                         static_cast<float>(im->rows * resize_scale.second)};
   cv::resize(
       *im, *im, cv::Size(), resize_scale.first, resize_scale.second, interp_);
+
+  data->in_net_shape_ = {static_cast<float>(im->rows),
+                         static_cast<float>(im->cols)};
   data->im_shape_ = {
       static_cast<float>(im->rows), static_cast<float>(im->cols),
   };
@@ -154,6 +153,7 @@ float LetterBoxResize::GenerateScale(const cv::Mat& im) {
 
 void PadStride::Run(cv::Mat* im, ImageBlob* data) {
   if (stride_ <= 0) {
+    data->in_net_im_ = im->clone();
     return;
   }
   int rc = im->channels();
@@ -242,7 +242,9 @@ bool CheckDynamicInput(const std::vector<cv::Mat>& imgs) {
   int h = imgs.at(0).rows;
   int w = imgs.at(0).cols;
   for (int i = 1; i < imgs.size(); ++i) {
-    if (imgs.at(i).rows != h || imgs.at(i).cols != w) {
+    int hi = imgs.at(i).rows;
+    int wi = imgs.at(i).cols;
+    if (hi != h || wi != w) {
       return true;
     }
   }
diff --git a/deploy/python/infer.py b/deploy/python/infer.py
index 3296e16e5a9612ba71d862d6843d9b9f576be1ff..2808d2c58a2514c60b68a3e1ab4a21f0093a25c4 100644
--- a/deploy/python/infer.py
+++ b/deploy/python/infer.py
@@ -206,7 +206,8 @@ class Detector(object):
         for k, v in res.items():
             results[k].append(v)
         for k, v in results.items():
-            results[k] = np.concatenate(v)
+            if k != 'masks':
+                results[k] = np.concatenate(v)
         return results
 
     def get_timer(self):
@@ -296,7 +297,7 @@
         if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
         out_path = os.path.join(self.output_dir, video_out_name)
-        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        fourcc = cv2.VideoWriter_fourcc(* 'mp4v')
         writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
         index = 1
         while (1):
diff --git a/deploy/python/visualize.py b/deploy/python/visualize.py
index 9c07b8491d6790ddd2303d9abe1c45070f8c5657..c26a6e4673c846a5ad0cb0d2c098edea207a7a60 100644
--- a/deploy/python/visualize.py
+++ b/deploy/python/visualize.py
@@ -96,6 +96,8 @@ def draw_mask(im, np_boxes, np_masks, labels, threshold=0.5):
     expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
     np_boxes = np_boxes[expect_boxes, :]
     np_masks = np_masks[expect_boxes, :, :]
+    im_h, im_w = im.shape[:2]
+    np_masks = np_masks[:, :im_h, :im_w]
     for i in range(len(np_masks)):
         clsid, score = int(np_boxes[i][0]), np_boxes[i][1]
         mask = np_masks[i]
diff --git a/ppdet/metrics/json_results.py b/ppdet/metrics/json_results.py
index c703de63be89e326da979d2edbe0a3e1afca3bec..93354ec1fc592b1567b5f0a3e2044a215d231a30 100755
--- a/ppdet/metrics/json_results.py
+++ b/ppdet/metrics/json_results.py
@@ -65,6 +65,14 @@ def get_det_poly_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
     return det_res
 
 
+def strip_mask(mask):
+    row = mask[0, 0, :]
+    col = mask[0, :, 0]
+    im_h = len(col) - np.count_nonzero(col == -1)
+    im_w = len(row) - np.count_nonzero(row == -1)
+    return mask[:, :im_h, :im_w]
+
+
 def get_seg_res(masks, bboxes, mask_nums, image_id, label_to_cat_id_map):
     import pycocotools.mask as mask_util
     seg_res = []
@@ -72,8 +80,10 @@
     for i in range(len(mask_nums)):
         cur_image_id = int(image_id[i][0])
         det_nums = mask_nums[i]
+        mask_i = masks[k:k + det_nums]
+        mask_i = strip_mask(mask_i)
         for j in range(det_nums):
-            mask = masks[k].astype(np.uint8)
+            mask = mask_i[j].astype(np.uint8)
             score = float(bboxes[k][1])
             label = int(bboxes[k][0])
             k = k + 1
diff --git a/ppdet/modeling/architectures/cascade_rcnn.py b/ppdet/modeling/architectures/cascade_rcnn.py
index 4b5caa7a3ad16f535c007ffa0888b44c8958478b..fc5949af0ac4efaea3ea28bbb416859881461f30 100644
--- a/ppdet/modeling/architectures/cascade_rcnn.py
+++ b/ppdet/modeling/architectures/cascade_rcnn.py
@@ -111,8 +111,8 @@ class CascadeRCNN(BaseArch):
            bbox, bbox_num = self.bbox_post_process(
                preds, (refined_rois, rois_num), im_shape, scale_factor)
            # rescale the prediction back to origin image
-            bbox_pred = self.bbox_post_process.get_pred(bbox, bbox_num,
-                                                        im_shape, scale_factor)
+            bbox, bbox_pred, bbox_num = self.bbox_post_process.get_pred(
+                bbox, bbox_num, im_shape, scale_factor)
            if not self.with_mask:
                return bbox_pred, bbox_num, None
            mask_out = self.mask_head(body_feats, bbox, bbox_num, self.inputs)
diff --git a/ppdet/modeling/architectures/faster_rcnn.py b/ppdet/modeling/architectures/faster_rcnn.py
index 26a2672d60f49aa989c7945b65ce3ecd9beec182..ce9a8e4b57d2dfe54fde037fed2dc0156cb71b51 100644
--- a/ppdet/modeling/architectures/faster_rcnn.py
+++ b/ppdet/modeling/architectures/faster_rcnn.py
@@ -87,8 +87,8 @@ class FasterRCNN(BaseArch):
                                                    im_shape, scale_factor)
 
            # rescale the prediction back to origin image
-            bbox_pred = self.bbox_post_process.get_pred(bbox, bbox_num,
-                                                        im_shape, scale_factor)
+            bboxes, bbox_pred, bbox_num = self.bbox_post_process.get_pred(
+                bbox, bbox_num, im_shape, scale_factor)
            return bbox_pred, bbox_num
 
    def get_loss(self, ):
diff --git a/ppdet/modeling/architectures/mask_rcnn.py b/ppdet/modeling/architectures/mask_rcnn.py
index 43b8bff94aaf6f496d978fe755b55ba79f7786b2..a322f9f8e7b41d47d90b03b594fcdb47665c2c45 100644
--- a/ppdet/modeling/architectures/mask_rcnn.py
+++ b/ppdet/modeling/architectures/mask_rcnn.py
@@ -112,8 +112,8 @@ class MaskRCNN(BaseArch):
                body_feats, bbox, bbox_num, self.inputs, feat_func=feat_func)
 
            # rescale the prediction back to origin image
-            bbox_pred = self.bbox_post_process.get_pred(bbox, bbox_num,
-                                                        im_shape, scale_factor)
+            bbox, bbox_pred, bbox_num = self.bbox_post_process.get_pred(
+                bbox, bbox_num, im_shape, scale_factor)
            origin_shape = self.bbox_post_process.get_origin_shape()
            mask_pred = self.mask_post_process(mask_out, bbox_pred, bbox_num,
                                               origin_shape)
diff --git a/ppdet/modeling/post_process.py b/ppdet/modeling/post_process.py
index 72e409e4008ea55b4e84a09125a069215a8f34c3..e74095505222142d47c17a0e665d358561558173 100644
--- a/ppdet/modeling/post_process.py
+++ b/ppdet/modeling/post_process.py
@@ -171,7 +171,7 @@ class BBoxPostProcess(nn.Layer):
        pred_label = paddle.where(keep_mask, pred_label,
                                  paddle.ones_like(pred_label) * -1)
        pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
-        return pred_result
+        return bboxes, pred_result, bbox_num
 
    def get_origin_shape(self, ):
        return self.origin_shape_list
@@ -179,6 +179,7 @@
 
 @register
 class MaskPostProcess(object):
+    __shared__ = ['export_onnx']
    """
    refer to:
    https://github.com/facebookresearch/detectron2/layers/mask_ops.py
@@ -186,9 +187,10 @@
    Get Mask output according to the output from model
    """
 
-    def __init__(self, binary_thresh=0.5):
+    def __init__(self, binary_thresh=0.5, export_onnx=False):
        super(MaskPostProcess, self).__init__()
        self.binary_thresh = binary_thresh
+        self.export_onnx = export_onnx
 
    def paste_mask(self, masks, boxes, im_h, im_w):
        """
@@ -200,6 +202,7 @@
        N = masks.shape[0]
 
        img_y = paddle.arange(y0_int, y1_int) + 0.5
        img_x = paddle.arange(x0_int, x1_int) + 0.5
+        img_y = (img_y - y0) / (y1 - y0) * 2 - 1
        img_x = (img_x - x0) / (x1 - x0) * 2 - 1
        # img_x, img_y have shapes (N, w), (N, h)
@@ -230,15 +233,34 @@
        """
        num_mask = mask_out.shape[0]
        origin_shape = paddle.cast(origin_shape, 'int32')
-        # TODO: support bs > 1 and mask output dtype is bool
-        pred_result = paddle.zeros(
-            [num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
-
-        im_h, im_w = origin_shape[0][0], origin_shape[0][1]
-        pred_mask = self.paste_mask(mask_out[:, None, :, :], bboxes[:, 2:],
-                                    im_h, im_w)
-        pred_mask = pred_mask >= self.binary_thresh
-        pred_result = paddle.cast(pred_mask, 'int32')
+
+        if self.export_onnx:
+            h, w = origin_shape[0][0], origin_shape[0][1]
+            mask_onnx = self.paste_mask(mask_out[:, None, :, :], bboxes[:, 2:],
+                                        h, w)
+            mask_onnx = mask_onnx >= self.binary_thresh
+            pred_result = paddle.cast(mask_onnx, 'int32')
+
+        else:
+            max_h = paddle.max(origin_shape[:, 0])
+            max_w = paddle.max(origin_shape[:, 1])
+            pred_result = paddle.zeros(
+                [num_mask, max_h, max_w], dtype='int32') - 1
+
+            id_start = 0
+            for i in range(paddle.shape(bbox_num)[0]):
+                bboxes_i = bboxes[id_start:id_start + bbox_num[i], :]
+                mask_out_i = mask_out[id_start:id_start + bbox_num[i], :, :]
+                im_h = origin_shape[i, 0]
+                im_w = origin_shape[i, 1]
+                bbox_num_i = bbox_num[id_start]
+                pred_mask = self.paste_mask(mask_out_i[:, None, :, :],
+                                            bboxes_i[:, 2:], im_h, im_w)
+                pred_mask = paddle.cast(pred_mask >= self.binary_thresh,
+                                        'int32')
+                pred_result[id_start:id_start + bbox_num[i], :im_h, :
+                            im_w] = pred_mask
+                id_start += bbox_num[i]
        return pred_result