//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sstream>
// for setprecision
#include <iomanip>
#include <chrono>
#include "include/object_detector.h"


using namespace paddle_infer;

namespace PaddleDetection {

// Load Model and create model predictor
void ObjectDetector::LoadModel(const std::string& model_dir,
                               const int batch_size,
G
Guanghua Yu 已提交
28
                               const std::string& run_mode) {
Q
qingqing01 已提交
29 30 31 32
  paddle_infer::Config config;
  std::string prog_file = model_dir + OS_PATH_SEP + "model.pdmodel";
  std::string params_file = model_dir + OS_PATH_SEP + "model.pdiparams";
  config.SetModel(prog_file, params_file);
G
Guanghua Yu 已提交
33
  if (this->device_ == "GPU") {
G
Guanghua Yu 已提交
34
    config.EnableUseGpu(200, this->gpu_id_);
Q
qingqing01 已提交
35
    config.SwitchIrOptim(true);
36
    // use tensorrt
Q
qingqing01 已提交
37 38
    if (run_mode != "fluid") {
      auto precision = paddle_infer::Config::Precision::kFloat32;
39 40 41 42
      if (run_mode == "trt_fp32") {
        precision = paddle_infer::Config::Precision::kFloat32;
      }
      else if (run_mode == "trt_fp16") {
Q
qingqing01 已提交
43
        precision = paddle_infer::Config::Precision::kHalf;
44 45 46
      }
      else if (run_mode == "trt_int8") {
        precision = paddle_infer::Config::Precision::kInt8;
Q
qingqing01 已提交
47
      } else {
48
          printf("run_mode should be 'fluid', 'trt_fp32', 'trt_fp16' or 'trt_int8'");
Q
qingqing01 已提交
49
      }
50
      // set tensorrt
Q
qingqing01 已提交
51
      config.EnableTensorRtEngine(
52
          1 << 30,
Q
qingqing01 已提交
53
          batch_size,
G
Guanghua Yu 已提交
54
          this->min_subgraph_size_,
Q
qingqing01 已提交
55 56
          precision,
          false,
G
Guanghua Yu 已提交
57
          this->trt_calib_mode_);
58 59

      // set use dynamic shape
G
Guanghua Yu 已提交
60
      if (this->use_dynamic_shape_) {
61
        // set DynamicShsape for image tensor
G
Guanghua Yu 已提交
62 63 64
        const std::vector<int> min_input_shape = {1, 3, this->trt_min_shape_, this->trt_min_shape_};
        const std::vector<int> max_input_shape = {1, 3, this->trt_max_shape_, this->trt_max_shape_};
        const std::vector<int> opt_input_shape = {1, 3, this->trt_opt_shape_, this->trt_opt_shape_};
65 66 67 68 69 70 71 72 73 74 75
        const std::map<std::string, std::vector<int>> map_min_input_shape = {{"image", min_input_shape}};
        const std::map<std::string, std::vector<int>> map_max_input_shape = {{"image", max_input_shape}};
        const std::map<std::string, std::vector<int>> map_opt_input_shape = {{"image", opt_input_shape}};

        config.SetTRTDynamicShapeInfo(map_min_input_shape,
                                      map_max_input_shape,
                                      map_opt_input_shape);
        std::cout << "TensorRT dynamic shape enabled" << std::endl;
      }
    }

G
Guanghua Yu 已提交
76 77
  } else if (this->device_ == "XPU"){
    config.EnableXpu(10*1024*1024);
Q
qingqing01 已提交
78 79
  } else {
    config.DisableGpu();
G
Guanghua Yu 已提交
80 81 82 83 84 85
    if (this->use_mkldnn_) {
      config.EnableMKLDNN();
      // cache 10 different shapes for mkldnn to avoid memory leak
      config.SetMkldnnCacheCapacity(10);
    }
    config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
Q
qingqing01 已提交
86 87
  }
  config.SwitchUseFeedFetchOps(false);
G
Guanghua Yu 已提交
88
  config.SwitchIrOptim(true);
Q
qingqing01 已提交
89 90 91 92 93 94 95 96 97
  config.DisableGlogInfo();
  // Memory optimization
  config.EnableMemoryOptim();
  predictor_ = std::move(CreatePredictor(config));
}

// Visualization of detector results: for each detection draw the box
// (rotated polygon or axis-aligned rectangle), then a filled label
// background and the "<class name> <confidence>" text on top of it.
cv::Mat VisualizeResult(const cv::Mat& img,
                        const std::vector<ObjectResult>& results,
                        const std::vector<std::string>& lables,
                        const std::vector<int>& colormap,
                        const bool is_rbox=false) {
  cv::Mat vis_img = img.clone();
  for (int idx = 0; idx < results.size(); ++idx) {
    const ObjectResult& det = results[idx];
    // Build the label text with fixed 4-digit precision for the score.
    std::ostringstream oss;
    oss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
    oss << lables[det.class_id] << " " << det.confidence;
    std::string text = oss.str();
    // Per-class color taken from the flat [r, g, b, r, g, b, ...] colormap.
    cv::Scalar roi_color(colormap[3 * det.class_id + 0],
                         colormap[3 * det.class_id + 1],
                         colormap[3 * det.class_id + 2]);
    int font_face = cv::FONT_HERSHEY_COMPLEX_SMALL;
    double font_scale = 0.5f;
    float thickness = 0.5;
    cv::Size text_size =
        cv::getTextSize(text, font_face, font_scale, thickness, nullptr);

    if (is_rbox) {
      // Rotated box: rect holds four (x, y) corners; connect them in order,
      // wrapping around via % 8 to close the polygon.
      for (int k = 0; k < 4; k++) {
        cv::Point pt1(det.rect[(k * 2) % 8], det.rect[(k * 2 + 1) % 8]);
        cv::Point pt2(det.rect[(k * 2 + 2) % 8], det.rect[(k * 2 + 3) % 8]);
        cv::line(vis_img, pt1, pt2, roi_color, 2);
      }
    } else {
      // Axis-aligned box: rect is [xmin, ymin, xmax, ymax].
      cv::Rect roi(det.rect[0], det.rect[1],
                   det.rect[2] - det.rect[0],
                   det.rect[3] - det.rect[1]);
      // Draw roi object, text, and background
      cv::rectangle(vis_img, roi, roi_color, 2);
    }

    // Anchor the text at the box's first corner.
    cv::Point origin(det.rect[0], det.rect[1]);
    // Filled background sized to the rendered text, sitting above the anchor.
    cv::Rect text_back(det.rect[0],
                       det.rect[1] - text_size.height,
                       text_size.width,
                       text_size.height);
    cv::rectangle(vis_img, text_back, roi_color, -1);
    cv::putText(vis_img,
                text,
                origin,
                font_face,
                font_scale,
                cv::Scalar(255, 255, 255),
                thickness);
  }
  return vis_img;
}

// Run the preprocessing pipeline, filling inputs_ from a copy of the image.
void ObjectDetector::Preprocess(const cv::Mat& ori_im) {
  // Work on a clone so the original mat stays untouched for postprocess.
  cv::Mat rgb_im = ori_im.clone();
  cv::cvtColor(rgb_im, rgb_im, cv::COLOR_BGR2RGB);
  preprocessor_.Run(&rgb_im, &inputs_);
}

void ObjectDetector::Postprocess(
C
cnn 已提交
173
    const std::vector<cv::Mat> mats,
C
cnn 已提交
174
    std::vector<ObjectResult>* result,
C
cnn 已提交
175
    std::vector<int> bbox_num,
C
cnn 已提交
176
    bool is_rbox=false) {
Q
qingqing01 已提交
177
  result->clear();
C
cnn 已提交
178 179 180 181 182 183 184 185 186 187
  int start_idx = 0;
  for (int im_id = 0; im_id < bbox_num.size(); im_id++) {
    cv::Mat raw_mat = mats[im_id];
    for (int j = start_idx; j < start_idx+bbox_num[im_id]; j++) {
      int rh = 1;
      int rw = 1;
      if (config_.arch_ == "Face") {
        rh = raw_mat.rows;
        rw = raw_mat.cols;
      }
Q
qingqing01 已提交
188

C
cnn 已提交
189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210
      if (is_rbox) {
        for (int j = 0; j < bbox_num[im_id]; ++j) {
          // Class id
          int class_id = static_cast<int>(round(output_data_[0 + j * 10]));
          // Confidence score
          float score = output_data_[1 + j * 10];
          int x1 = (output_data_[2 + j * 10] * rw);
          int y1 = (output_data_[3 + j * 10] * rh);
          int x2 = (output_data_[4 + j * 10] * rw);
          int y2 = (output_data_[5 + j * 10] * rh);
          int x3 = (output_data_[6 + j * 10] * rw);
          int y3 = (output_data_[7 + j * 10] * rh);
          int x4 = (output_data_[8 + j * 10] * rw);
          int y4 = (output_data_[9 + j * 10] * rh);
          if (score > threshold_ && class_id > -1) {
            ObjectResult result_item;
            result_item.rect = {x1, y1, x2, y2, x3, y3, x4, y4};
            result_item.class_id = class_id;
            result_item.confidence = score;
            result->push_back(result_item);
          }
        }
C
cnn 已提交
211
      }
C
cnn 已提交
212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231
      else {
        for (int j = 0; j < bbox_num[im_id]; ++j) {
          // Class id
          int class_id = static_cast<int>(round(output_data_[0 + j * 6]));
          // Confidence score
          float score = output_data_[1 + j * 6];
          int xmin = (output_data_[2 + j * 6] * rw);
          int ymin = (output_data_[3 + j * 6] * rh);
          int xmax = (output_data_[4 + j * 6] * rw);
          int ymax = (output_data_[5 + j * 6] * rh);
          int wd = xmax - xmin;
          int hd = ymax - ymin;
          if (score > threshold_ && class_id > -1) {
            ObjectResult result_item;
            result_item.rect = {xmin, ymin, xmax, ymax};
            result_item.class_id = class_id;
            result_item.confidence = score;
            result->push_back(result_item);
          }
        }
C
cnn 已提交
232
      }
Q
qingqing01 已提交
233
    }
C
cnn 已提交
234
    start_idx += bbox_num[im_id];
Q
qingqing01 已提交
235 236 237
  }
}

C
cnn 已提交
238
void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,
Q
qingqing01 已提交
239 240 241
      const double threshold,
      const int warmup,
      const int repeats,
G
Guanghua Yu 已提交
242
      std::vector<ObjectResult>* result,
C
cnn 已提交
243
      std::vector<int>* bbox_num,
G
Guanghua Yu 已提交
244 245
      std::vector<double>* times) {
  auto preprocess_start = std::chrono::steady_clock::now();
C
cnn 已提交
246 247 248 249 250 251 252
  int batch_size = imgs.size();

  // in_data_batch
  std::vector<float> in_data_all;
  std::vector<float> im_shape_all(batch_size * 2);
  std::vector<float> scale_factor_all(batch_size * 2);
  
Q
qingqing01 已提交
253
  // Preprocess image
C
cnn 已提交
254 255 256 257 258 259 260 261 262 263 264 265 266
  for (int bs_idx = 0; bs_idx < batch_size; bs_idx++) {
    cv::Mat im = imgs.at(bs_idx);
    Preprocess(im);
    im_shape_all[bs_idx * 2] = inputs_.im_shape_[0];
    im_shape_all[bs_idx * 2 + 1] = inputs_.im_shape_[1];

    scale_factor_all[bs_idx * 2] = inputs_.scale_factor_[0];
    scale_factor_all[bs_idx * 2 + 1] = inputs_.scale_factor_[1];

    // TODO: reduce cost time
    in_data_all.insert(in_data_all.end(), inputs_.im_data_.begin(), inputs_.im_data_.end());
  }

Q
qingqing01 已提交
267 268 269 270 271
  // Prepare input tensor
  auto input_names = predictor_->GetInputNames();
  for (const auto& tensor_name : input_names) {
    auto in_tensor = predictor_->GetInputHandle(tensor_name);
    if (tensor_name == "image") {
272 273
      int rh = inputs_.in_net_shape_[0];
      int rw = inputs_.in_net_shape_[1];
C
cnn 已提交
274 275
      in_tensor->Reshape({batch_size, 3, rh, rw});
      in_tensor->CopyFromCpu(in_data_all.data());
Q
qingqing01 已提交
276
    } else if (tensor_name == "im_shape") {
C
cnn 已提交
277 278
      in_tensor->Reshape({batch_size, 2});
      in_tensor->CopyFromCpu(im_shape_all.data());
Q
qingqing01 已提交
279
    } else if (tensor_name == "scale_factor") {
C
cnn 已提交
280 281
      in_tensor->Reshape({batch_size, 2});
      in_tensor->CopyFromCpu(scale_factor_all.data());
Q
qingqing01 已提交
282 283
    }
  }
G
Guanghua Yu 已提交
284
  auto preprocess_end = std::chrono::steady_clock::now();
Q
qingqing01 已提交
285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301
  // Run predictor
  for (int i = 0; i < warmup; i++)
  {
    predictor_->Run();
    // Get output tensor
    auto output_names = predictor_->GetOutputNames();
    auto out_tensor = predictor_->GetOutputHandle(output_names[0]);
    std::vector<int> output_shape = out_tensor->shape();
    // Calculate output length
    int output_size = 1;
    if (output_size < 6) {
      std::cerr << "[WARNING] No object detected." << std::endl;
    }
    output_data_.resize(output_size);
    out_tensor->CopyToCpu(output_data_.data()); 
  }

C
cnn 已提交
302
  bool is_rbox = false;
G
Guanghua Yu 已提交
303
  auto inference_start = std::chrono::steady_clock::now();
Q
qingqing01 已提交
304 305 306 307 308 309 310
  for (int i = 0; i < repeats; i++)
  {
    predictor_->Run();
    // Get output tensor
    auto output_names = predictor_->GetOutputNames();
    auto out_tensor = predictor_->GetOutputHandle(output_names[0]);
    std::vector<int> output_shape = out_tensor->shape();
C
cnn 已提交
311 312
    auto out_bbox_num = predictor_->GetOutputHandle(output_names[1]);
    std::vector<int> out_bbox_num_shape = out_bbox_num->shape();
Q
qingqing01 已提交
313 314 315 316 317
    // Calculate output length
    int output_size = 1;
    for (int j = 0; j < output_shape.size(); ++j) {
      output_size *= output_shape[j];
    }
C
cnn 已提交
318
    is_rbox = output_shape[output_shape.size()-1] % 10 == 0;
Q
qingqing01 已提交
319 320 321 322 323 324

    if (output_size < 6) {
      std::cerr << "[WARNING] No object detected." << std::endl;
    }
    output_data_.resize(output_size);
    out_tensor->CopyToCpu(output_data_.data()); 
C
cnn 已提交
325 326 327 328 329 330 331

    int out_bbox_num_size = 1;
    for (int j = 0; j < out_bbox_num_shape.size(); ++j) {
      out_bbox_num_size *= out_bbox_num_shape[j];
    }
    out_bbox_num_data_.resize(out_bbox_num_size);
    out_bbox_num->CopyToCpu(out_bbox_num_data_.data());
Q
qingqing01 已提交
332
  }
G
Guanghua Yu 已提交
333 334
  auto inference_end = std::chrono::steady_clock::now();
  auto postprocess_start = std::chrono::steady_clock::now();
Q
qingqing01 已提交
335
  // Postprocessing result
C
cnn 已提交
336 337 338 339 340 341
  Postprocess(imgs, result, out_bbox_num_data_, is_rbox);
  bbox_num->clear();
  for (int k=0; k<out_bbox_num_data_.size(); k++) {
    int tmp = out_bbox_num_data_[k];
    bbox_num->push_back(tmp);
  }
G
Guanghua Yu 已提交
342 343 344 345 346 347 348 349
  auto postprocess_end = std::chrono::steady_clock::now();

  std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
  times->push_back(double(preprocess_diff.count() * 1000));
  std::chrono::duration<float> inference_diff = inference_end - inference_start;
  times->push_back(double(inference_diff.count() / repeats * 1000));
  std::chrono::duration<float> postprocess_diff = postprocess_end - postprocess_start;
  times->push_back(double(postprocess_diff.count() * 1000));
C
cnn 已提交
350
  
Q
qingqing01 已提交
351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369
}

// Build a flat color table with 3 channel values per class, using the
// PASCAL-VOC-style scheme that spreads the class index's bits (3 at a
// time) across the high bits of the R/G/B channels.
std::vector<int> GenerateColorMap(int num_class) {
  std::vector<int> colormap(3 * num_class, 0);
  for (int cls = 0; cls < num_class; ++cls) {
    int code = cls;
    // Consume 3 bits of the class code per round, placing them one bit
    // position lower each time (starting at bit 7).
    for (int shift = 7; code != 0; --shift, code >>= 3) {
      colormap[cls * 3 + 0] |= ((code >> 0) & 1) << shift;
      colormap[cls * 3 + 1] |= ((code >> 1) & 1) << shift;
      colormap[cls * 3 + 2] |= ((code >> 2) & 1) << shift;
    }
  }
  return colormap;
}

}  // namespace PaddleDetection