//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <glog/logging.h>
#include <yaml-cpp/yaml.h>

#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

namespace PaddleDetection {

// Object for storing all preprocessed data
class ImageBlob {
 public:
  // image width and height
  std::vector<float> im_shape_;
  // Buffer for image data after preprocessing
  std::vector<float> im_data_;
  // Shape of the data fed to the network (after padding)
  std::vector<float> in_net_shape_;
  // Evaluation image width and height
  // std::vector<float>  eval_im_size_f_;
  // Scale factor between the preprocessed image size and the original image size
  std::vector<float> scale_factor_;
  // Image fed to the network after preprocessing
  cv::Mat in_net_im_;
};

// Abstraction of the preprocessing operation class
class PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) = 0;
  virtual void Run(cv::Mat* im, ImageBlob* data) = 0;
};
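
// Illustrative sketch (not part of the original header): a new preprocessing
// step plugs into this interface by subclassing PreprocessOp, reading its
// parameters from the YAML node in Init(), and transforming the image and
// ImageBlob in Run(). The "GrayScale" op and its "enabled" key below are
// hypothetical; a real op would also need a branch in Preprocessor::CreateOp.
//
//   class GrayScale : public PreprocessOp {
//    public:
//     virtual void Init(const YAML::Node& item) {
//       enabled_ = item["enabled"].as<bool>();
//     }
//     virtual void Run(cv::Mat* im, ImageBlob* data) {
//       if (enabled_) {
//         cv::cvtColor(*im, *im, cv::COLOR_BGR2GRAY);
//         cv::cvtColor(*im, *im, cv::COLOR_GRAY2BGR);
//       }
//     }
//
//    private:
//     bool enabled_ = true;
//   };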

class InitInfo : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {}
  virtual void Run(cv::Mat* im, ImageBlob* data);
};

class NormalizeImage : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    mean_ = item["mean"].as<std::vector<float>>();
    scale_ = item["std"].as<std::vector<float>>();
    is_scale_ = item["is_scale"].as<bool>();
  }

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  // CHW or HWC
  std::vector<float> mean_;
  std::vector<float> scale_;
  bool is_scale_ = true;
};
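
// Sketch of the per-pixel transform that NormalizeImage's fields imply:
// each channel value x becomes (x - mean) / std, optionally divided by 255
// first when is_scale_ is true. This states the usual convention and is an
// assumption, not a transcript of the implementation in the .cc file.
//
//   float NormalizeValue(float x, float mean, float std_dev, bool is_scale) {
//     if (is_scale) x /= 255.0f;
//     return (x - mean) / std_dev;
//   }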

class Permute : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {}
  virtual void Run(cv::Mat* im, ImageBlob* data);
};

class Resize : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    interp_ = item["interp"].as<int>();
    keep_ratio_ = item["keep_ratio"].as<bool>();
    target_size_ = item["target_size"].as<std::vector<int>>();
  }

  // Compute the best resize scale for the x- and y-dimensions
  std::pair<float, float> GenerateScale(const cv::Mat& im);

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  int interp_;
  bool keep_ratio_;
  std::vector<int> target_size_;
  std::vector<int> in_net_shape_;
};
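
// Hedged sketch of what Resize::GenerateScale could return. With keep_ratio_
// a single scale is commonly chosen so that both the short and the long side
// fit within target_size_; otherwise each axis is scaled independently. This
// mirrors the usual detector resize convention and is an assumption, not a
// copy of the repository's implementation (target_size_ assumed {height, width}).
//
//   int short_side = std::min(im.rows, im.cols);
//   int long_side = std::max(im.rows, im.cols);
//   if (keep_ratio_) {
//     float s = std::min(
//         static_cast<float>(std::min(target_size_[0], target_size_[1])) / short_side,
//         static_cast<float>(std::max(target_size_[0], target_size_[1])) / long_side);
//     return {s, s};
//   }
//   return {static_cast<float>(target_size_[1]) / im.cols,   // x scale
//           static_cast<float>(target_size_[0]) / im.rows};  // y scale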

class LetterBoxResize : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    target_size_ = item["target_size"].as<std::vector<int>>();
  }

  float GenerateScale(const cv::Mat& im);

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  std::vector<int> target_size_;
  std::vector<int> in_net_shape_;
};
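
// LetterBoxResize keeps the aspect ratio and pads the remainder, so a natural
// LetterBoxResize::GenerateScale is the smaller of the two per-axis ratios.
// A minimal sketch, assuming target_size_ is {height, width}:
//
//   float scale = std::min(static_cast<float>(target_size_[0]) / im.rows,
//                          static_cast<float>(target_size_[1]) / im.cols);
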
// Models with FPN need input shape % stride == 0
class PadStride : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    stride_ = item["stride"].as<int>();
  }

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  int stride_;
};
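
// Worked example of PadStride's constraint: with stride_ = 32, a 640x427
// image is padded on the bottom/right to 640x448, because
// ceil(427 / 32) * 32 = 448 while 640 is already a multiple of 32.
// A minimal sketch of the padded shape:
//
//   int padded_h = (im.rows + stride_ - 1) / stride_ * stride_;
//   int padded_w = (im.cols + stride_ - 1) / stride_ * stride_;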

class TopDownEvalAffine : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    trainsize_ = item["trainsize"].as<std::vector<int>>();
  }

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  int interp_ = 1;
  std::vector<int> trainsize_;
};

class WarpAffine : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    input_h_ = item["input_h"].as<int>();
    input_w_ = item["input_w"].as<int>();
    keep_res_ = item["keep_res"].as<bool>();
  }

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  int input_h_;
  int input_w_;
  int interp_ = 1;
  bool keep_res_ = true;
  int pad_ = 31;
};

void CropImg(cv::Mat& img,
             cv::Mat& crop_img,
             std::vector<int>& area,
             std::vector<float>& center,
             std::vector<float>& scale,
             float expandratio = 0.15);

// check whether the input size is dynamic
bool CheckDynamicInput(const std::vector<cv::Mat>& imgs);

// Pad images in batch
std::vector<cv::Mat> PadBatch(const std::vector<cv::Mat>& imgs);
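
// Hedged sketch of how these helpers are meant to combine: CheckDynamicInput
// reports whether the images in a batch differ in size, and PadBatch pads
// every image to a common shape so they can form one input tensor. The exact
// padding value and strategy are left to the .cc implementation.
//
//   std::vector<cv::Mat> batch = imgs;
//   if (CheckDynamicInput(batch)) {
//     batch = PadBatch(batch);  // all cv::Mat now share the same rows/cols
//   }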

class Preprocessor {
 public:
  void Init(const YAML::Node& config_node) {
    // initialize image info at first
    ops_["InitInfo"] = std::make_shared<InitInfo>();
    for (const auto& item : config_node) {
      auto op_name = item["type"].as<std::string>();

      ops_[op_name] = CreateOp(op_name);
      ops_[op_name]->Init(item);
    }
  }

  std::shared_ptr<PreprocessOp> CreateOp(const std::string& name) {
    if (name == "Resize") {
      return std::make_shared<Resize>();
    } else if (name == "LetterBoxResize") {
      return std::make_shared<LetterBoxResize>();
    } else if (name == "Permute") {
      return std::make_shared<Permute>();
    } else if (name == "NormalizeImage") {
      return std::make_shared<NormalizeImage>();
    } else if (name == "PadStride") {
      // use PadStride instead of PadBatch
      return std::make_shared<PadStride>();
    } else if (name == "TopDownEvalAffine") {
      return std::make_shared<TopDownEvalAffine>();
    } else if (name == "WarpAffine") {
      return std::make_shared<WarpAffine>();
    }
    std::cerr << "Cannot find preprocessing op: " << name
              << ", returning nullptr" << std::endl;
    return nullptr;
  }

  void Run(cv::Mat* im, ImageBlob* data);

 public:
  static const std::vector<std::string> RUN_ORDER;

 private:
  std::unordered_map<std::string, std::shared_ptr<PreprocessOp>> ops_;
};
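
// Usage sketch (illustrative only): a caller typically parses the exported
// model's preprocessing config into a YAML node, builds the ops once with
// Init(), and then runs them per image. The file name "infer_cfg.yml" and the
// "Preprocess" key are assumptions based on common PaddleDetection exports.
//
//   YAML::Node config = YAML::LoadFile("infer_cfg.yml");
//   PaddleDetection::Preprocessor preprocessor;
//   preprocessor.Init(config["Preprocess"]);
//
//   cv::Mat im = cv::imread("demo.jpg");
//   PaddleDetection::ImageBlob blob;
//   preprocessor.Run(&im, &blob);  // blob.im_data_ holds the network input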

}  // namespace PaddleDetection