//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <glog/logging.h>
#include <yaml-cpp/yaml.h>

#include <vector>
#include <string>
#include <utility>
#include <memory>
#include <unordered_map>
#include <iostream>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

namespace PaddleDetection {

// Object for storing all preprocessed data
class ImageBlob {
 public:
  // Image width and height
  std::vector<float> im_shape_;
  // Buffer for image data after preprocessing
  std::vector<float> im_data_;
  // Shape of the data fed to the network (after padding)
  std::vector<float> in_net_shape_;
  // Scale factor from the origin image size to the network input size
  std::vector<float> scale_factor_;
};
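// Illustrative note (hypothetical numbers, not taken from this file): if a
// 1280x1280 source image is resized to 640x640, im_shape_ would hold
// {640, 640} and scale_factor_ roughly {0.5, 0.5}.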

// Abstract base class for a preprocessing operation
class PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) = 0;
  virtual void Run(cv::Mat* im, ImageBlob* data) = 0;
};

class InitInfo : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {}
  virtual void Run(cv::Mat* im, ImageBlob* data);
};

class NormalizeImage : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    mean_ = item["mean"].as<std::vector<float>>();
    scale_ = item["std"].as<std::vector<float>>();
    is_scale_ = item["is_scale"].as<bool>();
  }

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  // CHW or HWC
  std::vector<float> mean_;
  std::vector<float> scale_;
  bool is_scale_;
};
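// Example YAML entry consumed by NormalizeImage::Init (illustrative only;
// the key names match the fields read above, the values are typical
// ImageNet statistics and are not taken from this repository):
//
//   - type: NormalizeImage
//     mean: [0.485, 0.456, 0.406]
//     std: [0.229, 0.224, 0.225]
//     is_scale: true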

class Permute : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {}
  virtual void Run(cv::Mat* im, ImageBlob* data);
};

class Resize : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    interp_ = item["interp"].as<int>();
    keep_ratio_ = item["keep_ratio"].as<bool>();
    target_size_ = item["target_size"].as<std::vector<int>>();
  }

  // Compute the best resize scale for the x- and y-dimensions
  std::pair<float, float> GenerateScale(const cv::Mat& im);

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  int interp_;
  bool keep_ratio_;
  std::vector<int> target_size_;
  std::vector<int> in_net_shape_;
};
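// Example YAML entry consumed by Resize::Init (illustrative; the key names
// follow the fields read above, the values are hypothetical):
//
//   - type: Resize
//     interp: 2
//     keep_ratio: false
//     target_size: [640, 640]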

// Models with FPN need input shape % stride == 0
class PadStride : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    stride_ = item["stride"].as<int>();
  }

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  int stride_;
};
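// Illustrative example: with stride_ = 32, a 640x427 input would be padded
// up to 640x448 so that both dimensions are multiples of the stride.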

class TopDownEvalAffine : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    trainsize_ = item["trainsize"].as<std::vector<int>>();
  }

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  int interp_ = 1;
  std::vector<int> trainsize_;
};
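// Example YAML entry consumed by TopDownEvalAffine::Init (illustrative; the
// trainsize values are hypothetical):
//
//   - type: TopDownEvalAffine
//     trainsize: [192, 256]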

// Crop the region given by area out of img into crop_img (box expanded by
// expandratio); outputs the crop center and scale.
void CropImg(cv::Mat& img, cv::Mat& crop_img, std::vector<int>& area,
             std::vector<float>& center, std::vector<float>& scale,
             float expandratio = 0.15);

class Preprocessor {
 public:
  void Init(const YAML::Node& config_node) {
    // Initialize image info first
    ops_["InitInfo"] = std::make_shared<InitInfo>();
    for (const auto& item : config_node) {
      auto op_name = item["type"].as<std::string>();

      ops_[op_name] = CreateOp(op_name);
      ops_[op_name]->Init(item);
    }
  }

  std::shared_ptr<PreprocessOp> CreateOp(const std::string& name) {
    if (name == "Resize") {
      return std::make_shared<Resize>();
    } else if (name == "Permute") {
      return std::make_shared<Permute>();
    } else if (name == "NormalizeImage") {
      return std::make_shared<NormalizeImage>();
    } else if (name == "PadStride") {
      // use PadStride instead of PadBatch
      return std::make_shared<PadStride>();
    } else if (name == "TopDownEvalAffine") {
      return std::make_shared<TopDownEvalAffine>();
    }
    std::cerr << "Cannot find PreprocessOp: " << name
              << ", returning nullptr" << std::endl;
    return nullptr;
  }

  void Run(cv::Mat* im, ImageBlob* data);

 public:
  static const std::vector<std::string> RUN_ORDER;

 private:
  std::unordered_map<std::string, std::shared_ptr<PreprocessOp>> ops_;
};
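// Minimal usage sketch (illustrative; the config file name and the
// "Preprocess" key are assumptions about the exported model config, not
// guaranteed by this header):
//
//   YAML::Node config = YAML::LoadFile("infer_cfg.yml");
//   PaddleDetection::Preprocessor preprocessor;
//   preprocessor.Init(config["Preprocess"]);
//
//   cv::Mat im = cv::imread("demo.jpg");
//   cv::cvtColor(im, im, cv::COLOR_BGR2RGB);
//
//   PaddleDetection::ImageBlob blob;
//   preprocessor.Run(&im, &blob);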

}  // namespace PaddleDetection