//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "yaml-cpp/yaml.h"

#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif

#include "paddle_inference_api.h"  // NOLINT

#include "config_parser.h"  // NOLINT
#include "results.h"  // NOLINT
#include "transforms.h"  // NOLINT

#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"  // NOLINT
#include "model_code.h"  // NOLINT
#endif

namespace PaddleX {

/*
 * @brief
 * This class encapsulates all the necessary steps of model inference:
 * image matrix preprocessing, model prediction and result postprocessing.
 * The entire inference process can be simplified as below:
 * 1. preprocess the image matrix (resize, padding, ......)
 * 2. run model inference
 * 3. postprocess the results generated by model inference
 *
 * @example
 *  PaddleX::Model cls_model;
 *  // initialize model configuration
 *  cls_model.Init(cls_model_dir, use_gpu, use_trt, use_mkl, gpu_id, encryption_key);
 *  // define a classification result object
 *  PaddleX::ClsResult cls_result;
 *  // read the image matrix from an image file
 *  cv::Mat im = cv::imread(image_file_path, 1);
 *  cls_model.predict(im, &cls_result);
 * */
class Model {
 public:
  /*
   * @brief
   * This method aims to initialize the model configuration.
   *
   * @param model_dir: the directory which contains model.yml
   * @param use_gpu: whether to use GPU when inferring
   * @param use_trt: whether to use TensorRT when inferring
   * @param use_mkl: whether to use MKL when inferring on CPU
   * @param gpu_id: the id of the GPU used when inferring on GPU
   * @param key: the decryption key when using an encrypted model
   * @param mkl_thread_num: the number of threads used by MKL
   * @param use_ir_optim: whether to enable IR optimization when inferring
   * */
  void Init(const std::string& model_dir,
            bool use_gpu = false,
            bool use_trt = false,
            bool use_mkl = true,
            int gpu_id = 0,
            std::string key = "",
            int mkl_thread_num = 4,
            bool use_ir_optim = true) {
    create_predictor(
                     model_dir,
                     use_gpu,
                     use_trt,
                     use_mkl,
                     gpu_id,
                     key,
                     mkl_thread_num,
                     use_ir_optim);
  }
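
  /*
   * A minimal initialization sketch (assumed usage; the model directory below
   * is a placeholder): running on GPU 0 with an unencrypted exported
   * inference model, keeping the remaining defaults declared above.
   *
   *  PaddleX::Model model;
   *  model.Init("/path/to/inference_model",  // directory containing model.yml
   *             true,   // use_gpu
   *             false,  // use_trt
   *             true,   // use_mkl (assumed to matter only for CPU inference)
   *             0,      // gpu_id
   *             "");    // key: empty string is assumed to mean an unencrypted model
   * */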

  void create_predictor(const std::string& model_dir,
                        bool use_gpu = false,
                        bool use_trt = false,
                        bool use_mkl = true,
                        int gpu_id = 0,
                        std::string key = "",
                        int mkl_thread_num = 4,
                        bool use_ir_optim = true);

  /*
   * @brief
   * This method aims to load the model configuration, which includes
   * the transform steps and the label list.
   *
   * @param yaml_input: model configuration string
   * @return true if the configuration is loaded successfully
   * */
  bool load_config(const std::string& yaml_input);

  /*
   * @brief
   * This method aims to transform a single image matrix; the result is
   * returned via the second parameter.
   *
   * @param input_im: single image matrix to be transformed
   * @param blob: the raw data of the image matrix after transformation
   * @return true if the image matrix is preprocessed successfully
   * */
  bool preprocess(const cv::Mat& input_im, ImageBlob* blob);

  /*
   * @brief
   * This method aims to transform multiple image matrices; the results are
   * returned via the second parameter.
   *
   * @param input_im_batch: a batch of image matrices to be transformed
   * @param blob_batch: raw data of the batch of image matrices after transformation
   * @param thread_num: the number of preprocessing threads,
   *                    each thread runs preprocessing on a single image matrix
   * @return true if the batch of image matrices is preprocessed successfully
   * */
  bool preprocess(const std::vector<cv::Mat> &input_im_batch,
                  std::vector<ImageBlob> *blob_batch,
                  int thread_num = 1);

  /*
   * @brief
   * This method aims to execute classification model prediction on a single
   * image matrix; the result is returned via the second parameter.
   *
   * @param im: single image matrix to be predicted
   * @param result: classification prediction result data after postprocessing
   * @return true if the prediction succeeds
   * */
  bool predict(const cv::Mat& im, ClsResult* result);

  /*
   * @brief
   * This method aims to execute classification model prediction on a batch of
   * image matrices; the results are returned via the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: a batch of classification prediction result data after postprocessing
   * @param thread_num: the number of predicting threads, each thread runs prediction
   *                    on a single image matrix
   * @return true if the prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat> &im_batch,
               std::vector<ClsResult> *results,
               int thread_num = 1);
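
  /*
   * A batch classification usage sketch (assumed usage; the image paths are
   * placeholders and ClsResult is assumed to expose `category` and `score`
   * as defined in results.h):
   *
   *  // model is a PaddleX::Model initialized with a classification model
   *  std::vector<cv::Mat> im_batch = {cv::imread("1.jpg", 1),
   *                                   cv::imread("2.jpg", 1)};
   *  std::vector<PaddleX::ClsResult> results;
   *  // run prediction with 2 worker threads
   *  if (model.predict(im_batch, &results, 2)) {
   *    for (const auto& r : results) {
   *      std::cout << r.category << " " << r.score << std::endl;
   *    }
   *  }
   * */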

  /*
   * @brief
   * This method aims to execute detection or instance segmentation model
   * prediction on a single image matrix; the result is returned via the
   * second parameter.
   *
   * @param im: single image matrix to be predicted
   * @param result: detection or instance segmentation prediction result data after postprocessing
   * @return true if the prediction succeeds
   * */
  bool predict(const cv::Mat& im, DetResult* result);
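
  /*
   * A detection usage sketch (assumed usage; DetResult is assumed to expose a
   * `boxes` vector whose elements carry `category` and `score`, as defined
   * in results.h):
   *
   *  // det_model is a PaddleX::Model initialized with a detection model
   *  PaddleX::DetResult det_result;
   *  cv::Mat im = cv::imread("test.jpg", 1);
   *  if (det_model.predict(im, &det_result)) {
   *    for (const auto& box : det_result.boxes) {
   *      std::cout << box.category << " " << box.score << std::endl;
   *    }
   *  }
   * */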

  /*
   * @brief
   * This method aims to execute detection or instance segmentation model
   * prediction on a batch of image matrices; the results are returned via the
   * second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: detection or instance segmentation prediction result data after postprocessing
   * @param thread_num: the number of predicting threads, each thread runs prediction
   *                    on a single image matrix
   * @return true if the prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat> &im_batch,
               std::vector<DetResult> *results,
               int thread_num = 1);

  /*
   * @brief
   * This method aims to execute segmentation model prediction on a single
   * image matrix; the result is returned via the second parameter.
   *
   * @param im: single image matrix to be predicted
   * @param result: segmentation prediction result data after postprocessing
   * @return true if the prediction succeeds
   * */
  bool predict(const cv::Mat& im, SegResult* result);
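
  /*
   * A segmentation usage sketch (assumed usage; SegResult is assumed to expose
   * `label_map.data` and `label_map.shape` as defined in results.h):
   *
   *  // seg_model is a PaddleX::Model initialized with a segmentation model
   *  PaddleX::SegResult seg_result;
   *  cv::Mat im = cv::imread("test.jpg", 1);
   *  if (seg_model.predict(im, &seg_result)) {
   *    // per-pixel labels are stored in seg_result.label_map.data
   *    std::cout << "label map size: " << seg_result.label_map.shape[0]
   *              << " x " << seg_result.label_map.shape[1] << std::endl;
   *  }
   * */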

  /*
   * @brief
   * This method aims to execute segmentation model prediction on a batch of
   * image matrices; the results are returned via the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: segmentation prediction result data after postprocessing
   * @param thread_num: the number of predicting threads, each thread runs prediction
   *                    on a single image matrix
   * @return true if the prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat> &im_batch,
               std::vector<SegResult> *results,
               int thread_num = 1);

  // model type: classifier, detector or segmenter
  std::string type;
  // model name, such as FasterRCNN, YOLOV3 and so on
  std::string name;
  // label map: category id -> category name
  std::map<int, std::string> labels;
  // transform (preprocessing) pipeline manager
  Transforms transforms_;
  // preprocessed data of a single input
  ImageBlob inputs_;
  // preprocessed data of a batch of inputs
  std::vector<ImageBlob> inputs_batch_;
  // raw data of prediction results
  std::vector<float> outputs_;
  // the predictor that runs model inference
  std::unique_ptr<paddle::PaddlePredictor> predictor_;
};
}  // namespace PaddleX