//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "yaml-cpp/yaml.h"

#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif

#include "paddle_inference_api.h"  // NOLINT

#include "config_parser.h"  // NOLINT
#include "results.h"  // NOLINT
#include "transforms.h"  // NOLINT

#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"  // NOLINT
#include "model_code.h"  // NOLINT
#endif

namespace PaddleX {

/*
 * @brief
 * This class encapsulates all necessary steps of model inference, which
 * include image matrix preprocessing, model prediction and result postprocessing.
 * The entire inference process can be simplified as below:
 * 1. preprocess the image matrix (resize, padding, ...)
 * 2. run model inference
 * 3. postprocess the results generated by the model
 *
 * @example
 *  PaddleX::Model cls_model;
 *  // initialize model configuration
 *  cls_model.Init(cls_model_dir, use_gpu, use_trt, use_mkl,
 *                 mkl_thread_num, gpu_id, encryption_key);
 *  // define a classification result object
 *  PaddleX::ClsResult cls_result;
 *  // get image matrix from image file
 *  cv::Mat im = cv::imread(image_file_path, 1);
 *  cls_model.predict(im, &cls_result);
 * */
class Model {
 public:
  /*
   * @brief
   * This method aims to initialize the model configuration
   *
   * @param model_dir: the directory which contains model.yml
   * @param use_gpu: whether to use GPU when inferring
   * @param use_trt: whether to use TensorRT when inferring
   * @param use_mkl: whether to use MKL when inferring
   * @param mkl_thread_num: the number of MKL threads when inferring
   * @param gpu_id: the id of the GPU used when inferring on GPU
   * @param key: the decryption key when using an encrypted model
   * @param use_ir_optim: whether to enable IR optimization when inferring
   * */
  void Init(const std::string& model_dir,
            bool use_gpu = false,
            bool use_trt = false,
            bool use_mkl = true,
            int mkl_thread_num = 4,
            int gpu_id = 0,
            std::string key = "",
            bool use_ir_optim = true) {
    create_predictor(
                     model_dir,
                     use_gpu,
                     use_trt,
                     use_mkl,
                     mkl_thread_num,
                     gpu_id,
                     key,
                     use_ir_optim);
C
Channingss 已提交
96 97 98
  }
  void create_predictor(const std::string& model_dir,
                        bool use_gpu = false,
C
Channingss 已提交
99
                        bool use_trt = false,
S
syyxsxx 已提交
100
                        bool use_mkl = true,
S
syyxsxx 已提交
101
                        int mkl_thread_num = 4,
C
Channingss 已提交
102
                        int gpu_id = 0,
J
jack 已提交
103 104
                        std::string key = "",
                        bool use_ir_optim = true);
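
  /*
   * @example
   * A minimal usage sketch of Init (which forwards to create_predictor); the
   * model directory path and parameter values below are illustrative
   * assumptions only:
   *
   *  PaddleX::Model model;
   *  // CPU inference with MKL enabled, 4 MKL threads, no encryption key
   *  model.Init("./inference_model", false, false, true, 4, 0, "", true);
   * */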

  /*
   * @brief
   * This method aims to load the model configuration, which includes
   * the transform steps and the label list
   *
   * @param yaml_input: the model configuration string
   * @return true if the configuration is loaded successfully
   * */
  bool load_config(const std::string& yaml_input);
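
  /*
   * @example
   * A minimal sketch, assuming yaml_input holds the full text of the exported
   * model.yml (Init/create_predictor normally handles this internally):
   *
   *  std::ifstream yaml_file("./inference_model/model.yml");  // needs <fstream>
   *  std::string yaml_input((std::istreambuf_iterator<char>(yaml_file)),
   *                         std::istreambuf_iterator<char>());
   *  bool ok = model.load_config(yaml_input);
   * */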

  /*
   * @brief
   * This method aims to transform a single image matrix; the result is
   * returned through the second parameter.
   *
   * @param input_im: the single image matrix to be transformed
   * @param blob: the raw data of the image matrix after transformation
   * @return true if the image matrix is preprocessed successfully
   * */
  bool preprocess(const cv::Mat& input_im, ImageBlob* blob);

  /*
   * @brief
   * This method aims to transform multiple image matrices; the result is
   * returned through the second parameter.
   *
   * @param input_im_batch: a batch of image matrices to be transformed
   * @param blob_batch: the raw data of the batch of image matrices after transformation
   * @param thread_num: the number of preprocessing threads,
   *                    each thread runs preprocessing on a single image matrix
   * @return true if the batch of image matrices is preprocessed successfully
   * */
  bool preprocess(const std::vector<cv::Mat> &input_im_batch,
                  std::vector<ImageBlob> *blob_batch,
                  int thread_num = 1);

  /*
   * @brief
   * This method aims to execute classification model prediction on a single
   * image matrix; the result is returned through the second parameter.
   *
   * @param im: the single image matrix to be predicted
   * @param result: the postprocessed classification prediction result
   * @return true if the prediction succeeds
   * */
  bool predict(const cv::Mat& im, ClsResult* result);

  /*
   * @brief
   * This method aims to execute classification model prediction on a batch of
   * image matrices; the results are returned through the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: the postprocessed classification prediction results
   * @param thread_num: the number of predicting threads, each thread runs
   *                    prediction on a single image matrix
   * @return true if the prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat> &im_batch,
               std::vector<ClsResult> *results,
               int thread_num = 1);
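
  /*
   * @example
   * A minimal batch-prediction sketch; the image paths are illustrative and
   * the model is assumed to be an already initialized classifier:
   *
   *  std::vector<cv::Mat> im_batch;
   *  im_batch.push_back(cv::imread("1.jpg", 1));
   *  im_batch.push_back(cv::imread("2.jpg", 1));
   *  std::vector<PaddleX::ClsResult> results;
   *  model.predict(im_batch, &results, 2);  // preprocess/predict with 2 threads
   * */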

  /*
   * @brief
   * This method aims to execute detection or instance segmentation model prediction
   * on a single image matrix; the result is returned through the second parameter.
   *
   * @param im: the single image matrix to be predicted
   * @param result: the postprocessed detection or instance segmentation prediction result
   * @return true if the prediction succeeds
   * */
  bool predict(const cv::Mat& im, DetResult* result);
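
  /*
   * @example
   * A minimal detection sketch; the Box fields used below (category, score)
   * are assumed from results.h:
   *
   *  PaddleX::DetResult det_result;
   *  model.predict(cv::imread("test.jpg", 1), &det_result);
   *  for (const auto& box : det_result.boxes) {
   *    std::cout << box.category << " " << box.score << std::endl;
   *  }
   * */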

  /*
   * @brief
   * This method aims to execute detection or instance segmentation model prediction
   * on a batch of image matrices; the results are returned through the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: the postprocessed detection or instance segmentation prediction results
   * @param thread_num: the number of predicting threads, each thread runs
   *                    prediction on a single image matrix
   * @return true if the prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat> &im_batch,
               std::vector<DetResult> *results,
               int thread_num = 1);

  /*
   * @brief
   * This method aims to execute segmentation model prediction on a single
   * image matrix; the result is returned through the second parameter.
   *
   * @param im: the single image matrix to be predicted
   * @param result: the postprocessed segmentation prediction result
   * @return true if the prediction succeeds
   * */
  bool predict(const cv::Mat& im, SegResult* result);
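
  /*
   * @example
   * A minimal segmentation sketch; the label_map field used below (flattened
   * per-pixel category ids stored in its data member) is assumed from results.h:
   *
   *  PaddleX::SegResult seg_result;
   *  model.predict(cv::imread("test.jpg", 1), &seg_result);
   *  std::cout << "pixels: " << seg_result.label_map.data.size() << std::endl;
   * */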

  /*
   * @brief
   * This method aims to execute segmentation model prediction on a batch of
   * image matrices; the results are returned through the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: the postprocessed segmentation prediction results
   * @param thread_num: the number of predicting threads, each thread runs
   *                    prediction on a single image matrix
   * @return true if the prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat> &im_batch,
               std::vector<SegResult> *results,
               int thread_num = 1);

  // model type, one of 3 types: classifier, detector, segmenter
  std::string type;
  // model name, such as FasterRCNN, YOLOV3 and so on.
  std::string name;
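  // label list: map of category id to label name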
  std::map<int, std::string> labels;
  // transform(preprocessing) pipeline manager
  Transforms transforms_;
  // single input preprocessed data
  ImageBlob inputs_;
  // batch input preprocessed data
  std::vector<ImageBlob> inputs_batch_;
  // raw data of prediction results
  std::vector<float> outputs_;
  // the predictor that runs model inference
  std::unique_ptr<paddle::PaddlePredictor> predictor_;
};
}  // namespace PaddleX