//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <functional>
#include <iostream>
#include <numeric>

#include "yaml-cpp/yaml.h"

#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif

#include "paddle_inference_api.h"  // NOLINT

#include "config_parser.h"
#include "results.h"
#include "transforms.h"

#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"
#include "model_code.h"
#endif

namespace PaddleX {
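// Model wraps an exported PaddleX inference model: it creates a Paddle
// predictor from the model directory, applies the preprocessing transforms
// declared in the model config, and exposes predict() overloads for
// classification, detection and segmentation, on single images or batches.
//
// A minimal usage sketch (the model path and image file below are
// illustrative only, not defined in this header):
//
//   PaddleX::Model model;
//   model.Init("./inference_model", /*use_gpu=*/false);
//   cv::Mat im = cv::imread("test.jpg", 1);
//   PaddleX::ClsResult result;
//   model.predict(im, &result);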

class Model {
 public:
  // Initializes the model by creating the underlying predictor.
  void Init(const std::string& model_dir,
            bool use_gpu = false,
            bool use_trt = false,
            int gpu_id = 0,
            std::string key = "",
            int batch_size = 1) {
    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, batch_size);
  }

  // Loads the model from model_dir and creates the Paddle inference predictor.
  void create_predictor(const std::string& model_dir,
                        bool use_gpu = false,
                        bool use_trt = false,
                        int gpu_id = 0,
                        std::string key = "",
                        int batch_size = 1);

  // Parses the model configuration found under model_dir.
  bool load_config(const std::string& model_dir);

  // Runs the preprocessing transforms on a single input image.
  bool preprocess(const cv::Mat& input_im, ImageBlob* blob);
  
  // Runs the preprocessing transforms on a batch of images.
  bool preprocess(const std::vector<cv::Mat>& input_im_batch,
                  std::vector<ImageBlob>& blob_batch);

  // Runs classification on a single image.
  bool predict(const cv::Mat& im, ClsResult* result);

  // Runs classification on a batch of images.
  bool predict(const std::vector<cv::Mat>& im_batch,
               std::vector<ClsResult>& results);

  // Runs detection on a single image.
  bool predict(const cv::Mat& im, DetResult* result);

  // Runs detection on a batch of images.
  bool predict(const std::vector<cv::Mat>& im_batch,
               std::vector<DetResult>& result);
  
  // Runs segmentation on a single image.
  bool predict(const cv::Mat& im, SegResult* result);

  // Runs segmentation on a batch of images.
  bool predict(const std::vector<cv::Mat>& im_batch,
               std::vector<SegResult>& result);
  
  // Model type parsed from the config, e.g. classifier / detector / segmenter.
  std::string type;
  // Model name parsed from the config.
  std::string name;
  // Mapping from class id to label name.
  std::map<int, std::string> labels;
  // Preprocessing transforms declared in the model config.
  Transforms transforms_;
  // Preprocessed input blob for single-image prediction.
  ImageBlob inputs_;
  // Preprocessed input blobs for batch prediction.
  std::vector<ImageBlob> inputs_batch_;
  // Raw output buffer filled by the predictor.
  std::vector<float> outputs_;
  // Underlying Paddle inference predictor.
  std::unique_ptr<paddle::PaddlePredictor> predictor_;
};
}  // namespace PaddleX