// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <dirent.h>
#include <pthread.h>

#include <fstream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "core/configure/include/configure_parser.h"
#include "core/configure/inferencer_configure.pb.h"
#include "core/predictor/common/utils.h"
#include "core/predictor/framework/infer.h"
#include "paddle_inference_api.h"  // NOLINT

namespace baidu {
namespace paddle_serving {
namespace inference {

using paddle_infer::Config;
Z
zhangjun 已提交
36
using paddle_infer::PrecisionType;
Z
zhangjun 已提交
37 38 39 40
using paddle_infer::Predictor;
using paddle_infer::Tensor;
using paddle_infer::CreatePredictor;

Z
zhangjun 已提交
41
DECLARE_int32(gpuid);
Z
fix  
zhangjun 已提交
42 43
DECLARE_string(precision);
DECLARE_bool(use_calib);
Z
zhangjun 已提交
44

Z
zhangjun 已提交
45 46
static const int max_batch = 32;
static const int min_subgraph_size = 3;
Z
fix  
zhangjun 已提交
47 48
static PrecisionType precision_type;

Z
update  
zhangjun 已提交
49 50 51
// Builds a single fake batch (one 2 x 3 x 300 x 300 FP32 "image" tensor)
// intended as warmup data for the MKLDNN quantizer.
// NOTE(review): the data buffer is resized but never filled, so the warmup
// would run on uninitialized bytes -- confirm before wiring this into
// SetWarmupData().
// 'inline' avoids multiple-definition link errors when this header is
// included by several translation units.
inline std::shared_ptr<std::vector<paddle::PaddleTensor>> PrepareWarmupData() {
  auto warmup_data = std::make_shared<std::vector<paddle::PaddleTensor>>(1);
  paddle::PaddleTensor images;
  images.name = "image";
  images.shape = {2, 3, 300, 300};
  images.dtype = paddle::PaddleDType::FLOAT32;
  images.data.Resize(sizeof(float) * 2 * 3 * 300 * 300);

  (*warmup_data)[0] = std::move(images);
  return warmup_data;
}

Z
fix  
zhangjun 已提交
61 62 63 64 65 66 67 68 69 70 71
PrecisionType GetPrecision(const std::string& precision_data) {
  std::string precision_type = predictor::ToLower(precision_data);
  if (precision_type == "fp32") {
    return PrecisionType::kFloat32;
  } else if (precision_type == "int8") {
    return PrecisionType::kInt8;
  } else if (precision_type == "fp16") {
    return PrecisionType::kHalf;
  }
  return PrecisionType::kFloat32;
}
// Returns the name of the first regular file in directory 'path' whose name
// contains any of the strings in 'suffixVector'; returns "" when the
// directory cannot be opened or no file matches.
// NOTE(review): std::string::find matches the pattern anywhere in the file
// name, not only at its end -- confirm that is intended.
// 'inline' avoids multiple-definition link errors when this header is
// included by several translation units.
inline const std::string getFileBySuffix(
    const std::string& path, const std::vector<std::string>& suffixVector) {
  DIR* dp = nullptr;
  std::string fileName = "";
  struct dirent* dirp = nullptr;
  if ((dp = opendir(path.c_str())) == nullptr) {
    return fileName;
  }
  while ((dirp = readdir(dp)) != nullptr) {
    if (dirp->d_type == DT_REG) {
      // Range-for replaces the old 'int idx' loop, which compared a signed
      // index against the unsigned vector size.
      for (const std::string& suffix : suffixVector) {
        if (std::string(dirp->d_name).find(suffix) != std::string::npos) {
          fileName = static_cast<std::string>(dirp->d_name);
          break;
        }
      }
    }
    if (fileName.length() != 0) break;
  }
  closedir(dp);
  return fileName;
}

T
TeslaZhao 已提交
97 98 99
// Engine Core is the base class of inference engines, which can be derived from
// paddle Inference Engine, or inference engines of other machine learning
// platforms
H
HexToString 已提交
100
class EngineCore {
Z
zhangjun 已提交
101
 public:
H
HexToString 已提交
102
  virtual ~EngineCore() {}
Z
zhangjun 已提交
103
  virtual std::vector<std::string> GetInputNames() {
Z
zhangjun 已提交
104
    return _predictor->GetInputNames();
Z
zhangjun 已提交
105 106 107
  }

  virtual std::unique_ptr<Tensor> GetInputHandle(const std::string& name) {
Z
zhangjun 已提交
108
    return _predictor->GetInputHandle(name);
Z
zhangjun 已提交
109 110 111
  }

  virtual std::vector<std::string> GetOutputNames() {
Z
zhangjun 已提交
112
    return _predictor->GetOutputNames();
Z
zhangjun 已提交
113 114 115
  }

  virtual std::unique_ptr<Tensor> GetOutputHandle(const std::string& name) {
Z
zhangjun 已提交
116
    return _predictor->GetOutputHandle(name);
Z
zhangjun 已提交
117 118 119
  }

  virtual bool Run() {
Z
zhangjun 已提交
120
    if (!_predictor->Run()) {
Z
zhangjun 已提交
121 122 123 124 125 126
      LOG(ERROR) << "Failed call Run with paddle predictor";
      return false;
    }
    return true;
  }

127
  virtual int create(const configure::EngineDesc& conf, int gpu_id) = 0;
Z
zhangjun 已提交
128

Z
update  
zhangjun 已提交
129 130
  virtual int clone(void* predictor) {
    if (predictor == NULL) {
Z
zhangjun 已提交
131 132 133
      LOG(ERROR) << "origin paddle Predictor is null.";
      return -1;
    }
Z
zhangjun 已提交
134 135
    Predictor* prep = static_cast<Predictor*>(predictor);
    _predictor = prep->Clone();
Z
update  
zhangjun 已提交
136 137
    if (_predictor.get() == NULL) {
      LOG(ERROR) << "fail to clone paddle predictor: " << predictor;
Z
zhangjun 已提交
138 139 140 141 142
      return -1;
    }
    return 0;
  }

Z
update  
zhangjun 已提交
143
  virtual void* get() { return _predictor.get(); }
Z
zhangjun 已提交
144 145

 protected:
T
TeslaZhao 已提交
146 147 148 149 150
  // _predictor is a prediction instance of Paddle Inference.
  // when inferring on the CPU, _predictor is bound to a model.
  // when inferring on the GPU, _predictor is bound to a model and a GPU card.
  // Therefore, when using GPU multi-card inference, you need to create multiple
  // EngineCore.
Z
update  
zhangjun 已提交
151
  std::shared_ptr<Predictor> _predictor;
Z
zhangjun 已提交
152 153
};

Z
update  
zhangjun 已提交
154
// Paddle Inference Engine
H
HexToString 已提交
155
class PaddleInferenceEngine : public EngineCore {
Z
zhangjun 已提交
156
 public:
157
  int create(const configure::EngineDesc& engine_conf, int gpu_id) {
Z
update  
zhangjun 已提交
158 159
    std::string model_path = engine_conf.model_dir();
    if (access(model_path.c_str(), F_OK) == -1) {
Z
zhangjun 已提交
160
      LOG(ERROR) << "create paddle predictor failed, path not exits: "
Z
update  
zhangjun 已提交
161
                 << model_path;
Z
zhangjun 已提交
162 163 164 165
      return -1;
    }

    Config config;
H
HexToString 已提交
166 167 168 169 170 171 172 173 174 175 176 177 178
    std::vector<std::string> suffixParaVector = {".pdiparams", "__params__"};
    std::vector<std::string> suffixModelVector = {".pdmodel", "__model__"};
    std::string paraFileName = getFileBySuffix(model_path, suffixParaVector);
    std::string modelFileName = getFileBySuffix(model_path, suffixModelVector);

    std::string encryParaPath = model_path + "/encrypt_model";
    std::string encryModelPath = model_path + "/encrypt_params";
    std::string encryKeyPath = model_path + "/key";

    // encrypt model
    if (access(encryParaPath.c_str(), F_OK) != -1 &&
        access(encryModelPath.c_str(), F_OK) != -1 &&
        access(encryKeyPath.c_str(), F_OK) != -1) {
Z
zhangjun 已提交
179
      // decrypt model
H
HexToString 已提交
180

Z
zhangjun 已提交
181
      std::string model_buffer, params_buffer, key_buffer;
H
HexToString 已提交
182 183 184
      predictor::ReadBinaryFile(model_path + "/encrypt_model", &model_buffer);
      predictor::ReadBinaryFile(model_path + "/encrypt_params", &params_buffer);
      predictor::ReadBinaryFile(model_path + "/key", &key_buffer);
Z
zhangjun 已提交
185 186 187 188 189 190 191 192 193

      auto cipher = paddle::MakeCipher("");
      std::string real_model_buffer = cipher->Decrypt(model_buffer, key_buffer);
      std::string real_params_buffer =
          cipher->Decrypt(params_buffer, key_buffer);
      config.SetModelBuffer(&real_model_buffer[0],
                            real_model_buffer.size(),
                            &real_params_buffer[0],
                            real_params_buffer.size());
H
HexToString 已提交
194 195 196
    } else if (paraFileName.length() != 0 && modelFileName.length() != 0) {
      config.SetParamsFile(model_path + "/" + paraFileName);
      config.SetProgFile(model_path + "/" + modelFileName);
Z
update  
zhangjun 已提交
197
    } else {
H
HexToString 已提交
198
      config.SetModel(model_path);
Z
zhangjun 已提交
199
    }
Z
zhangjun 已提交
200

Z
zhangjun 已提交
201
    config.SwitchSpecifyInputNames(true);
Z
update  
zhangjun 已提交
202 203 204
    config.SetCpuMathLibraryNumThreads(1);
    if (engine_conf.has_use_gpu() && engine_conf.use_gpu()) {
      // 2000MB GPU memory
205 206 207 208 209
      config.EnableUseGpu(50, gpu_id);
      if (engine_conf.has_gpu_multi_stream() &&
          engine_conf.gpu_multi_stream()) {
        config.EnableGpuMultiStream();
      }
Z
zhangjun 已提交
210
    }
Z
fix  
zhangjun 已提交
211
    precision_type = GetPrecision(FLAGS_precision);
Z
zhangjun 已提交
212

Z
update  
zhangjun 已提交
213 214 215 216 217 218 219
    if (engine_conf.has_enable_ir_optimization() &&
        !engine_conf.enable_ir_optimization()) {
      config.SwitchIrOptim(false);
    } else {
      config.SwitchIrOptim(true);
    }

Z
update  
zhangjun 已提交
220
    if (engine_conf.has_use_trt() && engine_conf.use_trt()) {
221
      config.SwitchIrOptim(true);
Z
zhangjun 已提交
222
      if (!engine_conf.has_use_gpu() || !engine_conf.use_gpu()) {
223 224 225 226 227
        config.EnableUseGpu(50, gpu_id);
        if (engine_conf.has_gpu_multi_stream() &&
            engine_conf.gpu_multi_stream()) {
          config.EnableGpuMultiStream();
        }
Z
zhangjun 已提交
228
      }
Z
update  
zhangjun 已提交
229 230 231
      config.EnableTensorRtEngine(1 << 20,
                                  max_batch,
                                  min_subgraph_size,
232
                                  precision_type,
Z
update  
zhangjun 已提交
233
                                  false,
Z
fix  
zhangjun 已提交
234
                                  FLAGS_use_calib);
Z
update  
zhangjun 已提交
235
      LOG(INFO) << "create TensorRT predictor";
Z
zhangjun 已提交
236 237
    }

Z
zhangjun 已提交
238
    if (engine_conf.has_use_lite() && engine_conf.use_lite()) {
239 240 241 242 243 244
      config.EnableLiteEngine(precision_type, true);
    }

    if ((!engine_conf.has_use_lite() && !engine_conf.has_use_gpu()) ||
        (engine_conf.has_use_lite() && !engine_conf.use_lite() &&
         engine_conf.has_use_gpu() && !engine_conf.use_gpu())) {
Z
zhangjun 已提交
245
#ifdef WITH_MKLML
Z
update  
zhangjun 已提交
246 247 248 249 250 251
#ifdef WITH_MKLDNN
      config.EnableMKLDNN();
      config.SwitchIrOptim(true);
      config.DisableGpu();
      // config.SetCpuMathLibraryNumThreads(2);

Z
fix  
zhangjun 已提交
252
      if (precision_type == PrecisionType::kInt8) {
253
        config.EnableMkldnnQuantizer();
Z
update  
zhangjun 已提交
254
        auto quantizer_config = config.mkldnn_quantizer_config();
255
        // TODO(somebody): warmup data
Z
update  
zhangjun 已提交
256 257 258
        // quantizer_config -> SetWarmupData();
        // quantizer_config -> SetWarmupBatchSize();
        // quantizer_config -> SetEnabledOpTypes(4);
Z
fix  
zhangjun 已提交
259
      } else if (precision_type == PrecisionType::kHalf) {
260 261
        config.EnableMkldnnBfloat16();
      }
Z
update  
zhangjun 已提交
262
#endif
Z
zhangjun 已提交
263
#endif
Z
zhangjun 已提交
264 265
    }

Z
zhangjun 已提交
266
    if (engine_conf.has_use_xpu() && engine_conf.use_xpu()) {
Z
update  
zhangjun 已提交
267 268 269
      // 2 MB l3 cache
      config.EnableXpu(2 * 1024 * 1024);
    }
Z
zhangjun 已提交
270

Z
zhangjun 已提交
271 272
    if (engine_conf.has_enable_memory_optimization() &&
        engine_conf.enable_memory_optimization()) {
Z
update  
zhangjun 已提交
273
      config.EnableMemoryOptim();
Z
zhangjun 已提交
274
    }
Z
zhangjun 已提交
275

Z
zhangjun 已提交
276
    predictor::AutoLock lock(predictor::GlobalCreateMutex::instance());
Z
update  
zhangjun 已提交
277 278
    _predictor = CreatePredictor(config);
    if (NULL == _predictor.get()) {
Z
zhangjun 已提交
279
      LOG(ERROR) << "create paddle predictor failed, path: " << model_path;
Z
zhangjun 已提交
280 281
      return -1;
    }
Z
update  
zhangjun 已提交
282

Z
zhangjun 已提交
283
    VLOG(2) << "create paddle predictor sucess, path: " << model_path;
Z
zhangjun 已提交
284 285 286 287
    return 0;
  }
};

}  // namespace inference
}  // namespace paddle_serving
}  // namespace baidu