// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <pthread.h>
#include <fstream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "core/configure/include/configure_parser.h"
#include "core/configure/inferencer_configure.pb.h"
#include "core/predictor/framework/infer.h"
#include "paddle_inference_api.h"  // NOLINT
DECLARE_int32(gpuid);

namespace baidu {
namespace paddle_serving {
namespace fluid_gpu {

using configure::SigmoidConf;

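// Scoped (RAII) pthread mutex guard: locks on construction, unlocks on
// destruction, so the critical section is bounded by the enclosing scope.
//
// Minimal usage sketch (`mu` is illustrative, not part of this header):
//
//   static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
//   {
//     AutoLock lock(mu);   // acquired here
//     // ... critical section ...
//   }                      // released when `lock` leaves scope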
class AutoLock {
 public:
  explicit AutoLock(pthread_mutex_t& mutex) : _mut(mutex) {
    pthread_mutex_lock(&mutex);
  }

  ~AutoLock() { pthread_mutex_unlock(&_mut); }

 private:
  pthread_mutex_t& _mut;
};

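// Global mutexes (lazily constructed singletons) that serialize predictor and
// sigmoid-model creation; presumably CreatePaddlePredictor is not safe to
// call concurrently in the Paddle versions this engine targets.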
class GlobalPaddleCreateMutex {
 public:
  pthread_mutex_t& mutex() { return _mut; }

  static pthread_mutex_t& instance() {
    static GlobalPaddleCreateMutex gmutex;
    return gmutex.mutex();
  }

 private:
  GlobalPaddleCreateMutex() { pthread_mutex_init(&_mut, NULL); }

  pthread_mutex_t _mut;
};

class GlobalSigmoidCreateMutex {
 public:
  pthread_mutex_t& mutex() { return _mut; }
  static pthread_mutex_t& instance() {
    static GlobalSigmoidCreateMutex gmutex;
    return gmutex.mutex();
  }

 private:
  GlobalSigmoidCreateMutex() { pthread_mutex_init(&_mut, NULL); }

  pthread_mutex_t _mut;
};

// data interface
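// FluidFamilyCore adapts a paddle::PaddlePredictor to the framework's untyped
// Run(in, out) interface; in_data and out_data are really pointers to
// std::vector<paddle::PaddleTensor>.
//
// Minimal usage sketch (illustrative only; `params` stands for a configured
// predictor::InferEngineCreationParams, and tensor setup depends on the
// concrete model):
//
//   std::vector<paddle::PaddleTensor> in, out;
//   // ... fill `in` with the model's input tensors ...
//   FluidGpuAnalysisCore core;
//   if (core.create(params) == 0) {
//     core.Run(&in, &out);
//   }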
class FluidFamilyCore {
 public:
  virtual ~FluidFamilyCore() {}
  virtual bool Run(const void* in_data, void* out_data) {
    if (!_core->Run(*(std::vector<paddle::PaddleTensor>*)in_data,
                    (std::vector<paddle::PaddleTensor>*)out_data)) {
      LOG(ERROR) << "Failed call Run with paddle predictor";
      return false;
    }

    return true;
  }

  virtual int create(const predictor::InferEngineCreationParams& params) = 0;

  virtual int clone(void* origin_core) {
    if (origin_core == NULL) {
      LOG(ERROR) << "origin paddle Predictor is null.";
      return -1;
    }
    paddle::PaddlePredictor* p_predictor =
        (paddle::PaddlePredictor*)origin_core;
    _core = p_predictor->Clone();
    if (_core.get() == NULL) {
      LOG(ERROR) << "fail to clone paddle predictor: " << origin_core;
      return -1;
    }
    return 0;
  }

  virtual void* get() { return _core.get(); }

 protected:
  std::unique_ptr<paddle::PaddlePredictor> _core;
};

// infer interface
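// AnalysisConfig-based engine for a model packaged as combined `__model__`
// and `__params__` files inside a single directory.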
class FluidGpuAnalysisCore : public FluidFamilyCore {
 public:
  int create(const predictor::InferEngineCreationParams& params) {
    std::string data_path = params.get_path();
    if (access(data_path.c_str(), F_OK) == -1) {
      LOG(ERROR) << "create paddle predictor failed, path not exits: "
                 << data_path;
      return -1;
    }

    paddle::AnalysisConfig analysis_config;
    analysis_config.SetParamsFile(data_path + "/__params__");
    analysis_config.SetProgFile(data_path + "/__model__");
    analysis_config.EnableUseGpu(100, FLAGS_gpuid);
    analysis_config.SetCpuMathLibraryNumThreads(1);

    if (params.enable_memory_optimization()) {
      analysis_config.EnableMemoryOptim();
    }

    analysis_config.SwitchSpecifyInputNames(true);

    AutoLock lock(GlobalPaddleCreateMutex::instance());
    _core =
        paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(analysis_config);
    if (NULL == _core.get()) {
      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
      return -1;
    }

    VLOG(2) << "create paddle predictor sucess, path: " << data_path;
    return 0;
  }
};

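// NativeConfig-based engine for the same combined-file layout, using the
// older native predictor API.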
class FluidGpuNativeCore : public FluidFamilyCore {
 public:
  int create(const predictor::InferEngineCreationParams& params) {
    std::string data_path = params.get_path();
    if (access(data_path.c_str(), F_OK) == -1) {
      LOG(ERROR) << "create paddle predictor failed, path not exits: "
                 << data_path;
      return -1;
    }

    paddle::NativeConfig native_config;
    native_config.param_file = data_path + "/__params__";
    native_config.prog_file = data_path + "/__model__";
    native_config.use_gpu = true;
    native_config.fraction_of_gpu_memory = 0.01;
    native_config.device = FLAGS_gpuid;
    AutoLock lock(GlobalPaddleCreateMutex::instance());
    _core = paddle::CreatePaddlePredictor<paddle::NativeConfig,
                                          paddle::PaddleEngineKind::kNative>(
        native_config);
    if (NULL == _core.get()) {
      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
      return -1;
    }

    VLOG(2) << "create paddle predictor sucess, path: " << data_path;
    return 0;
  }
};

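// AnalysisConfig-based engine for a model stored as a directory of separate
// parameter files; can optionally run through TensorRT or IR optimization,
// depending on the creation params.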
class FluidGpuAnalysisDirCore : public FluidFamilyCore {
 public:
  int create(const predictor::InferEngineCreationParams& params) {
    std::string data_path = params.get_path();
    if (access(data_path.c_str(), F_OK) == -1) {
      LOG(ERROR) << "create paddle predictor failed, path not exits: "
                 << data_path;
      return -1;
    }

    paddle::AnalysisConfig analysis_config;
    analysis_config.SetModel(data_path);
    analysis_config.EnableUseGpu(1500, FLAGS_gpuid);
    analysis_config.SwitchSpecifyInputNames(true);
    analysis_config.SetCpuMathLibraryNumThreads(1);

    if (params.enable_memory_optimization()) {
      analysis_config.EnableMemoryOptim();
    }

#if 0  // TODO: support flexible shape

    int min_seq_len = 1;
    int max_seq_len = 512;
    int opt_seq_len = 128;
    int head_number = 12;
    int batch = 50;

    std::vector<int> min_in_shape = {batch, min_seq_len, 1};
    std::vector<int> max_in_shape = {batch, max_seq_len, 1};
    std::vector<int> opt_in_shape = {batch, opt_seq_len, 1};

    std::string input1_name = "src_text_a_ids";
    std::string input2_name = "pos_text_a_ids";
    std::string input3_name = "sent_text_a_ids";
    std::string input4_name = "stack_0.tmp_0";

    std::map<std::string, std::vector<int>> min_input_shape = {
        {input1_name, min_in_shape},
        {input2_name, min_in_shape},
        {input3_name, min_in_shape},
        {input4_name, {batch, head_number, min_seq_len, min_seq_len}},
    };

    std::map<std::string, std::vector<int>> max_input_shape = {
        {input1_name, max_in_shape},
        {input2_name, max_in_shape},
        {input3_name, max_in_shape},
        {input4_name, {batch, head_number, max_seq_len, max_seq_len}},
    };
    std::map<std::string, std::vector<int>> opt_input_shape = {
        {input1_name, opt_in_shape},
        {input2_name, opt_in_shape},
        {input3_name, opt_in_shape},
        {input4_name, {batch, head_number, opt_seq_len, opt_seq_len}},
    };

    analysis_config.SetTRTDynamicShapeInfo(
        min_input_shape, max_input_shape, opt_input_shape);
#endif
    int max_batch = 32;
    int min_subgraph_size = 3;
    if (params.use_trt()) {
      analysis_config.EnableTensorRtEngine(
          1 << 20,            // TensorRT workspace size, in bytes
          max_batch,          // max batch size the engine is built for
          min_subgraph_size,  // min ops in a subgraph before TensorRT takes it
          paddle::AnalysisConfig::Precision::kFloat32,
          false,   // use_static: do not serialize the TensorRT engine
          false);  // use_calib_mode: no INT8 calibration
      LOG(INFO) << "create TensorRT predictor";
    } else {
      if (params.enable_memory_optimization()) {
        analysis_config.EnableMemoryOptim();
      }

      analysis_config.SwitchIrOptim(params.enable_ir_optimization());
    }
    AutoLock lock(GlobalPaddleCreateMutex::instance());
    _core =
        paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(analysis_config);
    if (NULL == _core.get()) {
      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
      return -1;
    }

    VLOG(2) << "create paddle predictor sucess, path: " << data_path;
    return 0;
  }
};

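// NativeConfig-based engine for the directory layout.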
class FluidGpuNativeDirCore : public FluidFamilyCore {
 public:
  int create(const predictor::InferEngineCreationParams& params) {
    std::string data_path = params.get_path();
    if (access(data_path.c_str(), F_OK) == -1) {
      LOG(ERROR) << "create paddle predictor failed, path not exits: "
                 << data_path;
      return -1;
    }

    paddle::NativeConfig native_config;
    native_config.model_dir = data_path;
    native_config.use_gpu = true;
    native_config.fraction_of_gpu_memory = 0.01;
    native_config.device = FLAGS_gpuid;
    AutoLock lock(GlobalPaddleCreateMutex::instance());
    _core = paddle::CreatePaddlePredictor<paddle::NativeConfig,
                                          paddle::PaddleEngineKind::kNative>(
        native_config);
    if (NULL == _core.get()) {
      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
      return -1;
    }

    VLOG(2) << "create paddle predictor sucess, path: " << data_path;
    return 0;
  }
};

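// Row-major float matrix loaded from a binary file: a 16-byte header
// (skipped on load) followed by _row * _col floats.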
class Parameter {
 public:
  Parameter() : _row(0), _col(0), _params(NULL) {}
  ~Parameter() {
    LOG(INFO) << "before destroy Parameter, file_name[" << _file_name << "]";
    destroy();
  }

  int init(int row, int col, const char* file_name) {
    destroy();
    _file_name = file_name;
    _row = row;
    _col = col;
    _params = reinterpret_cast<float*>(malloc(_row * _col * sizeof(float)));
    if (_params == NULL) {
      LOG(ERROR) << "Load " << _file_name << " malloc error.";
      return -1;
    }
    VLOG(2) << "Load parameter file[" << _file_name << "] success.";
    return 0;
  }

  void destroy() {
    _row = 0;
    _col = 0;
    if (_params != NULL) {
      free(_params);
      _params = NULL;
    }
  }

  int load() {
    if (_params == NULL || _row <= 0 || _col <= 0) {
      LOG(ERROR) << "load parameter error [not inited].";
      return -1;
    }

    FILE* fs = fopen(_file_name.c_str(), "rb");
    if (fs == NULL) {
      LOG(ERROR) << "load " << _file_name << " fopen error.";
      return -1;
    }
    static const uint32_t MODEL_FILE_HEAD_LEN = 16;
    char head[MODEL_FILE_HEAD_LEN] = {0};
    if (fread(head, 1, MODEL_FILE_HEAD_LEN, fs) != MODEL_FILE_HEAD_LEN) {
      destroy();
      LOG(ERROR) << "Load " << _file_name << " read head error.";
      if (fs != NULL) {
        fclose(fs);
        fs = NULL;
      }
      return -1;
    }

    uint32_t matrix_size = _row * _col;
    if (matrix_size == fread(_params, sizeof(float), matrix_size, fs)) {
      if (fs != NULL) {
        fclose(fs);
        fs = NULL;
      }
      LOG(INFO) << "load " << _file_name << " read ok.";
      return 0;
    } else {
      LOG(ERROR) << "load " << _file_name << " read error.";
      destroy();
      if (fs != NULL) {
        fclose(fs);
        fs = NULL;
      }
      return -1;
    }
  }

 public:
  std::string _file_name;
  int _row;
  int _col;
  float* _params;
};

class SigmoidModel {
 public:
  ~SigmoidModel() {}
  int load(const char* sigmoid_w_file,
           const char* sigmoid_b_file,
           float exp_max,
           float exp_min) {
    AutoLock lock(GlobalSigmoidCreateMutex::instance());
    if (0 != _sigmoid_w.init(2, 1, sigmoid_w_file) || 0 != _sigmoid_w.load()) {
      LOG(ERROR) << "load params sigmoid_w failed.";
      return -1;
    }
    VLOG(2) << "load sigmoid_w [" << _sigmoid_w._params[0] << "] ["
            << _sigmoid_w._params[1] << "].";
    if (0 != _sigmoid_b.init(2, 1, sigmoid_b_file) || 0 != _sigmoid_b.load()) {
      LOG(ERROR) << "load params sigmoid_b failed.";
      return -1;
    }
    VLOG(2) << "load sigmoid_b [" << _sigmoid_b._params[0] << "] ["
            << _sigmoid_b._params[1] << "].";
    _exp_max_input = exp_max;
    _exp_min_input = exp_min;
    return 0;
  }

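  // Two-class softmax written via the sigmoid: with logits
  //   y0 = w[0]*x + b[0],  y1 = w[1]*x + b[1]
  // (each clamped to [_exp_min_input, _exp_max_input]), the returned value is
  //   o = exp(y1) / (exp(y0) + exp(y1)) = 1 / (1 + exp(y0 - y1)),
  // i.e. the probability assigned to class 1.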
  int softmax(float x, double& o) {  // NOLINT
    float _y0 = x * _sigmoid_w._params[0] + _sigmoid_b._params[0];
    float _y1 = x * _sigmoid_w._params[1] + _sigmoid_b._params[1];
    _y0 = (_y0 > _exp_max_input)
              ? _exp_max_input
              : ((_y0 < _exp_min_input) ? _exp_min_input : _y0);
    _y1 = (_y1 > _exp_max_input)
              ? _exp_max_input
              : ((_y1 < _exp_min_input) ? _exp_min_input : _y1);
    o = 1.0f / (1.0f + exp(_y0 - _y1));
    return 0;
  }

 public:
  Parameter _sigmoid_w;
  Parameter _sigmoid_b;
  float _exp_max_input;
  float _exp_min_input;
};

class SigmoidFluidModel {
 public:
  int softmax(float x, double& o) {  // NOLINT
    return _sigmoid_core->softmax(x, o);
  }

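  // Clone() shares the sigmoid weights (a shared_ptr) but clones the
  // underlying paddle predictor, so each clone can serve requests
  // independently.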
  std::unique_ptr<SigmoidFluidModel> Clone() {
    std::unique_ptr<SigmoidFluidModel> clone_model;
    clone_model.reset(new SigmoidFluidModel());
    clone_model->_sigmoid_core = _sigmoid_core;
    clone_model->_fluid_core = _fluid_core->Clone();
    return std::move(clone_model);
  }

 public:
  std::unique_ptr<paddle::PaddlePredictor> _fluid_core;
  std::shared_ptr<SigmoidModel> _sigmoid_core;
};

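// Base class for engines that pair a fluid predictor with a trailing
// sigmoid/softmax stage. create() reads a SigmoidConf proto found at the
// configured path, loads the fluid model from conf.dnn_model_path(), and then
// loads the sigmoid weight/bias files referenced by the conf.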
class FluidGpuWithSigmoidCore : public FluidFamilyCore {
 public:
  virtual ~FluidGpuWithSigmoidCore() {}

 public:
  int create(const predictor::InferEngineCreationParams& params) {
    std::string model_path = params.get_path();
    size_t pos = model_path.find_last_of("/\\");
    std::string conf_path = model_path.substr(0, pos);
    std::string conf_file = model_path.substr(pos);
    configure::SigmoidConf conf;
    if (configure::read_proto_conf(conf_path, conf_file, &conf) != 0) {
      LOG(ERROR) << "failed load model path: " << model_path;
      return -1;
    }

    _core.reset(new SigmoidFluidModel);

    std::string fluid_model_data_path = conf.dnn_model_path();
    predictor::InferEngineCreationParams new_params(params);
    new_params.set_path(fluid_model_data_path);
    int ret = load_fluid_model(new_params);
    if (ret < 0) {
      LOG(ERROR) << "fail to load fluid model.";
      return -1;
    }
    const char* sigmoid_w_file = conf.sigmoid_w_file().c_str();
    const char* sigmoid_b_file = conf.sigmoid_b_file().c_str();
    float exp_max = conf.exp_max_input();
    float exp_min = conf.exp_min_input();
    _core->_sigmoid_core.reset(new SigmoidModel);
    LOG(INFO) << "create sigmoid core[" << _core->_sigmoid_core.get()
              << "], use count[" << _core->_sigmoid_core.use_count() << "].";
    ret = _core->_sigmoid_core->load(
        sigmoid_w_file, sigmoid_b_file, exp_max, exp_min);
    if (ret < 0) {
      LOG(ERROR) << "fail to load sigmoid model.";
      return -1;
    }
    return 0;
  }

  virtual bool Run(const void* in_data, void* out_data) {
    if (!_core->_fluid_core->Run(
            *(std::vector<paddle::PaddleTensor>*)in_data,
            (std::vector<paddle::PaddleTensor>*)out_data)) {
      LOG(ERROR) << "Failed call Run with paddle predictor";
      return false;
    }

    return true;
  }

  virtual int clone(SigmoidFluidModel* origin_core) {
    if (origin_core == NULL) {
      LOG(ERROR) << "origin paddle Predictor is null.";
      return -1;
    }
    _core = origin_core->Clone();
    if (_core.get() == NULL) {
      LOG(ERROR) << "fail to clone paddle predictor: " << origin_core;
      return -1;
    }
    LOG(INFO) << "clone sigmoid core[" << _core->_sigmoid_core.get()
              << "] use count[" << _core->_sigmoid_core.use_count() << "].";
    return 0;
  }

  virtual SigmoidFluidModel* get() { return _core.get(); }

  virtual int load_fluid_model(
      const predictor::InferEngineCreationParams& params) = 0;

  int softmax(float x, double& o) {  // NOLINT
    return _core->_sigmoid_core->softmax(x, o);
  }

 protected:
  std::unique_ptr<SigmoidFluidModel> _core;
};

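// Sigmoid-augmented variant built on the native predictor (directory layout).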
class FluidGpuNativeDirWithSigmoidCore : public FluidGpuWithSigmoidCore {
 public:
  int load_fluid_model(const predictor::InferEngineCreationParams& params) {
    std::string data_path = params.get_path();
    if (access(data_path.c_str(), F_OK) == -1) {
      LOG(ERROR) << "create paddle predictor failed, path not exits: "
                 << data_path;
      return -1;
    }

    paddle::NativeConfig native_config;
    native_config.model_dir = data_path;
    native_config.use_gpu = true;
    native_config.fraction_of_gpu_memory = 0.01;
    native_config.device = FLAGS_gpuid;
    AutoLock lock(GlobalPaddleCreateMutex::instance());
    _core->_fluid_core =
        paddle::CreatePaddlePredictor<paddle::NativeConfig,
                                      paddle::PaddleEngineKind::kNative>(
            native_config);
    if (NULL == _core->_fluid_core.get()) {
      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
      return -1;
    }

    VLOG(2) << "create paddle predictor sucess, path: " << data_path;
    return 0;
  }
};

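// Sigmoid-augmented variant built on the analysis predictor (directory
// layout).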
class FluidGpuAnalysisDirWithSigmoidCore : public FluidGpuWithSigmoidCore {
 public:
  int load_fluid_model(const predictor::InferEngineCreationParams& params) {
    std::string data_path = params.get_path();
    if (access(data_path.c_str(), F_OK) == -1) {
      LOG(ERROR) << "create paddle predictor failed, path not exits: "
                 << data_path;
      return -1;
    }

    paddle::AnalysisConfig analysis_config;
    analysis_config.SetModel(data_path);
    analysis_config.EnableUseGpu(100, FLAGS_gpuid);
    analysis_config.SwitchSpecifyInputNames(true);
    analysis_config.SetCpuMathLibraryNumThreads(1);

    if (params.enable_memory_optimization()) {
      analysis_config.EnableMemoryOptim();
    }

    AutoLock lock(GlobalPaddleCreateMutex::instance());
    _core->_fluid_core =
        paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(analysis_config);
    if (NULL == _core->_fluid_core.get()) {
      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
      return -1;
    }

    VLOG(2) << "create paddle predictor sucess, path: " << data_path;
    return 0;
  }
};

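// Engine for encrypted models: reads `encrypt_model`, `encrypt_params`, and
// `key` from the model path, decrypts the buffers in memory with
// paddle::MakeCipher, and hands the plaintext to
// AnalysisConfig::SetModelBuffer, so the decrypted model is never written to
// disk.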
class FluidGpuAnalysisEncryptCore : public FluidFamilyCore {
 public:
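  // Reads an entire binary file into `contents`. Note: no I/O error handling
  // here; create() only checks that the model path itself exists.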
  void ReadBinaryFile(const std::string& filename, std::string* contents) {
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    fin.seekg(0, std::ios::end);
    contents->clear();
    contents->resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(contents->at(0)), contents->size());
    fin.close();
  }

  int create(const predictor::InferEngineCreationParams& params) {
    std::string data_path = params.get_path();
    if (access(data_path.c_str(), F_OK) == -1) {
      LOG(ERROR) << "create paddle predictor failed, path note exits: "
                 << data_path;
      return -1;
    }

    std::string model_buffer, params_buffer, key_buffer;
    ReadBinaryFile(data_path + "encrypt_model", &model_buffer);
    ReadBinaryFile(data_path + "encrypt_params", &params_buffer);
    ReadBinaryFile(data_path + "key", &key_buffer);

    VLOG(2) << "prepare for encryption model";

    auto cipher = paddle::MakeCipher("");
    std::string real_model_buffer = cipher->Decrypt(model_buffer, key_buffer);
    std::string real_params_buffer = cipher->Decrypt(params_buffer, key_buffer);

    paddle::AnalysisConfig analysis_config;
    analysis_config.SetModelBuffer(&real_model_buffer[0],
                                   real_model_buffer.size(),
                                   &real_params_buffer[0],
                                   real_params_buffer.size());
    analysis_config.EnableUseGpu(100, FLAGS_gpuid);
    analysis_config.SetCpuMathLibraryNumThreads(1);
    if (params.enable_memory_optimization()) {
      analysis_config.EnableMemoryOptim();
    }
    analysis_config.SwitchSpecifyInputNames(true);
    AutoLock lock(GlobalPaddleCreateMutex::instance());
    VLOG(2) << "decrypt model file sucess";
    _core =
        paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(analysis_config);
    if (NULL == _core.get()) {
      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
      return -1;
    }
    VLOG(2) << "create paddle predictor sucess, path: " << data_path;
    return 0;
  }
};
}  // namespace fluid_gpu
}  // namespace paddle_serving
}  // namespace baidu