// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/inference/api/mkldnn_quantizer.h"
#endif

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

#if PADDLE_WITH_ANAKIN
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
#endif

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (config_.with_profile_) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  } else {
    LOG(INFO) << "Profiler is deactivated, and no profiling report will be "
                 "generated.";
  }

  // Set the number of threads for the CPU math library; this applies with or
  // without MKLDNN.
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    if (config_.use_gpu_) {
      paddle::framework::InitDevices(false, {config_.device_id_});
    } else {
      paddle::framework::InitDevices(false, {});
    }
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;
    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, the parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is False, the parameters are loaded in
    // LoadParameters(), and the other persistable variables still need to be
    // created.
    // So in both cases, create the persistable variables first.
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // If ir_optim is disabled, the analysis passes (op fusion, graph
    // analysis, TensorRT subgraph, MKLDNN, etc.) will not be executed.
    OptimizeInferenceProgram();
  } else {
    // If the program is passed from outside, there is no need to optimize it;
    // this logic is used in the clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu_) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.device_id_);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

void AnalysisPredictor::MkldnnPreSet(const std::vector<PaddleTensor> &inputs) {
#ifdef PADDLE_WITH_MKLDNN
  VLOG(2) << "AnalysisPredictor::Run get_cur_mkldnn_session_id="
          << platform::get_cur_mkldnn_session_id();
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    VLOG(2) << "In mkldnn cache clear mode.";
    platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_CacheClearing);
    platform::set_cur_input_shape_cache_capacity(
        config_.mkldnn_cache_capacity_);
    // Set current_input_shape for caching dynamic shape.
    std::stringstream ss;
    for (size_t i = 0; i < inputs.size(); ++i) {
      for (size_t j = 0; j < inputs[i].shape.size(); ++j) {
        ss << inputs[i].shape[j] << "-";
      }
    }
    VLOG(2) << "Set input shape=" << ss.str();
    platform::set_cur_input_shape_str(ss.str());
  }
#endif
}

void AnalysisPredictor::MkldnnPostReset() {
#ifdef PADDLE_WITH_MKLDNN
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    paddle::platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_Default);
    platform::set_cur_input_shape_cache_capacity(0);
    platform::set_cur_input_shape_str("");
  }
#endif
}
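
// A minimal usage sketch for the cache-clearing mode handled above, assuming
// the usual AnalysisConfig knobs; the capacity value below is illustrative:
//
//   AnalysisConfig cfg("/path/to/model_dir");
//   cfg.EnableMKLDNN();
//   cfg.SetMkldnnCacheCapacity(10);  // > 0 makes MkldnnPreSet switch to the
//                                    // per-input-shape cache clearing mode.
//   auto predictor = CreatePaddlePredictor(cfg);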

bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPreSet(inputs);
#endif
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "fail to set feed";
    return false;
  }

  // Run the inference program.
  // If the variables are shared, there is no need to create them again.
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "fail to get fetches";
    return false;
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope will be held during inference, but the
  // operators assume that the containers are reset after each batch.
  // Here is a bugfix: collect all the container variables and reset them to a
  // bool; the next time, the operator will call MutableData and construct a
  // new container, so the container is empty for each batch.
  if (sub_scope_) {
    tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  }
  tensor_array_batch_cleaner_.ResetNoTensorVars();

  // Recover cpu_math_library_num_threads to 1, to avoid thread conflicts when
  // integrating the predictor into a deployment service.
  paddle::platform::SetNumThreads(1);
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPostReset();
#endif
  return true;
}
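
// A minimal sketch of driving Run() through the PaddleTensor interface; the
// tensor name, shape and dtype below are illustrative, not taken from a real
// model:
//
//   PaddleTensor t;
//   t.name = "x";
//   t.shape = {1, 3, 224, 224};
//   t.dtype = PaddleDType::FLOAT32;
//   t.data.Resize(1 * 3 * 224 * 224 * sizeof(float));
//   // ... fill t.data.data() with the real input ...
//   std::vector<PaddleTensor> outputs;
//   predictor->Run({t}, &outputs);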

bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but get "
               << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    auto &input = feed_tensors_[i];
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::INT32) {
      input_ptr = input.mutable_data<int32_t>(ddim, place_);
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    PADDLE_ENFORCE_NOT_NULL(input_ptr);
    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());

    if (platform::is_cpu_place(place_)) {
      // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
      std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                  inputs[i].data.length());
    } else {
#ifdef PADDLE_WITH_CUDA
      platform::DeviceContextPool &pool =
          platform::DeviceContextPool::Instance();
      auto *dev_ctx =
          static_cast<const platform::CUDADeviceContext *>(pool.Get(place_));
      auto dst_gpu_place = boost::get<platform::CUDAPlace>(place_);
      memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                   platform::CPUPlace(), inputs[i].data.data(),
                   inputs[i].data.length(), dev_ctx->stream());
#else
      PADDLE_THROW("Not compile with CUDA, should not reach here.");
#endif
    }
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names from program do not have name: [" << name
                   << "] from specified input";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}

template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor fetched by the fetch op should always be in CPU memory, so
  // just copy it.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only support float32, int64 and int32 now.";
    }
  }
  return true;
}

void AnalysisPredictor::PrepareArgument() {
  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableAnalysisOptim(config_.enable_ir_optim_);
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetModelFromMemory(config_.model_from_memory_);
  // Analyze inference_program
  argument_.SetUseAnakin(config_.anakin_engine_enabled());
  argument_.SetPredictorID(predictor_id_);
  argument_.SetOptimCacheDir(config_.opt_cache_dir_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
    argument_.SetTensorRtUseCalibMode(config_.trt_use_calib_mode_);
  }

  if (config_.anakin_engine_enabled()) {
    argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_);
    argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_);
    argument_.SetAnakinMinSubgraphSize(config_.anakin_min_subgraph_size_);
    argument_.SetAnakinPrecisionMode(config_.anakin_precision_mode_);
    argument_.SetAnakinAutoConfigLayout(config_.anakin_auto_config_layout_);
    argument_.SetAnakinPassesFilter(config_.anakin_passes_filter_);
    argument_.SetAnakinOpsFilter(config_.anakin_ops_filter_);
    LOG(INFO) << "Anakin subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

#ifdef PADDLE_WITH_MKLDNN
  if (config_.mkldnn_quantizer_enabled()) {
    LOG(INFO) << "Quantization is enabled";
    argument_.SetQuantizeEnabledOpTypes(
        config_.mkldnn_quantizer_config()->enabled_op_types());
    argument_.SetQuantizeExcludedOpIds(
        config_.mkldnn_quantizer_config()->excluded_op_ids());
  }
#endif

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  PrepareArgument();
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  // The config and the argument take a lot of storage; once the predictor
  // setup is complete, we release this storage.
  argument_.PartiallyRelease();
  config_.PartiallyRelease();
  LOG(INFO) << "======= optimize end =======";
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  VLOG(3) << "create AnalysisConfig";
  PADDLE_ENFORCE(config.is_valid(),
                 "Note: Each config can only be used for one predictor.");
  if (config.use_gpu()) {
    // 1. GPU memory
    PADDLE_ENFORCE_GE(config.memory_pool_init_size_mb(), 0.f);
    PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d",
                      config.gpu_device_id());
    std::vector<std::string> flags;

    float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
    if (fraction_of_gpu_memory > 0.95f) {
      LOG(ERROR)
          << "Allocate too much memory for the GPU memory pool, assigned "
          << config.memory_pool_init_size_mb() << " MB";
      LOG(ERROR)
          << "Try to shrink the value by setting "
             "AnalysisConfig::EnableUseGpu(...)";
    }

    if (fraction_of_gpu_memory >= 0.0f || fraction_of_gpu_memory <= 0.95f) {
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(fraction_of_gpu_memory);
      flags.push_back(flag);
      flags.push_back("--selected_gpus=" +
                      std::to_string(config.gpu_device_id()));
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  // Each config can only be used for one predictor.
  config.SetInValid();
  auto predictor_p = dynamic_cast<AnalysisPredictor *>(predictor.get());

  if (!predictor_p->Init(nullptr)) {
    return nullptr;
  }

  if (config.mkldnn_quantizer_enabled() && !predictor_p->MkldnnQuantize()) {
    return nullptr;
  }

  return predictor;
}
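
// A minimal sketch of building a predictor through the factory above; the
// paths and the memory pool size are illustrative:
//
//   AnalysisConfig cfg;
//   cfg.SetModel("/path/to/__model__", "/path/to/params");
//   cfg.EnableUseGpu(100 /* MB for the memory pool */, 0 /* device id */);
//   cfg.SwitchIrOptim(true);
//   auto predictor =
//       CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(cfg);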

bool AnalysisPredictor::MkldnnQuantize() {
#if PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_)
    mkldnn_quantizer_ = new AnalysisPredictor::MkldnnQuantizer(
        *this, config_.mkldnn_quantizer_config());
  return mkldnn_quantizer_->Quantize();
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  return false;
#endif
}
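
// A minimal sketch of enabling the INT8 quantization path above, assuming an
// MKLDNN build and the MkldnnQuantizerConfig warmup setters; the warmup data
// and batch size are illustrative:
//
//   cfg.EnableMKLDNN();
//   cfg.EnableMkldnnQuantizer();
//   cfg.mkldnn_quantizer_config()->SetWarmupData(warmup_data);
//   cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(1);
//   auto predictor = CreatePaddlePredictor(cfg);  // triggers MkldnnQuantize()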

void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedFetchList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FeedFetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::map<std::string, std::vector<int64_t>>
AnalysisPredictor::GetInputTensorShape() {
  std::map<std::string, std::vector<int64_t>> input_shapes;
  std::vector<std::string> names = GetInputNames();
  for (std::string name : names) {
    auto *var = inference_program_->Block(0).FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var, "input %s does not exist.", name);
    input_shapes[name] = var->GetShape();
  }
  return input_shapes;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

bool AnalysisPredictor::ZeroCopyRun() {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  executor_->Run();
  // Fix the bug that a reused TensorArray is not cleaned.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();

  // Recover cpu_math_library_num_threads to 1, to avoid thread conflicts when
  // integrating the predictor into a deployment service.
  paddle::platform::SetNumThreads(1);
  return true;
}
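
// A minimal sketch of the zero-copy path above, assuming feed/fetch ops were
// disabled with AnalysisConfig::SwitchUseFeedFetchOps(false); the tensor
// names and shapes are illustrative:
//
//   auto input = predictor->GetInputTensor("x");
//   input->Reshape({1, 3, 224, 224});
//   input->copy_from_cpu(input_data.data());
//   predictor->ZeroCopyRun();
//   auto output = predictor->GetOutputTensor("out");
//   std::vector<float> result(/* output size */);
//   output->copy_to_cpu(result.data());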

bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not valid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // create a temporary program to load parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to Load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}
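
// For reference, the two on-disk layouts handled by LoadProgramDesc() and
// LoadParameters() above (paths are illustrative):
//   1) config.SetModel("/path/to/model_dir"): the program is read from
//      model_dir/__model__ and each persistable variable from its own file,
//      loaded with one "load" op per variable.
//   2) config.SetModel("/path/to/__model__", "/path/to/params"): all the
//      parameters live in one combined file, loaded with a "load_combine" op.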

#if PADDLE_WITH_TENSORRT
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor(with trt) on the real data "
                      "to generate calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (config_.with_profile_) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

#if PADDLE_WITH_MKLDNN
  if (mkldnn_quantizer_) {
    delete mkldnn_quantizer_;
    mkldnn_quantizer_ = nullptr;
  }
#endif
}

std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}
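
// Clone() shares the scope and the program with the original predictor, so a
// common (hypothetical) multi-threaded serving sketch is one clone per worker:
//
//   auto main_predictor = CreatePaddlePredictor(cfg);
//   std::vector<std::unique_ptr<PaddlePredictor>> workers;
//   for (int i = 0; i < num_threads; ++i) {
//     workers.emplace_back(main_predictor->Clone());
//   }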

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

// Add SaveOptimModel
void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
  // save model
  std::string model_name = dir + "/model";
  std::ofstream outfile;
  outfile.open(model_name, std::ios::out | std::ios::binary);
  std::string inference_prog_desc = GetSerializedProgram();
  outfile << inference_prog_desc;
  // save params
  framework::ProgramDesc save_program;
  auto *save_block = save_program.MutableBlock(0);

  const framework::ProgramDesc &main_program = program();
  const framework::BlockDesc &global_block = main_program.Block(0);
  std::vector<std::string> save_var_list;
  for (framework::VarDesc *var : global_block.AllVars()) {
    if (IsPersistable(var)) {
      framework::VarDesc *new_var = save_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      save_var_list.push_back(new_var->Name());
    }
  }
  std::sort(save_var_list.begin(), save_var_list.end());
  auto *op = save_block->AppendOp();
  op->SetType("save_combine");
  op->SetInput("X", save_var_list);
  op->SetAttr("file_path", dir + "/params");
  op->CheckAttrs();

  platform::CPUPlace place;
  framework::Executor exe(place);
  exe.Run(save_program, scope(), 0, true, true);
}
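
// A hypothetical sketch of dumping the optimized program and the combined
// parameters after the predictor has been created with ir_optim enabled:
//
//   auto *ap = dynamic_cast<AnalysisPredictor *>(predictor.get());
//   ap->SaveOptimModel("/path/to/output_dir");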

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
USE_TRT_CONVERTER(shuffle_channel);
USE_TRT_CONVERTER(swish);
#endif

#if PADDLE_WITH_ANAKIN
USE_ANAKIN_CONVERTER(mul);
USE_ANAKIN_CONVERTER(fc);
USE_ANAKIN_CONVERTER(conv2d);
USE_ANAKIN_CONVERTER(conv2d_fusion);
USE_ANAKIN_CONVERTER(concat);
USE_ANAKIN_CONVERTER(split);
USE_ANAKIN_CONVERTER(relu);
USE_ANAKIN_CONVERTER(sigmoid);
USE_ANAKIN_CONVERTER(tanh);
USE_ANAKIN_CONVERTER(pool2d);
USE_ANAKIN_CONVERTER(elementwise_add);
USE_ANAKIN_CONVERTER(elementwise_mul);
USE_ANAKIN_CONVERTER(batch_norm);
USE_ANAKIN_CONVERTER(flatten);
USE_ANAKIN_CONVERTER(reshape);
USE_ANAKIN_CONVERTER(transpose);
USE_ANAKIN_CONVERTER(softmax);
USE_ANAKIN_CONVERTER(detection_out);
USE_ANAKIN_CONVERTER(density_prior_box);
USE_ANAKIN_CONVERTER(dropout);
USE_ANAKIN_CONVERTER(sum);
USE_ANAKIN_CONVERTER(prior_box);
USE_ANAKIN_CONVERTER(leaky_relu);
USE_ANAKIN_CONVERTER(affine_channel);
USE_ANAKIN_CONVERTER(relu6);
USE_ANAKIN_CONVERTER(swish);
USE_ANAKIN_CONVERTER(shuffle_channel);
#endif