// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/framework/version.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/inference/api/mkldnn_quantizer.h"
#endif

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

#if PADDLE_WITH_ANAKIN
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
#endif

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (config_.with_profile_) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  } else {
    LOG(INFO) << "Profiler is deactivated, and no profiling report will be "
                 "generated.";
  }

  // no matter with or without MKLDNN
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    if (config_.use_gpu_) {
      paddle::framework::InitDevices(false, {config_.device_id_});
    } else {
      paddle::framework::InitDevices(false, {});
    }
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;
    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, the parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is False, the parameters are loaded in
    // LoadParameters(), and the other persistable variables still need to be
    // created.
    // So in both cases, create the persistable variables first.
    if (!CheckOperatorCompatible()) {
      LOG(WARNING) << "WARNING: Results may be DIFF! "
                      "Please use the corresponding version of the model and "
                      "prediction library, and do not use the develop branch.";
    }
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // If enable_ir_optim_ is false, the analysis passes (op fusion, graph
    // analysis, TRT subgraph, MKLDNN, etc.) will not be executed.
    OptimizeInferenceProgram();
  } else {
    // If the program is passed from external, no need to optimize it, this
    // logic is used in the clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu_) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.device_id_);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

void AnalysisPredictor::MkldnnPreSet(const std::vector<PaddleTensor> &inputs) {
#ifdef PADDLE_WITH_MKLDNN
  VLOG(2) << "AnalysisPredictor::Run get_cur_mkldnn_session_id="
          << platform::get_cur_mkldnn_session_id();
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    VLOG(2) << "In mkldnn cache clear mode.";
    platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_CacheClearing);
    platform::set_cur_input_shape_cache_capacity(
        config_.mkldnn_cache_capacity_);
    // Set current_input_shape for caching dynamic shape.
    std::stringstream ss;
    for (size_t i = 0; i < inputs.size(); ++i) {
      for (size_t j = 0; j < inputs[i].shape.size(); ++j) {
        ss << inputs[i].shape[j] << "-";
      }
    }
    VLOG(2) << "Set input shape=" << ss.str();
    platform::set_cur_input_shape_str(ss.str());
  }
#endif
}

void AnalysisPredictor::MkldnnPostReset() {
#ifdef PADDLE_WITH_MKLDNN
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    paddle::platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_Default);
    platform::set_cur_input_shape_cache_capacity(0);
    platform::set_cur_input_shape_str("");
  }
#endif
}

bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPreSet(inputs);
#endif
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "fail to set feed";
    return false;
  }

  // Run the inference program
  // if share variables, we need not create variables
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "fail to get fetches";
    return false;
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope will be held during inference, but the
  // operators assume that each container is reset after every batch.
  // As a bugfix, collect all the container variables and reset them to a
  // bool; the next time, the operator will call MutableData and construct a
  // new container, so the container is empty for each batch.
  if (sub_scope_) {
    tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  }
  tensor_array_batch_cleaner_.ResetNoTensorVars();

  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
  // conflict when integrating it into deployment service.
  paddle::platform::SetNumThreads(1);
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPostReset();
#endif
  return true;
}
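
// Illustrative usage sketch (not part of this file): a typical caller drives
// the feed/fetch path implemented above roughly as follows. The model path,
// tensor name, shape and buffer below are placeholders, not values assumed by
// this predictor.
//
//   AnalysisConfig config;
//   config.SetModel("/path/to/model_dir");  // or SetModel(prog_file, params_file)
//   auto predictor = CreatePaddlePredictor(config);
//
//   PaddleTensor input;
//   input.name = "x";
//   input.shape = {1, 3, 224, 224};
//   input.dtype = PaddleDType::FLOAT32;
//   input.data = PaddleBuf(host_buffer, num_bytes);  // host_buffer: caller-owned
//
//   std::vector<PaddleTensor> outputs;
//   predictor->Run({input}, &outputs);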

bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but get "
               << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    auto &input = feed_tensors_[i];
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::INT32) {
      input_ptr = input.mutable_data<int32_t>(ddim, place_);
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    PADDLE_ENFORCE_NOT_NULL(input_ptr);
    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());

    if (platform::is_cpu_place(place_)) {
      // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
      std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                  inputs[i].data.length());
    } else {
#ifdef PADDLE_WITH_CUDA
      platform::DeviceContextPool &pool =
          platform::DeviceContextPool::Instance();
      auto *dev_ctx =
          static_cast<const platform::CUDADeviceContext *>(pool.Get(place_));
      auto dst_gpu_place = boost::get<platform::CUDAPlace>(place_);
      memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                   platform::CPUPlace(), inputs[i].data.data(),
                   inputs[i].data.length(), dev_ctx->stream());
#else
      PADDLE_THROW("Not compile with CUDA, should not reach here.");
#endif
    }
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names from program do not have name: [" << name
                   << "] from specified input";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}

template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor produced by the fetch op should always be in CPU memory, so a
  // plain copy is enough.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only support float32, int64 and int32 now.";
    }
  }
  return true;
}

void AnalysisPredictor::PrepareArgument() {
  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableAnalysisOptim(config_.enable_ir_optim_);
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetModelFromMemory(config_.model_from_memory_);
  // Analyze inference_program
  argument_.SetUseAnakin(config_.anakin_engine_enabled());
  argument_.SetPredictorID(predictor_id_);
  argument_.SetOptimCacheDir(config_.opt_cache_dir_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
    argument_.SetTensorRtUseCalibMode(config_.trt_use_calib_mode_);
  }

  if (config_.anakin_engine_enabled()) {
    argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_);
    argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_);
    argument_.SetAnakinMinSubgraphSize(config_.anakin_min_subgraph_size_);
    argument_.SetAnakinPrecisionMode(config_.anakin_precision_mode_);
    argument_.SetAnakinAutoConfigLayout(config_.anakin_auto_config_layout_);
    argument_.SetAnakinPassesFilter(config_.anakin_passes_filter_);
    argument_.SetAnakinOpsFilter(config_.anakin_ops_filter_);
    LOG(INFO) << "Anakin subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

#ifdef PADDLE_WITH_MKLDNN
  if (config_.mkldnn_quantizer_enabled()) {
    LOG(INFO) << "Quantization is enabled";
    argument_.SetQuantizeEnabledOpTypes(
        config_.mkldnn_quantizer_config()->enabled_op_types());
    argument_.SetQuantizeExcludedOpIds(
        config_.mkldnn_quantizer_config()->excluded_op_ids());
  }
#endif

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  PrepareArgument();
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  // The config and argument take a lot of storage; once the predictor setup is
  // complete, release these stores.
  argument_.PartiallyRelease();
  config_.PartiallyRelease();
  LOG(INFO) << "======= optimize end =======";
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  VLOG(3) << "create AnalysisConfig";
  PADDLE_ENFORCE(config.is_valid(),
                 "Note: Each config can only be used for one predictor.");
  if (config.use_gpu()) {
    // 1. GPU memory
    PADDLE_ENFORCE_GE(config.memory_pool_init_size_mb(), 0.f);
    PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d",
                      config.gpu_device_id());
    std::vector<std::string> flags;

    float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
    if (fraction_of_gpu_memory > 0.95f) {
      LOG(ERROR)
          << "Allocate too much memory for the GPU memory pool, assigned "
          << config.memory_pool_init_size_mb() << " MB";
      LOG(ERROR)
          << "Try to shink the value by setting AnalysisConfig::EnableGpu(...)";
    }

    if (fraction_of_gpu_memory >= 0.0f || fraction_of_gpu_memory <= 0.95f) {
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(fraction_of_gpu_memory);
      flags.push_back(flag);
      flags.push_back("--selected_gpus=" +
                      std::to_string(config.gpu_device_id()));
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }
  if (config.glog_info_disabled()) {
    FLAGS_logtostderr = 1;
    FLAGS_minloglevel = google::WARNING;
    LOG(WARNING) << " - GLOG's LOG(INFO) is disabled.";
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  // Each config can only be used for one predictor.
  config.SetInValid();
  auto predictor_p = dynamic_cast<AnalysisPredictor *>(predictor.get());

  if (!predictor_p->Init(nullptr)) {
    return nullptr;
  }

  if (config.mkldnn_quantizer_enabled() && !predictor_p->MkldnnQuantize()) {
    return nullptr;
  }

  return predictor;
}

bool AnalysisPredictor::MkldnnQuantize() {
#if PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_)
    mkldnn_quantizer_ = new AnalysisPredictor::MkldnnQuantizer(
        *this, config_.mkldnn_quantizer_config());
  return mkldnn_quantizer_->Quantize();
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  return false;
#endif
}

void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedFetchList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FeedFetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::map<std::string, std::vector<int64_t>>
AnalysisPredictor::GetInputTensorShape() {
  std::map<std::string, std::vector<int64_t>> input_shapes;
  std::vector<std::string> names = GetInputNames();
  for (std::string name : names) {
    auto *var = inference_program_->Block(0).FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var, "input %s does not exist.", name);
    input_shapes[name] = var->GetShape();
  }
  return input_shapes;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

bool AnalysisPredictor::ZeroCopyRun() {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  executor_->Run();
  // Fix TensorArray reuse not cleaned bug.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();

  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
  // conflict when integrating it into deployment service.
  paddle::platform::SetNumThreads(1);
  return true;
}
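
// Illustrative usage sketch (not part of this file): the zero-copy path is
// driven through the accessors above instead of Run(). This assumes the usual
// public helpers (AnalysisConfig::SwitchUseFeedFetchOps,
// ZeroCopyTensor::Reshape and ZeroCopyTensor::mutable_data); names and shapes
// are placeholders.
//
//   AnalysisConfig config;
//   config.SwitchUseFeedFetchOps(false);  // required before using ZeroCopyRun()
//   auto predictor = CreatePaddlePredictor(config);
//
//   auto input = predictor->GetInputTensor(predictor->GetInputNames()[0]);
//   input->Reshape({1, 3, 224, 224});
//   float *input_data = input->mutable_data<float>(PaddlePlace::kCPU);
//   // ... fill input_data ...
//
//   predictor->ZeroCopyRun();
//   auto output = predictor->GetOutputTensor(predictor->GetOutputNames()[0]);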

bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with those used
    // in the Python API `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not valid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}
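
// Note on the two layouts accepted above: (a) config_.model_dir() points to a
// directory that holds a "__model__" program file plus one file per
// persistable parameter, or (b) config_.prog_file()/config_.params_file() name
// a program file and a single combined parameters file, as produced by the
// Python API `fluid.io.save_inference_model`. Anything else falls into the
// error branch.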

bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // create a temporary program to load parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to Load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

#if PADDLE_WITH_TENSORRT
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor(with trt) on the real data "
                      "to generate calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (config_.with_profile_) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

#if PADDLE_WITH_MKLDNN
  if (mkldnn_quantizer_) {
    delete mkldnn_quantizer_;
    mkldnn_quantizer_ = nullptr;
  }
#endif
}

std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}
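
// Illustrative note: Clone() shares scope_ and inference_program_ with the
// parent predictor (see Init(parent_scope, program) above), so a common
// multi-threaded pattern is one cloned predictor per worker thread, e.g.
//
//   auto worker_predictor = main_predictor->Clone();  // names are placeholders
//   worker_predictor->Run(inputs, &outputs);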

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

bool AnalysisPredictor::CheckOperatorCompatible() {
  if (!inference_program_) {
    LOG(FATAL) << "Inference program version check failed because the program "
                  "does not exist.";
    return false;
  }
  bool res = true;
  op_compatible_map_.ReadFromProto(*inference_program_->OpCompatibleMap());
  const auto &version = framework::DumpVersion(framework::kCurProgramVersion);
  LOG(INFO) << "MODEL VERSION: "
            << framework::DumpVersion(inference_program_->Version());
  LOG(INFO) << "PREDICTOR VERSION: " << version;
  std::set<std::string> op_types;
  for (size_t i = 0; i < inference_program_->Size(); ++i) {
    const auto &block = inference_program_->Block(i);
    for (const auto *op : block.AllOps()) {
      op_types.insert(op->Type());
    }
  }
  for (const auto type : op_types) {
    auto compatible_type =
        op_compatible_map_.IsRequireMiniVersion(type, version);
    if (compatible_type != framework::OpCompatibleType::compatible) {
      if (!framework::kCurProgramVersion) {
        LOG(WARNING) << " - Version incompatible ("
                     << static_cast<int>(compatible_type) << ") " << type;
      }
      res = false;
    }
  }
  return res;
}

// Save the optimized model (program and parameters) to disk.
void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
  // save model
  std::string model_name = dir + "/model";
  std::ofstream outfile;
  outfile.open(model_name, std::ios::out | std::ios::binary);
  std::string inference_prog_desc = GetSerializedProgram();
  outfile << inference_prog_desc;
  // save params
  framework::ProgramDesc save_program;
  auto *save_block = save_program.MutableBlock(0);

  const framework::ProgramDesc &main_program = program();
  const framework::BlockDesc &global_block = main_program.Block(0);
  std::vector<std::string> save_var_list;
  for (framework::VarDesc *var : global_block.AllVars()) {
    if (IsPersistable(var)) {
      framework::VarDesc *new_var = save_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      save_var_list.push_back(new_var->Name());
    }
  }
  std::sort(save_var_list.begin(), save_var_list.end());
  auto *op = save_block->AppendOp();
  op->SetType("save_combine");
  op->SetInput("X", save_var_list);
  op->SetAttr("file_path", dir + "/params");
  op->CheckAttrs();

  platform::CPUPlace place;
  framework::Executor exe(place);
  exe.Run(save_program, scope(), 0, true, true);
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
USE_TRT_CONVERTER(shuffle_channel);
USE_TRT_CONVERTER(swish);
#endif

#if PADDLE_WITH_ANAKIN
USE_ANAKIN_CONVERTER(mul);
USE_ANAKIN_CONVERTER(fc);
USE_ANAKIN_CONVERTER(conv2d);
USE_ANAKIN_CONVERTER(conv2d_fusion);
USE_ANAKIN_CONVERTER(concat);
USE_ANAKIN_CONVERTER(split);
USE_ANAKIN_CONVERTER(relu);
USE_ANAKIN_CONVERTER(sigmoid);
USE_ANAKIN_CONVERTER(tanh);
USE_ANAKIN_CONVERTER(pool2d);
USE_ANAKIN_CONVERTER(elementwise_add);
USE_ANAKIN_CONVERTER(elementwise_mul);
USE_ANAKIN_CONVERTER(batch_norm);
USE_ANAKIN_CONVERTER(flatten);
USE_ANAKIN_CONVERTER(reshape);
USE_ANAKIN_CONVERTER(transpose);
USE_ANAKIN_CONVERTER(softmax);
USE_ANAKIN_CONVERTER(detection_out);
USE_ANAKIN_CONVERTER(density_prior_box);
USE_ANAKIN_CONVERTER(dropout);
USE_ANAKIN_CONVERTER(sum);
USE_ANAKIN_CONVERTER(prior_box);
USE_ANAKIN_CONVERTER(leaky_relu);
USE_ANAKIN_CONVERTER(affine_channel);
USE_ANAKIN_CONVERTER(relu6);
USE_ANAKIN_CONVERTER(swish);
USE_ANAKIN_CONVERTER(shuffle_channel);
#endif