// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/inference/api/mkldnn_quantizer.h"
#endif

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

#if PADDLE_WITH_ANAKIN
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
#endif

DECLARE_bool(profile);

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
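// A variable is treated as a parameter to load if it is persistable and is
// not one of the framework's bookkeeping variables (the feed/fetch lists and
// RAW-typed variables).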
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (FLAGS_profile) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance.";
    LOG(INFO) << "You can turn it off by setting the gflag '-profile false'.";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }

  // This applies whether MKLDNN is enabled or not.
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    if (config_.use_gpu_) {
      paddle::framework::InitDevices(false, {config_.device_id_});
    } else {
      paddle::framework::InitDevices(false, {});
    }
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;
    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is true, the parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is false, the parameters are loaded in
    // LoadParameters(), and the other persistable variables still need to be
    // created. So in both cases, create the persistable variables first.
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // If enable_ir_optim_ is false, the analysis passes (op fusion, graph
    // analysis, TensorRT subgraph, MKLDNN, etc.) will not be executed.
    OptimizeInferenceProgram();
  } else {
    // If the program is passed from outside, there is no need to optimize
    // it; this logic is used in the Clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu_) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.device_id_);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

void AnalysisPredictor::MkldnnPreSet(const std::vector<PaddleTensor> &inputs) {
#ifdef PADDLE_WITH_MKLDNN
  VLOG(2) << "AnalysisPredictor::Run get_cur_mkldnn_session_id="
          << platform::get_cur_mkldnn_session_id();
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    VLOG(2) << "In mkldnn cache clear mode.";
    platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_CacheClearing);
    platform::set_cur_input_shape_cache_capacity(
        config_.mkldnn_cache_capacity_);
    // Set current_input_shape for caching dynamic shape.
    std::stringstream ss;
    for (size_t i = 0; i < inputs.size(); ++i) {
      for (size_t j = 0; j < inputs[i].shape.size(); ++j) {
        ss << inputs[i].shape[j] << "-";
      }
    }
    VLOG(2) << "Set input shape=" << ss.str();
    platform::set_cur_input_shape_str(ss.str());
  }
#endif
}

void AnalysisPredictor::MkldnnPostReset() {
#ifdef PADDLE_WITH_MKLDNN
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    paddle::platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_Default);
    platform::set_cur_input_shape_cache_capacity(0);
    platform::set_cur_input_shape_str("");
  }
#endif
}
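
// Illustrative sketch (not part of this translation unit): the cache clearing
// mode handled by MkldnnPreSet/MkldnnPostReset above is driven entirely by
// mkldnn_cache_capacity_ in the config. Assuming the corresponding
// AnalysisConfig setter is available in this build, a caller feeding
// variable-shaped inputs would enable it roughly like this:
//
//   AnalysisConfig cfg;
//   cfg.EnableMKLDNN();
//   cfg.SetMkldnnCacheCapacity(10);  // assumed setter; keep <= 10 shapes
//
// With a capacity of 0, the session id stays at the default and no per-shape
// cache eviction happens.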

bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPreSet(inputs);
#endif
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "failed to set feed";
    return false;
  }

  // Run the inference program.
  // If the variables are shared, they need not be created again.
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "failed to get fetches";
    return false;
  }

  // Collect variable shapes for memory optimization.
  if (need_collect_var_shapes_for_memory_optim()) {
    CollectVarShapes();
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope will be held in inference, but the
  // operators assume that the container will be reset after each batch.
  // Here is a bugfix: collect all the container variables, and reset them to
  // a bool; the next time, the operator will call MutableData and construct a
  // new container again, so that the container will be empty for each batch.
  if (sub_scope_) {
    tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  }
  tensor_array_batch_cleaner_.ResetNoTensorVars();

  // Restore cpu_math_library_num_threads to 1, to avoid thread conflicts when
  // the predictor is integrated into a deployment service.
  paddle::platform::SetNumThreads(1);
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPostReset();
#endif
  return true;
}
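
// Illustrative usage sketch (not part of this translation unit): Run() follows
// the feed/fetch convention above. A minimal caller, assuming a model with a
// single float input whose feed target is named "x", might look like:
//
//   PaddleTensor in;
//   in.name = "x";  // hypothetical feed name
//   in.shape = {1, 3, 224, 224};
//   in.dtype = PaddleDType::FLOAT32;
//   in.data.Resize(1 * 3 * 224 * 224 * sizeof(float));
//   // ... fill in.data.data() with input values ...
//
//   std::vector<PaddleTensor> outputs;
//   predictor->Run({in}, &outputs);
//
// The name and shape are placeholders; real values come from the model.
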
bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, expected " << feeds_.size()
               << " but got " << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    auto &input = feed_tensors_[i];
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::INT32) {
      input_ptr = input.mutable_data<int32_t>(ddim, place_);
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    PADDLE_ENFORCE_NOT_NULL(input_ptr);
    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());

    if (platform::is_cpu_place(place_)) {
      // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
      std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                  inputs[i].data.length());
    } else {
#ifdef PADDLE_WITH_CUDA
      platform::DeviceContextPool &pool =
          platform::DeviceContextPool::Instance();
      auto *dev_ctx =
          static_cast<const platform::CUDADeviceContext *>(pool.Get(place_));
      auto dst_gpu_place = boost::get<platform::CUDAPlace>(place_);
      memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                   platform::CPUPlace(), inputs[i].data.data(),
                   inputs[i].data.length(), dev_ctx->stream());
#else
      PADDLE_THROW("Not compiled with CUDA, should not reach here.");
#endif
    }
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed targets of the program do not contain the name ["
                   << name << "] given in the input";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}

template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor produced by the fetch op should always be in CPU memory, so a
  // plain memcpy suffices.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only float32, int64 and int32 are "
                    "supported now.";
    }
  }
  return true;
}


void AnalysisPredictor::PrepareArgument() {
  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableAnalysisOptim(config_.enable_ir_optim_);
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetStaticMemoryOptim(config_.static_memory_optim_);
  argument_.SetStaticMemoryOptimForceUpdate(
      config_.static_memory_optim_force_update_);
  argument_.SetModelFromMemory(config_.model_from_memory_);
  // Analyze inference_program
  argument_.SetUseAnakin(config_.anakin_engine_enabled());
  argument_.SetPredictorID(predictor_id_);
  argument_.SetOptimCacheDir(config_.opt_cache_dir_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
    argument_.SetTensorRtUseCalibMode(config_.trt_use_calib_mode_);
  }

  if (config_.anakin_engine_enabled()) {
    argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_);
    argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_);
    argument_.SetAnakinMinSubgraphSize(config_.anakin_min_subgraph_size_);
    argument_.SetAnakinPrecisionMode(config_.anakin_precision_mode_);
    argument_.SetAnakinAutoConfigLayout(config_.anakin_auto_config_layout_);
    argument_.SetAnakinPassesFilter(config_.anakin_passes_filter_);
    argument_.SetAnakinOpsFilter(config_.anakin_ops_filter_);
    LOG(INFO) << "Anakin subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

#ifdef PADDLE_WITH_MKLDNN
  if (config_.mkldnn_quantizer_enabled()) {
    LOG(INFO) << "Quantization is enabled";
    argument_.SetQuantizeEnabledOpTypes(
        config_.mkldnn_quantizer_config()->enabled_op_types());
    argument_.SetQuantizeExcludedOpIds(
        config_.mkldnn_quantizer_config()->excluded_op_ids());
  }
#endif

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  PrepareArgument();
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  // The config and argument take a lot of storage; once the predictor setup
  // is complete, release these stores.
  argument_.PartiallyRelease();
  config_.PartiallyRelease();
  LOG(INFO) << "== optimize end ==";
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  VLOG(3) << "create AnalysisPredictor";
  PADDLE_ENFORCE(config.is_valid(),
                 "Note: Each config can only be used for one predictor.");
  if (config.use_gpu()) {
    // 1. GPU memory
    PADDLE_ENFORCE_GE(config.memory_pool_init_size_mb(), 0.f);
    PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d",
                      config.gpu_device_id());
    std::vector<std::string> flags;

    float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
    if (fraction_of_gpu_memory > 0.95f) {
      LOG(ERROR)
          << "Allocate too much memory for the GPU memory pool, assigned "
          << config.memory_pool_init_size_mb() << " MB";
      LOG(ERROR) << "Try to shrink the value by setting "
                    "AnalysisConfig::EnableGpu(...)";
    }

    // Only forward the fraction to gflags when it is in the valid range.
    if (fraction_of_gpu_memory >= 0.0f && fraction_of_gpu_memory <= 0.95f) {
      // gflags expects an argv[0]-style first element.
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(fraction_of_gpu_memory);
      flags.push_back(flag);
      flags.push_back("--selected_gpus=" +
                      std::to_string(config.gpu_device_id()));
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  // Each config can only be used for one predictor.
  config.SetInValid();
  auto predictor_p = dynamic_cast<AnalysisPredictor *>(predictor.get());

  if (!predictor_p->Init(nullptr)) {
    return nullptr;
  }

  if (config.mkldnn_quantizer_enabled() && !predictor_p->MkldnnQuantize()) {
    return nullptr;
  }

  return predictor;
}
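
// Illustrative sketch (not part of this translation unit): a typical way to
// obtain a predictor through this factory, assuming a hypothetical model
// directory:
//
//   AnalysisConfig config;
//   config.SetModel("/path/to/model_dir");  // or SetModel(prog, params)
//   auto predictor =
//       CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
//           config);
//
// Note that the config is marked invalid afterwards (SetInValid above), so a
// fresh AnalysisConfig is needed for each predictor.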

bool AnalysisPredictor::MkldnnQuantize() {
#if PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_)
    mkldnn_quantizer_ = new AnalysisPredictor::MkldnnQuantizer(
        *this, config_.mkldnn_quantizer_config());
  return mkldnn_quantizer_->Quantize();
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  return false;
#endif
}

void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedFetchList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FeedFetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no variable named %s",
                 name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no variable named %s",
                 name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

bool AnalysisPredictor::ZeroCopyRun() {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  executor_->Run();
  // Fix TensorArray reuse not cleaned bug.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();

  // Restore cpu_math_library_num_threads to 1, to avoid thread conflicts when
  // the predictor is integrated into a deployment service.
  paddle::platform::SetNumThreads(1);
  return true;
}
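
// Illustrative sketch (not part of this translation unit): ZeroCopyRun()
// bypasses the feed/fetch ops, so the caller moves data through ZeroCopyTensor
// directly. Roughly, assuming a single float input and a CPU host buffer:
//
//   config.SwitchUseFeedFetchOps(false);  // required before ZeroCopyRun()
//   auto input = predictor->GetInputTensor(predictor->GetInputNames()[0]);
//   input->Reshape({1, 3, 224, 224});     // placeholder shape
//   input->copy_from_cpu(host_buffer);    // float *host_buffer (assumed)
//   predictor->ZeroCopyRun();
//   auto output = predictor->GetOutputTensor(predictor->GetOutputNames()[0]);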

bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not valid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    // When the model is loaded from memory, prog_file() holds the serialized
    // program buffer itself.
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

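// Loads the persistable parameters by building a temporary program that
// contains only load / load_combine ops for the persistable variables of the
// main program, and running it once with a NaiveExecutor.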
bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // create a temporary program to load parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to Load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

#if PADDLE_WITH_TENSORRT
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This function can be invoked only in TRT mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor(with trt) on the real data "
                      "to generate calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

#if PADDLE_WITH_MKLDNN
  if (mkldnn_quantizer_) {
    delete mkldnn_quantizer_;
    mkldnn_quantizer_ = nullptr;
  }
#endif

  // TODO(Superjomn) deduce the directory path.
  std::string out_path = inference::analysis::GetMemoryCachePath(
      config_.model_dir(), config_.prog_file());
  if (need_collect_var_shapes_for_memory_optim()) {
    SerializeBatchVarShapes(out_path);
  }
}

std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}

void AnalysisPredictor::CollectVarShapes() {
  VLOG(4) << "Collecting var shapes";
  if (batch_var_shapes_.size() >= max_shape_collect_count_) return;
  std::map<std::string, std::vector<int>> var_shapes;
  for (auto var_name : inference_program_->Block(0).LocalVarNames()) {
    auto *var = sub_scope_->FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->Type() == framework::VarTypeTrait<framework::LoDTensor>::kId ||
        var->Type() == framework::VarTypeTrait<framework::Tensor>::kId) {
      auto &tensor = var->Get<framework::LoDTensor>();
      auto shape = framework::vectorize(tensor.dims());
      var_shapes[var_name].assign(shape.begin(), shape.end());
    }
  }
  batch_var_shapes_.push_back(var_shapes);
  LOG_FIRST_N(INFO, 1) << "Collected " << batch_var_shapes_.size()
                       << " batch of var shapes for analysis";
}

void AnalysisPredictor::SerializeBatchVarShapes(const std::string &path) {
  LOG(INFO) << "serialize batch var shapes to " << path;
  std::ofstream file(path);
  if (!file.is_open()) {
    LOG(ERROR) << "failed to serialize the var shapes to " << path;
    return;
  }

  // The serialized data format:
  // <tensor_name>:dim0,dim1,dim2,;
  // e.g. "fc_0.tmp_0:32,128;" (hypothetical tensor name)
  for (auto &batch : batch_var_shapes_) {
    for (auto &ele : batch) {
      file << ele.first << ":";
      for (size_t i = 0; i < ele.second.size() - 1; i++) {
        file << ele.second[i] << ",";
      }
      file << ele.second.back() << ";";
    }
    file << "\n";
  }
}

bool AnalysisPredictor::need_collect_var_shapes_for_memory_optim() {
  if (need_collect_var_shapes_ >= 0) return need_collect_var_shapes_;
  bool need = false;
  // check if the cache exists
  if (!config_.enable_memory_optim()) {
    need = false;
  } else if (config_.static_memory_optim_ &&
             !inference::IsFileExists(inference::analysis::GetMemoryCachePath(
                 config_.model_dir(), config_.prog_file()))) {
    need = true;
  } else if (config_.static_memory_optim_ &&
             config_.static_memory_optim_force_update_) {
    need = true;
  }

  need_collect_var_shapes_ = need ? 1 : 0;
  return need;
}

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

// Save the optimized model and its parameters to the given directory.
void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
  // save model
  std::string model_name = dir + "/model";
  std::ofstream outfile;
  outfile.open(model_name, std::ios::out | std::ios::binary);
  std::string inference_prog_desc = GetSerializedProgram();
  outfile << inference_prog_desc;
  // save params
  framework::ProgramDesc save_program;
  auto *save_block = save_program.MutableBlock(0);

  const framework::ProgramDesc &main_program = program();
  const framework::BlockDesc &global_block = main_program.Block(0);
  std::vector<std::string> save_var_list;
  for (framework::VarDesc *var : global_block.AllVars()) {
    if (IsPersistable(var)) {
      framework::VarDesc *new_var = save_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      save_var_list.push_back(new_var->Name());
    }
  }
  std::sort(save_var_list.begin(), save_var_list.end());
  auto *op = save_block->AppendOp();
  op->SetType("save_combine");
  op->SetInput("X", save_var_list);
  op->SetAttr("file_path", dir + "/params");
  op->CheckAttrs();

  platform::CPUPlace place;
  framework::Executor exe(place);
  exe.Run(save_program, scope(), 0, true, true);
}
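
// Illustrative note (not part of this translation unit): SaveOptimModel is
// typically called after Init(), once OptimizeInferenceProgram() has run, so
// that the saved "model" and "params" files reflect the optimized graph, e.g.
//
//   predictor->SaveOptimModel("/path/to/output_dir");  // hypothetical path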

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
USE_TRT_CONVERTER(shuffle_channel);
USE_TRT_CONVERTER(swish);
#endif

#if PADDLE_WITH_ANAKIN
USE_ANAKIN_CONVERTER(mul);
USE_ANAKIN_CONVERTER(fc);
USE_ANAKIN_CONVERTER(conv2d);
USE_ANAKIN_CONVERTER(conv2d_fusion);
USE_ANAKIN_CONVERTER(concat);
USE_ANAKIN_CONVERTER(split);
USE_ANAKIN_CONVERTER(relu);
USE_ANAKIN_CONVERTER(sigmoid);
USE_ANAKIN_CONVERTER(tanh);
USE_ANAKIN_CONVERTER(pool2d);
USE_ANAKIN_CONVERTER(elementwise_add);
USE_ANAKIN_CONVERTER(elementwise_mul);
USE_ANAKIN_CONVERTER(batch_norm);
USE_ANAKIN_CONVERTER(flatten);
USE_ANAKIN_CONVERTER(reshape);
USE_ANAKIN_CONVERTER(transpose);
USE_ANAKIN_CONVERTER(softmax);
USE_ANAKIN_CONVERTER(detection_out);
USE_ANAKIN_CONVERTER(density_prior_box);
USE_ANAKIN_CONVERTER(dropout);
USE_ANAKIN_CONVERTER(sum);
USE_ANAKIN_CONVERTER(prior_box);
USE_ANAKIN_CONVERTER(leaky_relu);
USE_ANAKIN_CONVERTER(affine_channel);
USE_ANAKIN_CONVERTER(relu6);
USE_ANAKIN_CONVERTER(swish);
USE_ANAKIN_CONVERTER(shuffle_channel);
#endif