// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/inference/api/mkldnn_quantizer.h"
#endif

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

#if PADDLE_WITH_ANAKIN
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
#endif

DECLARE_bool(profile);

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
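// Returns true for persistable variables that hold the model's parameters;
// the special feed/fetch containers and RAW-typed variables are excluded.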
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (FLAGS_profile) {
    LOG(WARNING) << "Profiler is active, this might affect the performance";
    LOG(INFO) << "You can turn it off by setting the gflag '-profile false'";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }

  // Set the number of math-library threads; this applies with or without
  // MKLDNN.
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare the executor and create the local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    if (config_.use_gpu_) {
      paddle::framework::InitDevices(false, {config_.device_id_});
    } else {
      paddle::framework::InitDevices(false, {});
    }
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;

    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, the parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW-type vars) are not created in the scope.
    // If config_.ir_optim() is False, the parameters are loaded in
    // LoadParameters(), and the other persistable variables still need to
    // be created.
    // So in both cases, create the persistable variables first.
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // Optimize the program, and load parameters and modify them in the
    // scope_.
    // This will change the scope_ address.
    if (config_.ir_optim()) {
      status_ir_optim_enabled_ = true;
      OptimizeInferenceProgram();
    } else {
      // Load the parameters.
      LOG(INFO) << "loading parameters";
      LoadParameters();
    }
  } else {
    // If the program is passed in from outside, there is no need to optimize
    // it; this logic is used in the clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu_) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.device_id_);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

void AnalysisPredictor::SetMkldnnThreadID(int tid) {
#ifdef PADDLE_WITH_MKLDNN
  platform::set_cur_thread_id(tid);
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
#endif
}

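// Runs one batch end to end: feeds the given inputs into the feed variables,
// executes the program with the naive executor, and copies the fetch
// variables back into output_data.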
bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "failed to set feed";
    return false;
  }

  // Run the inference program.
  // If the variables are shared, there is no need to create them again.
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "failed to get fetches";
    return false;
  }

  // Collect variable shapes for memory optimization.
  if (need_collect_var_shapes_for_memory_optim()) {
    CollectVarShapes();
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope are held across inference runs, but the
  // operators assume that a container is reset after each batch.
  // Here is a bugfix: collect all the container variables and reset them to a
  // bool; the next time, the operator will call MutableData and construct a
  // new container, so the container is empty for each batch.
  if (sub_scope_) {
    tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  }
  tensor_array_batch_cleaner_.ResetNoTensorVars();
  return true;
}

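// Copies the user-provided PaddleTensors into the program's feed variables,
// carrying over the shape, dtype, and LoD of each input.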
bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size()
               << " but got " << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    auto &input = feed_tensors_[i];
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::INT32) {
      input_ptr = input.mutable_data<int32_t>(ddim, place_);
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    PADDLE_ENFORCE_NOT_NULL(input_ptr);
    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());

    if (platform::is_cpu_place(place_)) {
      // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
      std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                  inputs[i].data.length());
    } else {
#ifdef PADDLE_WITH_CUDA
      platform::DeviceContextPool &pool =
          platform::DeviceContextPool::Instance();
      auto *dev_ctx =
          static_cast<const platform::CUDADeviceContext *>(pool.Get(place_));
      auto dst_gpu_place = boost::get<platform::CUDAPlace>(place_);
      memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                   platform::CPUPlace(), inputs[i].data.data(),
                   inputs[i].data.length(), dev_ctx->stream());
#else
      PADDLE_THROW("Not compiled with CUDA, should not reach here.");
#endif
    }
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names of the program do not contain the "
                      "specified input name: [" << name << "]";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}

template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor output by the fetch op is always in CPU memory, so just copy
  // it.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR)
          << "unknown type, only float32, int64 and int32 are supported now.";
    }
  }
  return true;
}

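// Copies every relevant AnalysisConfig field into argument_, which then
// drives the IR analysis passes.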
void AnalysisPredictor::PrepareArgument() {
  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetStaticMemoryOptim(config_.static_memory_optim_);
  argument_.SetStaticMemoryOptimForceUpdate(
      config_.static_memory_optim_force_update_);
  argument_.SetModelFromMemory(config_.model_from_memory_);
  argument_.SetEngineOptInfo(config_.engine_opt_info_);
  // Analyze inference_program
  argument_.SetUseAnakin(config_.anakin_engine_enabled());
  argument_.SetPredictorID(predictor_id_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
    argument_.SetTensorRtUseCalibMode(config_.trt_use_calib_mode_);
  }

  if (config_.anakin_engine_enabled()) {
    argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_);
    argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_);
    argument_.SetAnakinMinSubgraphSize(config_.anakin_min_subgraph_size_);
    argument_.SetAnakinPrecisionMode(config_.anakin_precision_mode_);
    argument_.SetAnakinAutoConfigLayout(config_.anakin_auto_config_layout_);
    argument_.SetAnakinPassesFilter(config_.anakin_passes_filter_);
    argument_.SetAnakinOpsFilter(config_.anakin_ops_filter_);
    LOG(INFO) << "Anakin subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

#ifdef PADDLE_WITH_MKLDNN
  if (config_.mkldnn_quantizer_enabled()) {
    LOG(INFO) << "Quantization is enabled";
    argument_.SetQuantizeEnabledOpTypes(
        config_.mkldnn_quantizer_config()->enabled_op_types());
    argument_.SetQuantizeExcludedOpIds(
        config_.mkldnn_quantizer_config()->excluded_op_ids());
  }
#endif

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  status_program_optimized_ = true;

  PrepareArgument();
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  LOG(INFO) << "== optimize end ==";
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  VLOG(3) << "create AnalysisConfig";
  if (config.use_gpu()) {
    // 1. GPU memory
    PADDLE_ENFORCE_GE(config.memory_pool_init_size_mb(), 0.f);
    PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d",
                      config.gpu_device_id());
    std::vector<std::string> flags;

    float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
    if (fraction_of_gpu_memory > 0.95f) {
      LOG(ERROR)
          << "Allocated too much memory for the GPU memory pool, assigned "
          << config.memory_pool_init_size_mb() << " MB";
      LOG(ERROR) << "Try to shrink the value by setting "
                    "AnalysisConfig::EnableUseGpu(...)";
    }

    if (fraction_of_gpu_memory >= 0.0f && fraction_of_gpu_memory <= 0.95f) {
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(fraction_of_gpu_memory);
      flags.push_back(flag);
      flags.push_back("--selected_gpus=" +
                      std::to_string(config.gpu_device_id()));
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  auto predictor_p = dynamic_cast<AnalysisPredictor *>(predictor.get());

  if (!predictor_p->Init(nullptr)) {
    return nullptr;
  }

  if (config.mkldnn_quantizer_enabled() && !predictor_p->MkldnnQuantize()) {
    return nullptr;
  }

  return predictor;
}

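// Runs the MKL-DNN quantizer over the predictor's program; only available
// when Paddle is built with PADDLE_WITH_MKLDNN.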
bool AnalysisPredictor::MkldnnQuantize() {
#if PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_)
    mkldnn_quantizer_ = new AnalysisPredictor::MkldnnQuantizer(
        *this, config_.mkldnn_quantizer_config());
  return mkldnn_quantizer_->Quantize();
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  return false;
#endif
}

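// Scans block 0 for feed/fetch ops and records, per column index, the op
// pointer and the corresponding variable name, so that inputs and outputs
// can be addressed by name later.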
void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedFetchList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FeedFetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

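// Illustrative zero-copy flow (a sketch only; the tensor names "x" and "y"
// are hypothetical and must match the model's actual inputs and outputs):
//
//   auto in = predictor->GetInputTensor("x");
//   in->Reshape({1, 3, 224, 224});
//   float *in_data = in->mutable_data<float>(PaddlePlace::kCPU);
//   /* fill in_data with one batch ... */
//   predictor->ZeroCopyRun();
//   auto out = predictor->GetOutputTensor("y");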
std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

bool AnalysisPredictor::ZeroCopyRun() {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  executor_->Run();
  // Fix TensorArray reuse not cleaned bug.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();
  return true;
}

bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not a valid model path '%s' or params path '%s'.",
        config_.model_dir(), config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // create a temporary program to load parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to Load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

#if PADDLE_WITH_TENSORRT
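// Serializes the INT8 calibration table gathered during the calibration runs
// to the model's optimization cache directory, one table per tensorrt_engine
// op.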
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor (with TRT) on real data "
                      "to generate the calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

#if PADDLE_WITH_MKLDNN
  if (mkldnn_quantizer_) {
    delete mkldnn_quantizer_;
    mkldnn_quantizer_ = nullptr;
  }
#endif

  // TODO(Superjomn) deduce the directory path.
  std::string out_path = inference::analysis::GetMemoryCachePath(
      config_.model_dir(), config_.prog_file());
  if (need_collect_var_shapes_for_memory_optim()) {
    SerializeBatchVarShapes(out_path);
  }
}

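// Creates another predictor that shares the scope and the program with this
// one; clone_mutex_ guards against concurrent clones.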
std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}

void AnalysisPredictor::CollectVarShapes() {
  VLOG(4) << "Collecting var shapes";
  if (batch_var_shapes_.size() >= max_shape_collect_count_) return;
  std::map<std::string, std::vector<int>> var_shapes;
  for (auto var_name : inference_program_->Block(0).LocalVarNames()) {
    auto *var = sub_scope_->FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->Type() == framework::VarTypeTrait<framework::LoDTensor>::kId ||
        var->Type() == framework::VarTypeTrait<framework::Tensor>::kId) {
      auto &tensor = var->Get<framework::LoDTensor>();
      auto shape = framework::vectorize(tensor.dims());
      var_shapes[var_name].assign(shape.begin(), shape.end());
    }
  }
  batch_var_shapes_.push_back(var_shapes);
  LOG_FIRST_N(INFO, 1) << "Collected " << batch_var_shapes_.size()
                       << " batch of var shapes for analysis";
}

void AnalysisPredictor::SerializeBatchVarShapes(const std::string &path) {
  LOG(INFO) << "serialize batch var shapes to " << path;
  std::ofstream file(path);
  if (!file.is_open()) {
    LOG(ERROR) << "failed to serialize the var shapes to " << path;
    return;
  }

  // The serialized data format:
  // <tensor_name>:dim0,dim1,dim2,;
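  // e.g. "feed_0:32,3,224,224;fc_0.tmp_1:32,1000;" (illustrative names).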
  for (auto &batch : batch_var_shapes_) {
    for (auto &ele : batch) {
      file << ele.first << ":";
      for (size_t i = 0; i < ele.second.size() - 1; i++) {
        file << ele.second[i] << ",";
      }
      file << ele.second.back() << ";";
    }
    file << "\n";
  }
}

bool AnalysisPredictor::need_collect_var_shapes_for_memory_optim() {
  if (need_collect_var_shapes_ >= 0) return need_collect_var_shapes_;
  bool need = false;
  // check if the cache exists
  if (!config_.enable_memory_optim()) {
    need = false;
  } else if (config_.static_memory_optim_ &&
             !inference::IsFileExists(inference::analysis::GetMemoryCachePath(
                 config_.model_dir(), config_.prog_file()))) {
    need = true;
  } else if (config_.static_memory_optim_ &&
             config_.static_memory_optim_force_update_) {
    need = true;
  }

  need_collect_var_shapes_ = need ? 1 : 0;
  return need;
}

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

// Save the optimized model and its parameters to the given directory.
void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
  // save model
  std::string model_name = dir + "/model";
  std::ofstream outfile;
  outfile.open(model_name, std::ios::out | std::ios::binary);
  std::string inference_prog_desc = GetSerializedProgram();
  outfile << inference_prog_desc;
  // save params
  framework::ProgramDesc save_program;
  auto *save_block = save_program.MutableBlock(0);

  const framework::ProgramDesc &main_program = program();
  const framework::BlockDesc &global_block = main_program.Block(0);
  std::vector<std::string> save_var_list;
  for (framework::VarDesc *var : global_block.AllVars()) {
    if (IsPersistable(var)) {
      framework::VarDesc *new_var = save_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      save_var_list.push_back(new_var->Name());
    }
  }
  std::sort(save_var_list.begin(), save_var_list.end());
  auto *op = save_block->AppendOp();
  op->SetType("save_combine");
  op->SetInput("X", save_var_list);
  op->SetAttr("file_path", dir + "/params");
  op->CheckAttrs();

  platform::CPUPlace place;
  framework::Executor exe(place);
  exe.Run(save_program, scope(), 0, true, true);
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}
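
// Illustrative usage (a sketch; the model path below is hypothetical):
//
//   AnalysisConfig config;
//   config.SetModel("/path/to/model_dir");
//   auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
//   std::vector<PaddleTensor> inputs, outputs;
//   /* fill inputs ... */
//   predictor->Run(inputs, &outputs);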

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
#endif

#if PADDLE_WITH_ANAKIN
USE_ANAKIN_CONVERTER(mul);
USE_ANAKIN_CONVERTER(fc);
USE_ANAKIN_CONVERTER(conv2d);
USE_ANAKIN_CONVERTER(conv2d_fusion);
USE_ANAKIN_CONVERTER(concat);
USE_ANAKIN_CONVERTER(split);
USE_ANAKIN_CONVERTER(relu);
USE_ANAKIN_CONVERTER(sigmoid);
USE_ANAKIN_CONVERTER(tanh);
USE_ANAKIN_CONVERTER(pool2d);
USE_ANAKIN_CONVERTER(elementwise_add);
USE_ANAKIN_CONVERTER(elementwise_mul);
USE_ANAKIN_CONVERTER(batch_norm);
USE_ANAKIN_CONVERTER(flatten);
USE_ANAKIN_CONVERTER(reshape);
USE_ANAKIN_CONVERTER(transpose);
USE_ANAKIN_CONVERTER(softmax);
USE_ANAKIN_CONVERTER(detection_out);
USE_ANAKIN_CONVERTER(density_prior_box);
USE_ANAKIN_CONVERTER(dropout);
USE_ANAKIN_CONVERTER(sum);
USE_ANAKIN_CONVERTER(prior_box);
USE_ANAKIN_CONVERTER(leaky_relu);
USE_ANAKIN_CONVERTER(affine_channel);
USE_ANAKIN_CONVERTER(relu6);
USE_ANAKIN_CONVERTER(swish);
USE_ANAKIN_CONVERTER(shuffle_channel);
#endif