// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/framework/version.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/inference/api/mkldnn_quantizer.h"
#endif

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (config_.with_profile_) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  } else {
    LOG(INFO) << "Profiler is deactivated, and no profiling report will be "
                 "generated.";
  }

  // no matter with or without MKLDNN
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;
    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is False, parameters are loaded in
    // LoadParameters(), and the other persistable variables still need to be
    // created.
    // So in both cases, create the persistable variables first.
    if (!CheckOperatorCompatible()) {
      LOG(WARNING) << "WARNING: Results may be DIFF! "
                      "Please use the corresponding version of the model and "
                      "prediction library, and do not use the develop branch.";
    }
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // If enable_ir_optim_ is false, the analysis passes (op fusion, graph
    // analysis, TensorRT subgraph, MKL-DNN, etc.) will not be executed.
    OptimizeInferenceProgram();
  } else {
    // If the program is passed in from outside, there is no need to optimize
    // it; this logic is used in the clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
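// Choose the execution place from the config (CUDAPlace when GPU is enabled,
// CPUPlace otherwise) and build the NaiveExecutor that runs on it.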
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu_) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.device_id_);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

void AnalysisPredictor::MkldnnPreSet(const std::vector<PaddleTensor> &inputs) {
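  // When oneDNN cache clearing is enabled (mkldnn_cache_capacity_ > 0), switch
  // to the cache-clearing session id and record the current input shapes so
  // that shape-dependent cached primitives can be evicted between batches.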
#ifdef PADDLE_WITH_MKLDNN
  VLOG(2) << "AnalysisPredictor::Run get_cur_mkldnn_session_id="
          << platform::get_cur_mkldnn_session_id();
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    VLOG(2) << "In mkldnn cache clear mode.";
    platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_CacheClearing);
    platform::set_cur_input_shape_cache_capacity(
        config_.mkldnn_cache_capacity_);
    // Set current_input_shape for caching dynamic shape.
    std::stringstream ss;
    for (size_t i = 0; i < inputs.size(); ++i) {
      for (size_t j = 0; j < inputs[i].shape.size(); ++j) {
        ss << inputs[i].shape[j] << "-";
      }
    }
    VLOG(2) << "Set input shape=" << ss.str();
    platform::set_cur_input_shape_str(ss.str());
  }
#endif
}

void AnalysisPredictor::MkldnnPostReset() {
#ifdef PADDLE_WITH_MKLDNN
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    paddle::platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_Default);
    platform::set_cur_input_shape_cache_capacity(0);
    platform::set_cur_input_shape_str("");
  }
#endif
}

bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
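  // Feed/fetch-op based prediction path: copy the inputs into the "feed"
  // variable, run the program with the NaiveExecutor, then copy the "fetch"
  // variable back into output_data.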
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPreSet(inputs);
#endif
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "fail to set feed";
    return false;
  }

  // Run the inference program
  // if share variables, we need not create variables
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "fail to get fetches";
    return false;
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope will be held in inference, but the
  // operators assume that the container will be reset after each batch.
  // Here is a bugfix: collect all the container variables, and reset them to a
  // bool; the next time, the operator will call MutableData and construct a new
  // container again, so that the container will be empty for each batch.
  if (sub_scope_) {
    tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  }
  tensor_array_batch_cleaner_.ResetNoTensorVars();

  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
  // conflict when integrating it into deployment service.
  paddle::platform::SetNumThreads(1);
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPostReset();
#endif
  return true;
}

bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
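  // Copy each user-provided PaddleTensor into a LoDTensor and bind it to the
  // corresponding "feed" variable slot, resolved either by the input's name or
  // by the feed op's "col" attribute.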
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but get "
               << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    auto &input = feed_tensors_[i];
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::INT32) {
      input_ptr = input.mutable_data<int32_t>(ddim, place_);
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    PADDLE_ENFORCE_NOT_NULL(input_ptr);
    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());

    if (platform::is_cpu_place(place_)) {
      // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
      std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                  inputs[i].data.length());
    } else {
#ifdef PADDLE_WITH_CUDA
      platform::DeviceContextPool &pool =
          platform::DeviceContextPool::Instance();
      auto *dev_ctx =
          static_cast<const platform::CUDADeviceContext *>(pool.Get(place_));
      auto dst_gpu_place = boost::get<platform::CUDAPlace>(place_);
      memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                   platform::CPUPlace(), inputs[i].data.data(),
                   inputs[i].data.length(), dev_ctx->stream());
#else
      PADDLE_THROW("Not compile with CUDA, should not reach here.");
#endif
    }
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names from program do not have name: [" << name
                   << "] from specified input";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}

template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The fetched tensor output by the fetch op should always be in CPU memory,
  // so just copy it.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
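  // Collect every "fetch" variable produced by the executor and convert it
  // into a PaddleTensor (shape, data, dtype and LoD) in the output vector.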
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only float32, int64 and int32 are "
                    "supported now.";
    }
  }
  return true;
}

void AnalysisPredictor::PrepareArgument() {
  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetUseFcPadding(config_.use_fc_padding());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableAnalysisOptim(config_.enable_ir_optim_);
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetModelFromMemory(config_.model_from_memory_);
  // Analyze inference_program
  argument_.SetPredictorID(predictor_id_);
  argument_.SetOptimCacheDir(config_.opt_cache_dir_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
    argument_.SetTensorRtUseCalibMode(config_.trt_use_calib_mode_);
  }

  if (config_.lite_engine_enabled()) {
    argument_.SetLitePrecisionMode(config_.lite_precision_mode_);
    argument_.SetLitePassesFilter(config_.lite_passes_filter_);
    argument_.SetLiteOpsFilter(config_.lite_ops_filter_);
    LOG(INFO) << "Lite subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

#ifdef PADDLE_WITH_MKLDNN
  if (config_.mkldnn_quantizer_enabled()) {
    LOG(INFO) << "Quantization is enabled";
    argument_.SetQuantizeEnabledOpTypes(
        config_.mkldnn_quantizer_config()->enabled_op_types());
    argument_.SetQuantizeExcludedOpIds(
        config_.mkldnn_quantizer_config()->excluded_op_ids());
  }
#endif

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetDisableLogs(config_.glog_info_disabled());
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  PrepareArgument();
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  // The config and argument take a lot of storage; once the predictor settings
  // are complete, release these stores.
  argument_.PartiallyRelease();
  config_.PartiallyRelease();
  LOG(INFO) << "======= optimize end =======";
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
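  // A minimal usage sketch (names and paths are illustrative only):
  //   AnalysisConfig cfg;
  //   cfg.SetModel("/path/to/model_dir");
  //   auto predictor = CreatePaddlePredictor(cfg);
  //   std::vector<PaddleTensor> inputs, outputs;
  //   predictor->Run(inputs, &outputs);
  // Note that each AnalysisConfig may only be used to create one predictor.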
  if (config.glog_info_disabled()) {
    FLAGS_logtostderr = 1;
    FLAGS_minloglevel = 2;  // GLOG_ERROR
  }
  VLOG(3) << "create AnalysisConfig";
  PADDLE_ENFORCE(config.is_valid(),
                 "Note: Each config can only be used for one predictor.");
  if (config.use_gpu()) {
    // 1. GPU memory
    PADDLE_ENFORCE_GE(config.memory_pool_init_size_mb(), 0.f);
    PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d",
                      config.gpu_device_id());
    std::vector<std::string> flags;

    float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
    if (fraction_of_gpu_memory > 0.95f) {
      LOG(ERROR)
          << "Allocate too much memory for the GPU memory pool, assigned "
          << config.memory_pool_init_size_mb() << " MB";
      LOG(ERROR)
          << "Try to shink the value by setting AnalysisConfig::EnableGpu(...)";
    }

    if (fraction_of_gpu_memory >= 0.0f || fraction_of_gpu_memory <= 0.95f) {
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(fraction_of_gpu_memory);
      flags.push_back(flag);
      flags.push_back("--cudnn_deterministic=True");
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  // Each config can only be used for one predictor.
  config.SetInValid();
  auto predictor_p = dynamic_cast<AnalysisPredictor *>(predictor.get());

  if (!predictor_p->Init(nullptr)) {
    return nullptr;
  }

  if (config.mkldnn_quantizer_enabled() && !predictor_p->MkldnnQuantize()) {
    return nullptr;
  }

  return predictor;
}

bool AnalysisPredictor::MkldnnQuantize() {
#if PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_)
    mkldnn_quantizer_ = new AnalysisPredictor::MkldnnQuantizer(
        *this, config_.mkldnn_quantizer_config());
  return mkldnn_quantizer_->Quantize();
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  return false;
#endif
}

void AnalysisPredictor::PrepareFeedFetch() {
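  // Walk block 0 of the program and index the feed/fetch ops by their "col"
  // attribute, so inputs and outputs can later be addressed by name or index.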
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedFetchList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FeedFetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::map<std::string, std::vector<int64_t>>
AnalysisPredictor::GetInputTensorShape() {
  std::map<std::string, std::vector<int64_t>> input_shapes;
  std::vector<std::string> names = GetInputNames();
  for (std::string name : names) {
    auto *var = inference_program_->Block(0).FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var, "input %s does not exist.", name);
    input_shapes[name] = var->GetShape();
  }
  return input_shapes;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

bool AnalysisPredictor::ZeroCopyRun() {
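  // Zero-copy path: feed/fetch ops are skipped and inputs/outputs are accessed
  // directly through GetInputTensor()/GetOutputTensor(). A rough usage sketch
  // (illustrative only; assumes config.SwitchUseFeedFetchOps(false) was set):
  //   auto t = predictor->GetInputTensor(input_name);
  //   t->Reshape(shape);
  //   t->copy_from_cpu(data.data());
  //   predictor->ZeroCopyRun();
  //   auto out = predictor->GetOutputTensor(output_name);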
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  executor_->Run();
  // Fix TensorArray reuse not cleaned bug.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();

  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
  // conflict when integrating it into deployment service.
  paddle::platform::SetNumThreads(1);
  return true;
}

bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not valid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
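    // When the model is loaded from memory, config_.prog_file() holds the
    // serialized ProgramDesc buffer itself rather than a file path.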
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

bool AnalysisPredictor::LoadParameters() {
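  // Parameters are loaded by building a temporary program that contains either
  // one load op per variable (separate-files layout) or a single load_combine
  // op (combined-params layout), and running it with a NaiveExecutor.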
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // create a temporary program to load parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to Load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

#if PADDLE_WITH_TENSORRT
bool AnalysisPredictor::SaveTrtCalibToDisk() {
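  // Collect the INT8 calibration tables generated by the tensorrt_engine ops
  // during calibration runs and write them into the model's optimization cache
  // directory, so later runs can build INT8 engines without recalibrating.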
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor(with trt) on the real data "
                      "to generate calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (config_.with_profile_) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

#if PADDLE_WITH_MKLDNN
  if (mkldnn_quantizer_) {
    delete mkldnn_quantizer_;
    mkldnn_quantizer_ = nullptr;
  }
#endif
}

std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

bool AnalysisPredictor::CheckOperatorCompatible() {
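  // Compare the operator versions recorded in the model program against the
  // version of this prediction library; returns false when any op may be
  // incompatible, in which case the caller only logs a warning.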
  if (!inference_program_) {
    LOG(FATAL) << "Inference program version check failed because the program "
                  "does not exist.";
    return false;
  }
  bool res = true;
  op_compatible_map_.ReadFromProto(*inference_program_->OpCompatibleMap());
  const auto &version = framework::DumpVersion(framework::kCurProgramVersion);
  LOG(INFO) << "MODEL VERSION: "
            << framework::DumpVersion(inference_program_->Version());
  LOG(INFO) << "PREDICTOR VERSION: " << version;
  std::set<std::string> op_types;
  for (size_t i = 0; i < inference_program_->Size(); ++i) {
    const auto &block = inference_program_->Block(i);
    for (const auto *op : block.AllOps()) {
      op_types.insert(op->Type());
    }
  }
  for (const auto type : op_types) {
    auto compatible_type =
        op_compatible_map_.IsRequireMiniVersion(type, version);
    if (compatible_type != framework::OpCompatibleType::compatible) {
      if (!framework::kCurProgramVersion) {
        LOG(WARNING) << " - Version incompatible ("
                     << static_cast<int>(compatible_type) << ") " << type;
      }
      res = false;
    }
  }
  return res;
}

// Save the optimized program and its parameters to `dir`, so the optimized
// model can be reloaded later without re-running the analysis passes.
void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
  // save model
  std::string model_name = dir + "/model";
  std::ofstream outfile;
  outfile.open(model_name, std::ios::out | std::ios::binary);
  std::string inference_prog_desc = GetSerializedProgram();
  outfile << inference_prog_desc;
  // save params
  framework::ProgramDesc save_program;
  auto *save_block = save_program.MutableBlock(0);

  const framework::ProgramDesc &main_program = program();
  const framework::BlockDesc &global_block = main_program.Block(0);
  std::vector<std::string> save_var_list;
  for (framework::VarDesc *var : global_block.AllVars()) {
    if (IsPersistable(var)) {
      framework::VarDesc *new_var = save_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      save_var_list.push_back(new_var->Name());
    }
  }
  std::sort(save_var_list.begin(), save_var_list.end());
  auto *op = save_block->AppendOp();
  op->SetType("save_combine");
  op->SetInput("X", save_var_list);
  op->SetAttr("file_path", dir + "/params");
  op->CheckAttrs();

  platform::CPUPlace place;
  framework::Executor exe(place);
  exe.Run(save_program, scope(), 0, true, true);
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
USE_TRT_CONVERTER(shuffle_channel);
USE_TRT_CONVERTER(swish);
USE_TRT_CONVERTER(instance_norm);
USE_TRT_CONVERTER(layer_norm);
USE_TRT_CONVERTER(gelu);
USE_TRT_CONVERTER(multihead_matmul);
#endif