// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/framework/version.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#ifdef PADDLE_WITH_MKLML
#include "paddle/fluid/platform/dynload/mklml.h"
#endif
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/inference/api/mkldnn_quantizer.h"
#endif

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

bool PaddleTensorToLoDTensor(const PaddleTensor &pt, framework::LoDTensor *t,
                             const platform::Place &place) {
  framework::DDim ddim = framework::make_ddim(pt.shape);
  void *input_ptr;
  if (pt.dtype == PaddleDType::INT64) {
    input_ptr = t->mutable_data<int64_t>(ddim, place);
  } else if (pt.dtype == PaddleDType::FLOAT32) {
    input_ptr = t->mutable_data<float>(ddim, place);
  } else if (pt.dtype == PaddleDType::INT32) {
    input_ptr = t->mutable_data<int32_t>(ddim, place);
  } else {
    LOG(ERROR) << "unsupported feed type " << pt.dtype;
    return false;
  }

  PADDLE_ENFORCE_NOT_NULL(
      input_ptr,
      paddle::platform::errors::Fatal(
          "Cannot convert to LoDTensor because LoDTensor creation failed."));
  PADDLE_ENFORCE_NOT_NULL(
      pt.data.data(),
      paddle::platform::errors::InvalidArgument(
          "The data contained in the input PaddleTensor is illegal."));

  if (platform::is_cpu_place(place)) {
    // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
    std::memcpy(static_cast<void *>(input_ptr), pt.data.data(),
                pt.data.length());
  } else {
#ifdef PADDLE_WITH_CUDA
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto *dev_ctx =
        static_cast<const platform::CUDADeviceContext *>(pool.Get(place));
    auto dst_gpu_place = boost::get<platform::CUDAPlace>(place);
    memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                 platform::CPUPlace(), pt.data.data(), pt.data.length(),
                 dev_ctx->stream());
#else
    PADDLE_THROW(paddle::platform::errors::Fatal(
        "Not compile with CUDA, should not reach here."));
#endif
  }
  // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
  framework::LoD lod;
  for (auto &level : pt.lod) {
    lod.emplace_back(level);
  }
  t->set_lod(lod);
  return true;
}
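
// A minimal sketch of how a caller might build a feed tensor for the
// conversion above. The name, shape and values are illustrative, not part of
// this file:
//
//   paddle::PaddleTensor pt;
//   pt.name = "x";  // hypothetical input name
//   pt.shape = {1, 3};
//   pt.dtype = paddle::PaddleDType::FLOAT32;
//   std::vector<float> buf = {1.f, 2.f, 3.f};
//   pt.data = paddle::PaddleBuf(buf.data(), buf.size() * sizeof(float));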

bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (config_.with_profile_) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  } else {
    LOG(INFO) << "Profiler is deactivated, and no profiling report will be "
                 "generated.";
  }

  // Set the number of CPU math library threads, with or without MKLDNN.
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;
    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, the parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is False, the parameters are loaded in
    // LoadParameters(), and the other persistable variables still need to be
    // created.
    // So in both cases, create the persistable variables first.
    if (!CheckOperatorCompatible()) {
      LOG(WARNING) << "WARNING: Results may be DIFF! "
                      "Please use the corresponding version of the model and "
                      "prediction library, and do not use the develop branch.";
    }
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // If enable_ir_optim_ is false, the analysis passes (op fusion, graph
    // analysis, TRT subgraph, MKLDNN, etc.) will not be executed.
    OptimizeInferenceProgram();
  } else {
    // If the program is passed in from outside, there is no need to optimize
    // it; this logic is used in the clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu_) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.device_id_);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

void AnalysisPredictor::MkldnnPreSet(const std::vector<PaddleTensor> &inputs) {
#ifdef PADDLE_WITH_MKLDNN
  VLOG(2) << "AnalysisPredictor::Run get_cur_mkldnn_session_id="
          << platform::get_cur_mkldnn_session_id();
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    VLOG(2) << "In mkldnn cache clear mode.";
    platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_CacheClearing);
    platform::set_cur_input_shape_cache_capacity(
        config_.mkldnn_cache_capacity_);
    // Set current_input_shape for caching dynamic shape.
    std::stringstream ss;
    for (size_t i = 0; i < inputs.size(); ++i) {
      for (size_t j = 0; j < inputs[i].shape.size(); ++j) {
        ss << inputs[i].shape[j] << "-";
      }
    }
    VLOG(2) << "Set input shape=" << ss.str();
    platform::set_cur_input_shape_str(ss.str());
  }
#endif
}

void AnalysisPredictor::MkldnnPostReset() {
#ifdef PADDLE_WITH_MKLDNN
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    paddle::platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_Default);
    platform::set_cur_input_shape_cache_capacity(0);
    platform::set_cur_input_shape_str("");
  }
#endif
}
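
// A sketch of how the cache clearing mode handled above can be enabled from
// user code (assuming the MKLDNN options exposed by AnalysisConfig):
//
//   AnalysisConfig config;
//   config.EnableMKLDNN();
//   config.SetMkldnnCacheCapacity(10);  // capacity > 0 enables cache clearing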

bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPreSet(inputs);
#endif
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "fail to set feed";
    return false;
  }

  // Run the inference program.
  // If variables are shared, there is no need to create them again.
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "fail to get fetches";
    return false;
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope will be held during inference, but the
  // operators assume that the container will be reset after each batch.
  // Here is a bugfix: collect all the container variables, and reset them to a
  // bool; the next time, the operator will call MutableData and construct a
  // new container again, so that the container will be empty for each batch.
  if (sub_scope_) {
    tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  }
  tensor_array_batch_cleaner_.ResetNoTensorVars();

  // Restore cpu_math_library_num_threads to 1, to avoid thread conflicts when
  // integrating this predictor into a deployment service.
  paddle::platform::SetNumThreads(1);
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPostReset();
#endif
#if defined(PADDLE_WITH_MKLML) && defined(_LINUX)
  // Frees unused memory allocated by the Intel® MKL Memory Allocator to
  // avoid a memory leak. See:
  // https://software.intel.com/en-us/mkl-developer-reference-c-mkl-free-buffers
  platform::dynload::MKL_Free_Buffers();
// We don't support Windows, since MKL_Free_Buffers is not in
// mklml_win_2019.0.1.20181227.zip. We will upgrade the mklml_win version
// later.
#endif
  return true;
}
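
// A usage sketch for Run(); `predictor` and the input `pt` are assumed to be
// prepared as in the PaddleTensor example above:
//
//   std::vector<paddle::PaddleTensor> inputs = {pt}, outputs;
//   if (predictor->Run(inputs, &outputs)) {
//     // outputs[i].data now holds the fetched result in CPU memory.
//   }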

bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but get "
               << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    framework::LoDTensor *input = &feed_tensors_[i];
    if (!PaddleTensorToLoDTensor(inputs[i], input, place_)) {
      return false;
    }
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names from program do not have name: [" << name
                   << "] from specified input";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, *input, "feed", idx);
  }
  return true;
}

template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor fetched by the fetch op should always be in CPU memory, so
  // just copy.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE(static_cast<size_t>(idx) == i);
    framework::FetchType &fetch_var =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto &fetch = boost::get<framework::LoDTensor>(fetch_var);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only support float32, int64 and int32 now.";
    }
  }
  return true;
}

void AnalysisPredictor::PrepareArgument() {
  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetUseFcPadding(config_.use_fc_padding());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableAnalysisOptim(config_.enable_ir_optim_);
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetModelFromMemory(config_.model_from_memory_);
  // Analyze inference_program
  argument_.SetPredictorID(predictor_id_);
  argument_.SetOptimCacheDir(config_.opt_cache_dir_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
    argument_.SetTensorRtUseCalibMode(config_.trt_use_calib_mode_);
    argument_.SetMinInputShape(config_.min_input_shape_);
    argument_.SetMaxInputShape(config_.max_input_shape_);
    argument_.SetOptimInputShape(config_.optim_input_shape_);
    argument_.SetCloseTrtPluginFp16(config_.disable_trt_plugin_fp16_);
  }

  if (config_.lite_engine_enabled()) {
    argument_.SetLitePrecisionMode(config_.lite_precision_mode_);
    argument_.SetLitePassesFilter(config_.lite_passes_filter_);
    argument_.SetLiteOpsFilter(config_.lite_ops_filter_);
    argument_.SetLiteZeroCopy(config_.lite_zero_copy_);
    argument_.SetUseXpu(config_.use_xpu_);
    argument_.SetXpuL3WorkspaceSize(config_.xpu_l3_workspace_size_);
    LOG(INFO) << "Lite subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

#ifdef PADDLE_WITH_MKLDNN
  if (config_.mkldnn_quantizer_enabled()) {
    LOG(INFO) << "Quantization is enabled";
    argument_.SetQuantizeEnabledOpTypes(
        config_.mkldnn_quantizer_config()->enabled_op_types());
    argument_.SetQuantizeExcludedOpIds(
        config_.mkldnn_quantizer_config()->excluded_op_ids());
  }
#endif

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetDisableLogs(config_.glog_info_disabled());
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  PrepareArgument();
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  // The config and argument take a lot of storage; once the predictor
  // settings are complete, we release that storage.
  argument_.PartiallyRelease();
  config_.PartiallyRelease();
  LOG(INFO) << "======= optimize end =======";
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  if (config.glog_info_disabled()) {
    FLAGS_logtostderr = 1;
    FLAGS_minloglevel = 2;  // GLOG_ERROR
  }
  VLOG(3) << "create AnalysisConfig";
  PADDLE_ENFORCE(config.is_valid(),
                 "Note: Each config can only be used for one predictor.");
  if (config.use_gpu()) {
    // 1. GPU memory
    PADDLE_ENFORCE_GE(config.memory_pool_init_size_mb(), 0.f);
    PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d",
                      config.gpu_device_id());
    std::vector<std::string> flags;

    float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
    if (fraction_of_gpu_memory > 0.95f) {
      LOG(ERROR)
          << "Allocated too much memory for the GPU memory pool, assigned "
          << config.memory_pool_init_size_mb() << " MB";
      LOG(ERROR) << "Try to shrink the value by setting "
                    "AnalysisConfig::EnableUseGpu(...)";
    }

    if (fraction_of_gpu_memory >= 0.0f || fraction_of_gpu_memory <= 0.95f) {
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(fraction_of_gpu_memory);
      flags.push_back(flag);
      flags.push_back("--cudnn_deterministic=True");
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  // Each config can only be used for one predictor.
  config.SetInValid();
  auto predictor_p = dynamic_cast<AnalysisPredictor *>(predictor.get());

  if (!predictor_p->Init(nullptr)) {
    return nullptr;
  }

  if (config.mkldnn_quantizer_enabled() && !predictor_p->MkldnnQuantize()) {
    return nullptr;
  }

  return predictor;
}
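
// The typical creation flow, as a sketch (the model path is hypothetical):
//
//   AnalysisConfig config;
//   config.SetModel("/path/to/model_dir");
//   auto predictor =
//       CreatePaddlePredictor<AnalysisConfig,
//                             PaddleEngineKind::kAnalysis>(config);
//   // The config is marked invalid above and must not be reused.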

bool AnalysisPredictor::MkldnnQuantize() {
#if PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_)
    mkldnn_quantizer_ = new AnalysisPredictor::MkldnnQuantizer(
        *this, config_.mkldnn_quantizer_config());
  return mkldnn_quantizer_->Quantize();
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  return false;
#endif
}
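
// A sketch of enabling the quantizer before the predictor is created
// (assuming the MkldnnQuantizerConfig interface of this codebase):
//
//   config.EnableMkldnnQuantizer();
//   config.mkldnn_quantizer_config()->SetEnabledOpTypes({"conv2d", "pool2d"});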

void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::map<std::string, std::vector<int64_t>>
AnalysisPredictor::GetInputTensorShape() {
  std::map<std::string, std::vector<int64_t>> input_shapes;
  std::vector<std::string> names = GetInputNames();
  for (std::string name : names) {
    auto *var = inference_program_->Block(0).FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var, "input %s does not exist.", name);
    input_shapes[name] = var->GetShape();
  }
  return input_shapes;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

bool AnalysisPredictor::ZeroCopyRun() {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  executor_->Run();
  // Fix the bug where reused TensorArrays are not cleaned up.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();

  // Restore cpu_math_library_num_threads to 1, to avoid thread conflicts when
  // integrating this predictor into a deployment service.
  paddle::platform::SetNumThreads(1);
#if defined(PADDLE_WITH_MKLML) && defined(_LINUX)
  // Frees unused memory allocated by the Intel® MKL Memory Allocator to
  // avoid a memory leak. See:
  // https://software.intel.com/en-us/mkl-developer-reference-c-mkl-free-buffers
  platform::dynload::MKL_Free_Buffers();
// We don't support Windows, since MKL_Free_Buffers is not in
// mklml_win_2019.0.1.20181227.zip. We will upgrade the mklml_win version
// later.
#endif
  return true;
}
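
// A zero-copy flow sketch; it requires config.SwitchUseFeedFetchOps(false),
// and the tensor name "x", the shape and the buffer are illustrative:
//
//   auto input = predictor->GetInputTensor("x");
//   input->Reshape({1, 3});
//   input->copy_from_cpu(buf.data());
//   predictor->ZeroCopyRun();
//   auto output = predictor->GetOutputTensor(predictor->GetOutputNames()[0]);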

bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
679
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with those used
    // in the Python API `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not valid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // Create a temporary program to load parameters.
  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

#if PADDLE_WITH_TENSORRT
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor(with trt) on the real data "
                      "to generate calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif
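
// The INT8 calibration workflow, as a sketch (arguments are illustrative):
//
//   config.EnableTensorRtEngine(1 << 20, 1, 3,
//                               AnalysisConfig::Precision::kInt8,
//                               /*use_static=*/false, /*use_calib_mode=*/true);
//   // Run the predictor on representative data; the calibration table is
//   // then written out by SaveTrtCalibToDisk() above, which the destructor
//   // below invokes.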

AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (config_.with_profile_) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

#if PADDLE_WITH_MKLDNN
  if (mkldnn_quantizer_) {
    delete mkldnn_quantizer_;
    mkldnn_quantizer_ = nullptr;
  }
#endif
}

std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}
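
// A sketch of the per-thread usage enabled by Clone(); the clone shares the
// parent scope, so each worker thread should hold its own clone
// (`main_predictor` is illustrative):
//
//   auto worker = main_predictor->Clone();
//   // Use `worker` on its own thread; do not share one predictor across
//   // threads.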

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

bool AnalysisPredictor::CheckOperatorCompatible() {
  if (!inference_program_) {
    LOG(FATAL) << "Inference program version check failed because the program "
                  "does not exist.";
    return false;
  }
  bool res = true;
  op_compatible_map_.ReadFromProto(*inference_program_->OpCompatibleMap());
  const auto &version = framework::DumpVersion(framework::kCurProgramVersion);
  LOG(INFO) << "MODEL VERSION: "
            << framework::DumpVersion(inference_program_->Version());
  LOG(INFO) << "PREDICTOR VERSION: " << version;
  std::set<std::string> op_types;
  for (size_t i = 0; i < inference_program_->Size(); ++i) {
    const auto &block = inference_program_->Block(i);
    for (const auto *op : block.AllOps()) {
      op_types.insert(op->Type());
    }
  }
  for (const auto &type : op_types) {
    auto compatible_type =
        op_compatible_map_.IsRequireMiniVersion(type, version);
    if (compatible_type != framework::OpCompatibleType::compatible) {
      if (!framework::kCurProgramVersion) {
        LOG(WARNING) << " - Version incompatible ("
                     << static_cast<int>(compatible_type) << ") " << type;
      }
      res = false;
    }
  }
  return res;
}

// Save the optimized program and its parameters to `dir`.
void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
  // save model
  std::string model_name = dir + "/model";
  std::ofstream outfile;
  outfile.open(model_name, std::ios::out | std::ios::binary);
  std::string inference_prog_desc = GetSerializedProgram();
  outfile << inference_prog_desc;
  // save params
  framework::ProgramDesc save_program;
  auto *save_block = save_program.MutableBlock(0);

  const framework::ProgramDesc &main_program = program();
  const framework::BlockDesc &global_block = main_program.Block(0);
  std::vector<std::string> save_var_list;
  for (framework::VarDesc *var : global_block.AllVars()) {
    if (IsPersistable(var)) {
      framework::VarDesc *new_var = save_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      save_var_list.push_back(new_var->Name());
    }
  }
  std::sort(save_var_list.begin(), save_var_list.end());
  auto *op = save_block->AppendOp();
  op->SetType("save_combine");
  op->SetInput("X", save_var_list);
  op->SetAttr("file_path", dir + "/params");
  op->CheckAttrs();

  platform::CPUPlace place;
  framework::Executor exe(place);
  exe.Run(save_program, scope(), 0, true, true);
}
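
// A usage sketch (the directory is hypothetical): after analysis has run,
// the optimized program and combined params can be dumped for reuse:
//
//   dynamic_cast<AnalysisPredictor *>(predictor.get())
//       ->SaveOptimModel("/tmp/optimized_model");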

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(hard_sigmoid);
USE_TRT_CONVERTER(hard_swish);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
USE_TRT_CONVERTER(shuffle_channel);
USE_TRT_CONVERTER(swish);
USE_TRT_CONVERTER(instance_norm);
USE_TRT_CONVERTER(layer_norm);
USE_TRT_CONVERTER(gelu);
USE_TRT_CONVERTER(multihead_matmul);
USE_TRT_CONVERTER(fused_embedding_eltwise_layernorm);
USE_TRT_CONVERTER(skip_layernorm);
USE_TRT_CONVERTER(slice);
USE_TRT_CONVERTER(scale);
USE_TRT_CONVERTER(stack);
#endif