// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/framework/version.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef PADDLE_WITH_MKLML
#include "paddle/fluid/platform/dynload/mklml.h"
#endif

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/inference/api/mkldnn_quantizer.h"
#endif

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

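// Copies a user-provided PaddleTensor into a framework::LoDTensor on the given
// place, converting the shape, dtype, raw data, and LoD information.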
bool PaddleTensorToLoDTensor(const PaddleTensor &pt, framework::LoDTensor *t,
                             const platform::Place &place) {
  framework::DDim ddim = framework::make_ddim(pt.shape);
  void *input_ptr;
  if (pt.dtype == PaddleDType::INT64) {
    input_ptr = t->mutable_data<int64_t>(ddim, place);
  } else if (pt.dtype == PaddleDType::FLOAT32) {
    input_ptr = t->mutable_data<float>(ddim, place);
  } else if (pt.dtype == PaddleDType::INT32) {
    input_ptr = t->mutable_data<int32_t>(ddim, place);
  } else {
    LOG(ERROR) << "unsupported feed type " << pt.dtype;
    return false;
  }

  PADDLE_ENFORCE_NOT_NULL(
      input_ptr,
      paddle::platform::errors::Fatal(
          "Cannot convert to LoDTensor because LoDTensor creation failed."));
  PADDLE_ENFORCE_NOT_NULL(
      pt.data.data(),
      paddle::platform::errors::InvalidArgument(
          "The data contained in the input PaddleTensor is illegal."));

  if (platform::is_cpu_place(place)) {
    // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
    std::memcpy(static_cast<void *>(input_ptr), pt.data.data(),
                pt.data.length());
  } else {
#ifdef PADDLE_WITH_CUDA
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto *dev_ctx =
        static_cast<const platform::CUDADeviceContext *>(pool.Get(place));
    auto dst_gpu_place = boost::get<platform::CUDAPlace>(place);
    memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                 platform::CPUPlace(), pt.data.data(), pt.data.length(),
                 dev_ctx->stream());
#else
    PADDLE_THROW(paddle::platform::errors::Fatal(
        "Not compiled with CUDA, should not reach here."));
#endif
  }
  // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
  framework::LoD lod;
  for (auto &level : pt.lod) {
    lod.emplace_back(level);
  }
  t->set_lod(lod);
  return true;
}

bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (config_.with_profile_) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  } else {
    LOG(INFO) << "Profiler is deactivated, and no profiling report will be "
                 "generated.";
  }

  // Set CPU math library threads; this applies with or without MKLDNN.
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;
    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, the parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is False, the parameters are loaded in
    // LoadParameters(), but the other persistable variables still need to be
    // created.
    // So in both cases, create the persistable variables first.
    if (!CheckOperatorCompatible()) {
      LOG(WARNING) << "WARNING: Results may differ! "
                      "Please use the corresponding version of the model and "
                      "prediction library, and do not use the develop branch.";
    }
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // If enable_ir_optim_ is false, the analysis passes (op fusion, graph
    // analysis, TensorRT subgraph, MKLDNN, etc.) will not be executed.
    OptimizeInferenceProgram();
  } else {
    // If the program is passed in externally, there is no need to optimize
    // it; this logic is used in the clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu()) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.gpu_device_id());
#ifdef PADDLE_WITH_CUDA
    if (config_.thread_local_stream_enabled()) {
      auto *ctx = static_cast<platform::CUDADeviceContext *>(
          platform::DeviceContextPool::Instance().Get(place_));
      VLOG(3) << "The prediction process will be completed using a separate "
                 "normal-priority stream on each thread.";
      ctx->ResetThreadContext(platform::stream::Priority::kNormal);
    }
#endif
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

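// Configures the MKLDNN runtime for the current Run() call. In cache clearing
// mode (mkldnn_cache_capacity_ > 0) it switches to the cache-clearing session
// and records the current input shapes so cached primitives are keyed by shape.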
void AnalysisPredictor::MkldnnPreSet(const std::vector<PaddleTensor> &inputs) {
#ifdef PADDLE_WITH_MKLDNN
  VLOG(2) << "AnalysisPredictor::Run get_cur_mkldnn_session_id="
          << platform::get_cur_mkldnn_session_id();
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    VLOG(2) << "In mkldnn cache clear mode.";
    platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_CacheClearing);
    platform::set_cur_input_shape_cache_capacity(
        config_.mkldnn_cache_capacity_);
    // Set current_input_shape for caching dynamic shape.
    std::stringstream ss;
    for (size_t i = 0; i < inputs.size(); ++i) {
      for (size_t j = 0; j < inputs[i].shape.size(); ++j) {
        ss << inputs[i].shape[j] << "-";
      }
    }
    VLOG(2) << "Set input shape=" << ss.str();
    platform::set_cur_input_shape_str(ss.str());
  }
#endif
}

void AnalysisPredictor::MkldnnPostReset() {
#ifdef PADDLE_WITH_MKLDNN
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    paddle::platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_Default);
    platform::set_cur_input_shape_cache_capacity(0);
    platform::set_cur_input_shape_str("");
  }
#endif
}

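// Runs one batch of inference: copies the feeds into the scope, executes the
// (optimized) program with the NaiveExecutor, and converts the fetched
// LoDTensors back into PaddleTensors.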
bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPreSet(inputs);
#endif
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "fail to set feed";
    return false;
  }

  // Run the inference program
  // if share variables, we need not create variables
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "fail to get fetches";
    return false;
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope will be held during inference, but the
  // operators assume that each container is reset after every batch.
  // As a bugfix, collect all the container variables and reset them to a
  // bool; the next time an operator calls MutableData it constructs a new
  // container, so the container is empty for each batch.
  if (sub_scope_) {
    tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  }
  tensor_array_batch_cleaner_.ResetNoTensorVars();

  // Restore cpu_math_library_num_threads to 1 to avoid thread conflicts when
  // integrating the predictor into a deployment service.
  paddle::platform::SetNumThreads(1);
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPostReset();
#endif
#if defined(PADDLE_WITH_MKLML) && defined(_LINUX)
  // Frees unused memory allocated by the Intel® MKL Memory Allocator to
  // avoid memory leak. See:
  // https://software.intel.com/en-us/mkl-developer-reference-c-mkl-free-buffers
  platform::dynload::MKL_Free_Buffers();
// We don't support windows since MKL_Free_Buffers is not in
// mklml_win_2019.0.1.20181227.zip. We will upgrade mklml_win version later.
#endif
  return true;
}

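// Copies the user inputs into the "feed" variables of the scope, matching
// inputs either by name (when specify_input_name_ is set) or by position.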
bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but get "
               << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    framework::LoDTensor *input = &feed_tensors_[i];
    if (!PaddleTensorToLoDTensor(inputs[i], input, place_)) {
      return false;
    }
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names from program do not have name: [" << name
                   << "] from specified input";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, *input, "feed", idx);
  }
  return true;
}

template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The fetched tensor, output by the fetch op, should always be in CPU
  // memory, so a plain memcpy suffices.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

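// Reads every fetch variable from the scope and converts it into a
// PaddleTensor (shape, dtype, data, and LoD) in the output vector.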
bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::FetchType &fetch_var =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto &fetch = boost::get<framework::LoDTensor>(fetch_var);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only support float32, int64 and int32 now.";
    }
  }
  return true;
}

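// Copies the user-facing AnalysisConfig settings into the internal Argument
// that drives the analysis passes (TensorRT/Lite subgraphs, MKLDNN,
// quantization, memory optimization, and the IR pass list).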
void AnalysisPredictor::PrepareArgument() {
  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetUseFcPadding(config_.use_fc_padding());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableAnalysisOptim(config_.enable_ir_optim_);
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetModelFromMemory(config_.model_from_memory_);
  // Analyze inference_program
  argument_.SetPredictorID(predictor_id_);
  argument_.SetOptimCacheDir(config_.opt_cache_dir_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
    argument_.SetTensorRtUseCalibMode(config_.trt_use_calib_mode_);
    argument_.SetMinInputShape(config_.min_input_shape_);
    argument_.SetMaxInputShape(config_.max_input_shape_);
    argument_.SetOptimInputShape(config_.optim_input_shape_);
    argument_.SetCloseTrtPluginFp16(config_.disable_trt_plugin_fp16_);
  }

  if (config_.lite_engine_enabled()) {
    argument_.SetLitePrecisionMode(config_.lite_precision_mode_);
    argument_.SetLitePassesFilter(config_.lite_passes_filter_);
    argument_.SetLiteOpsFilter(config_.lite_ops_filter_);
    LOG(INFO) << "Lite subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

#ifdef PADDLE_WITH_MKLDNN
  if (config_.mkldnn_quantizer_enabled()) {
    LOG(INFO) << "Quantization is enabled";
    argument_.SetQuantizeEnabledOpTypes(
        config_.mkldnn_quantizer_config()->enabled_op_types());
    argument_.SetQuantizeExcludedOpIds(
        config_.mkldnn_quantizer_config()->excluded_op_ids());
  }
#endif

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetDisableLogs(config_.glog_info_disabled());
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  PrepareArgument();
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  // The config and argument take a lot of storage; once the predictor is
  // fully set up, we release this storage.
  argument_.PartiallyRelease();
  config_.PartiallyRelease();
  LOG(INFO) << "======= optimize end =======";
}

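// Factory for the analysis predictor. Performs one-time, process-wide gflags
// initialization for GPU (memory pool fraction, allocator strategy), then
// creates and initializes an AnalysisPredictor from the given config.
// A minimal usage sketch (the model path is a placeholder):
//   AnalysisConfig config;
//   config.SetModel("/path/to/model_dir");
//   auto predictor = CreatePaddlePredictor(config);
//   std::vector<PaddleTensor> inputs, outputs;
//   // ... fill inputs ...
//   predictor->Run(inputs, &outputs);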
template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  if (config.glog_info_disabled()) {
    FLAGS_logtostderr = 1;
    FLAGS_minloglevel = 2;  // GLOG_ERROR
  }
  VLOG(3) << "create AnalysisConfig";
  PADDLE_ENFORCE(config.is_valid(),
                 "Note: Each config can only be used for one predictor.");

  if (config.use_gpu()) {
    static std::once_flag gflags_initialized;
    static bool process_level_allocator_enabled;

    std::call_once(gflags_initialized, [&]() {
      std::vector<std::string> gflags;
      PADDLE_ENFORCE_GE(
          config.memory_pool_init_size_mb(), 0.f,
          platform::errors::InvalidArgument(
              "The size of memory pool should be greater than 0."));
      PADDLE_ENFORCE_GE(
          config.gpu_device_id(), 0,
          platform::errors::InvalidArgument(
              "Invalid device id (%d). The device id should be greater than 0.",
              config.gpu_device_id()));
      gflags.push_back("dummy");

      float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
      if (fraction_of_gpu_memory > 0.95f) {
        LOG(ERROR)
            << "Allocated too much memory for the GPU memory pool, assigned "
            << config.memory_pool_init_size_mb() << " MB";
        LOG(ERROR) << "Try to shrink the value by setting "
                      "AnalysisConfig::EnableGpu(...)";
      }

      if (fraction_of_gpu_memory >= 0.0f && fraction_of_gpu_memory <= 0.95f) {
        std::string flag = "--fraction_of_gpu_memory_to_use=" +
                           std::to_string(fraction_of_gpu_memory);
        VLOG(3) << "set flag: " << flag;
        gflags.push_back(flag);
        gflags.push_back("--cudnn_deterministic=True");
      }

      if (config.thread_local_stream_enabled()) {
        gflags.push_back("--allocator_strategy=thread_local");
        process_level_allocator_enabled = false;
      } else {
        gflags.push_back("--allocator_strategy=naive_best_fit");
        process_level_allocator_enabled = true;
      }

      if (framework::InitGflags(gflags)) {
        VLOG(3) << "The following gpu analysis configurations only take effect "
                   "for the first predictor: ";
        for (size_t i = 1; i < gflags.size(); ++i) {
          VLOG(3) << gflags[i];
        }
      } else {
        LOG(WARNING) << "The one-time configuration of analysis predictor "
                        "failed, which may be due to native predictor called "
                        "first and its configurations taken effect.";
      }
    });

    if (config.thread_local_stream_enabled() &&
        process_level_allocator_enabled) {
      LOG(FATAL) << " When binding threads and streams, the use of "
                    "process-level allocators will result in undefined errors "
                    "due to asynchronous memory operations. "
                    "The thread and stream binding configuration of all "
                    "predictors should be the same in a single process.";
580 581 582 583
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  // Each config can only be used for one predictor.
  config.SetInValid();
  auto predictor_p = dynamic_cast<AnalysisPredictor *>(predictor.get());

  if (!predictor_p->Init(nullptr)) {
    return nullptr;
  }

  if (config.mkldnn_quantizer_enabled() && !predictor_p->MkldnnQuantize()) {
    return nullptr;
  }

  return predictor;
}

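// Runs MKLDNN post-training quantization over the loaded program when Paddle
// is built with MKLDNN support; otherwise logs an error and returns false.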
bool AnalysisPredictor::MkldnnQuantize() {
#if PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_)
    mkldnn_quantizer_ = new AnalysisPredictor::MkldnnQuantizer(
        *this, config_.mkldnn_quantizer_config());
  return mkldnn_quantizer_->Quantize();
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  return false;
#endif
}

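// Scans the global block for feed/fetch ops and records their column indices
// and variable names so inputs and outputs can be addressed by name.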
void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::map<std::string, std::vector<int64_t>>
AnalysisPredictor::GetInputTensorShape() {
  std::map<std::string, std::vector<int64_t>> input_shapes;
  std::vector<std::string> names = GetInputNames();
  for (std::string name : names) {
    auto *var = inference_program_->Block(0).FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var, "input %s does not exist.", name);
    input_shapes[name] = var->GetShape();
  }
  return input_shapes;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

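// Runs inference without feed/fetch ops; inputs and outputs are exchanged
// directly through ZeroCopyTensors bound to the executor's scope.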
bool AnalysisPredictor::ZeroCopyRun() {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  executor_->Run();
  // Fix TensorArray reuse not cleaned bug.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();

  // Restore cpu_math_library_num_threads to 1 to avoid thread conflicts when
  // integrating the predictor into a deployment service.
  paddle::platform::SetNumThreads(1);
#if defined(PADDLE_WITH_MKLML) && defined(_LINUX)
  // Frees unused memory allocated by the Intel® MKL Memory Allocator to
  // avoid memory leak. See:
  // https://software.intel.com/en-us/mkl-developer-reference-c-mkl-free-buffers
  platform::dynload::MKL_Free_Buffers();
// We don't support windows since MKL_Free_Buffers is not in
// mklml_win_2019.0.1.20181227.zip. We will upgrade mklml_win version later.
#endif
  return true;
}

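// Loads the serialized ProgramDesc, either from the __model__ file under
// model_dir, from prog_file, or directly from memory when
// config_.model_from_memory() is set.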
bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not valid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

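// Builds a temporary program that contains a load / load_combine op for every
// persistable variable of the inference program and runs it with a
// NaiveExecutor to bring the parameters into the scope.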
bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // create a temporary program to load parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
799
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
800 801 802 803 804
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
812
    op->SetAttr("file_path", {config_.params_file()});
813 814 815 816
    op->CheckAttrs();
  }

  // Use NaiveExecutor to Load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

#if PADDLE_WITH_TENSORRT
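// Collects the INT8 calibration tables produced by the TensorRT calibration
// engines and writes them into the model's optimization cache directory, so
// later runs can build INT8 engines without re-calibrating.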
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor(with trt) on the real data "
                      "to generate calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (config_.with_profile_) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

#if PADDLE_WITH_MKLDNN
  if (mkldnn_quantizer_) {
    delete mkldnn_quantizer_;
    mkldnn_quantizer_ = nullptr;
  }
#endif
}

std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

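// Compares the operator version requirements recorded in the model's
// OpCompatibleMap against the current predictor version and warns when any
// operator in the program is incompatible.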
bool AnalysisPredictor::CheckOperatorCompatible() {
  if (!inference_program_) {
    LOG(FATAL) << "Inference program version check failed because the program "
                  "does not exist.";
    return false;
  }
  bool res = true;
  op_compatible_map_.ReadFromProto(*inference_program_->OpCompatibleMap());
  const auto &version = framework::DumpVersion(framework::kCurProgramVersion);
  LOG(INFO) << "MODEL VERSION: "
            << framework::DumpVersion(inference_program_->Version());
  LOG(INFO) << "PREDICTOR VERSION: " << version;
  std::set<std::string> op_types;
  for (size_t i = 0; i < inference_program_->Size(); ++i) {
    const auto &block = inference_program_->Block(i);
    for (const auto *op : block.AllOps()) {
      op_types.insert(op->Type());
    }
  }
  for (const auto &type : op_types) {
    auto compatible_type =
        op_compatible_map_.IsRequireMiniVersion(type, version);
    if (compatible_type != framework::OpCompatibleType::compatible) {
      if (!framework::kCurProgramVersion) {
        LOG(WARNING) << " - Version incompatible ("
                     << static_cast<int>(compatible_type) << ") " << type;
      }
      res = false;
    }
  }
  return res;
}

// Saves the optimized inference program and its parameters to `dir`, so the
// analysis passes do not need to be run again when the model is reloaded.
void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
  // save model
  std::string model_name = dir + "/model";
  std::ofstream outfile;
  outfile.open(model_name, std::ios::out | std::ios::binary);
  std::string inference_prog_desc = GetSerializedProgram();
  outfile << inference_prog_desc;
  // save params
  framework::ProgramDesc save_program;
  auto *save_block = save_program.MutableBlock(0);

  const framework::ProgramDesc &main_program = program();
  const framework::BlockDesc &global_block = main_program.Block(0);
  std::vector<std::string> save_var_list;
  for (framework::VarDesc *var : global_block.AllVars()) {
    if (IsPersistable(var)) {
      framework::VarDesc *new_var = save_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      save_var_list.push_back(new_var->Name());
    }
  }
  std::sort(save_var_list.begin(), save_var_list.end());
  auto *op = save_block->AppendOp();
  op->SetType("save_combine");
  op->SetInput("X", save_var_list);
  op->SetAttr("file_path", dir + "/params");
  op->CheckAttrs();

  platform::CPUPlace place;
  framework::Executor exe(place);
  exe.Run(save_program, scope(), 0, true, true);
}

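// Convenience overload that dispatches to the kAnalysis engine specialization.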
template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(hard_sigmoid);
USE_TRT_CONVERTER(hard_swish);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
USE_TRT_CONVERTER(shuffle_channel);
USE_TRT_CONVERTER(swish);
USE_TRT_CONVERTER(instance_norm);
USE_TRT_CONVERTER(layer_norm);
USE_TRT_CONVERTER(gelu);
USE_TRT_CONVERTER(multihead_matmul);
USE_TRT_CONVERTER(fused_embedding_eltwise_layernorm);
USE_TRT_CONVERTER(skip_layernorm);
USE_TRT_CONVERTER(slice);
USE_TRT_CONVERTER(scale);
#endif