// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/profiler.h"

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

DECLARE_bool(profile);

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

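// Initialization pipeline: prepare (or reuse) the scope, create the
// NaiveExecutor, load and optionally optimize the program, then create the
// local variables and record the feed/fetch ops.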
bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (FLAGS_profile) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    LOG(INFO) << "You can turn it off by setting the gflag '-profile false'";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }

  // Set the number of math-library threads, with or without MKLDNN.
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;

    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, the parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is False, the parameters are loaded in
    // LoadParameters(), and the other persistable variables still need to be
    // created. So in both cases, create the persistable variables first.
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // Optimize the program, and load parameters and modify them in the
    // scope_.
    // This will change the scope_ address.
    if (config_.ir_optim()) {
      status_ir_optim_enabled_ = true;
      OptimizeInferenceProgram();
    } else {
      // Load parameters
      LOG(INFO) << "load parameters ";
      LoadParameters();
    }
  } else {
    // If the program is passed in from outside, there is no need to optimize
    // it; this logic is used in the clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu_) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.device_id_);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

void AnalysisPredictor::SetMkldnnThreadID(int tid) {
#ifdef PADDLE_WITH_MKLDNN
  platform::set_cur_thread_id(tid);
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
#endif
}

bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  if (UNLIKELY(config_.cpu_math_library_num_threads() > 1)) {
    paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  }
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "fail to set feed";
    return false;
  }

  // Run the inference program
  // if share variables, we need not create variables
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "fail to get fetches";
    return false;
  }

  // Collect variable shapes for memory optimization.
  if (need_collect_var_shapes_for_memory_optim()) {
    CollectVarShapes();
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope will be held during inference, but the
  // operators assume that the container is reset after each batch.
  // Here is a bugfix: collect all the container variables and reset them to a
  // bool; the next time, the operator will call MutableData and construct a
  // new container, so that the container is empty for each batch.
  tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  tensor_array_batch_cleaner_.ResetNoTensorVars();
  return true;
}

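// Copy the user-provided PaddleTensors into feed LoDTensors: a memcpy on CPU,
// or a copy through the CUDA device context on GPU, then bind each tensor to
// the column index of its "feed" op.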
bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but get "
               << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    auto &input = feed_tensors_[i];
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::INT32) {
      input_ptr = input.mutable_data<int32_t>(ddim, place_);
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    if (platform::is_cpu_place(place_)) {
      // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
      std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                  inputs[i].data.length());
    } else {
#ifdef PADDLE_WITH_CUDA
      platform::DeviceContextPool &pool =
          platform::DeviceContextPool::Instance();
      auto *dev_ctx =
          static_cast<const platform::CUDADeviceContext *>(pool.Get(place_));
      auto dst_gpu_place = boost::get<platform::CUDAPlace>(place_);
      memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                   platform::CPUPlace(), inputs[i].data.data(),
                   inputs[i].data.length(), dev_ctx->stream());
#else
      PADDLE_THROW("Not compile with CUDA, should not reach here.");
#endif
    }
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names from program do not have name: [" << name
                   << "] from specified input";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}

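// Copy one fetched LoDTensor (shape, data and LoD) into the corresponding
// PaddleTensor output.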
template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor fetched by the fetch op should always be in CPU memory, so just
  // copy it.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only float32, int64 and int32 are supported now.";
    }
  }
  return true;
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
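// The Analyzer runs the configured IR passes over the program; the analyzed
// program then replaces inference_program_.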
void AnalysisPredictor::OptimizeInferenceProgram() {
  status_program_optimized_ = true;

  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetStaticMemoryOptim(config_.static_memory_optim_);
  argument_.SetStaticMemoryOptimForceUpdate(
      config_.static_memory_optim_force_update_);
  argument_.SetModelFromMemory(config_.model_from_memory_);
  // Analyze inference_program
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  LOG(INFO) << "== optimize end ==";
}

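// Factory for the analysis predictor. For GPU configs, the memory-pool
// fraction is forwarded to gflags before the predictor is initialized.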
template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  VLOG(3) << "create AnalysisConfig";
  if (config.use_gpu()) {
    // 1. GPU memory
    PADDLE_ENFORCE_GT(config.memory_pool_init_size_mb(), 0.f);
    PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d",
                      config.gpu_device_id());
    std::vector<std::string> flags;

    float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
    if (fraction_of_gpu_memory > 0.95f) {
      LOG(ERROR)
          << "Allocate too much memory for the GPU memory pool, assigned "
          << config.memory_pool_init_size_mb() << " MB";
      LOG(ERROR)
          << "Try to shrink the value by setting AnalysisConfig::EnableUseGpu(...)";
    }

    if (fraction_of_gpu_memory >= 0.0f || fraction_of_gpu_memory <= 0.95f) {
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(fraction_of_gpu_memory);
      flags.push_back(flag);
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  if (!dynamic_cast<AnalysisPredictor *>(predictor.get())->Init(nullptr)) {
    return nullptr;
  }
  return predictor;
}

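// Scan block 0 for "feed"/"fetch" ops and record their column indices and
// variable names, so that inputs and outputs can be addressed by name.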
void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedFetchList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FeedFetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

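// Zero-copy tensors wrap variables of the executor scope directly, so callers
// can fill inputs and read outputs without the extra copies made by
// SetFeed/GetFetch.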
std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

bool AnalysisPredictor::ZeroCopyRun() {
  executor_->Run();
  // Fix TensorArray reuse not cleaned bug.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();
  return true;
}

bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not valid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

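// Build a temporary program that contains only load / load_combine ops for the
// persistable variables, then run it once with a NaiveExecutor to materialize
// the parameters in scope_.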
bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // create a temporary program to load parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to Load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

#if PADDLE_WITH_TENSORRT
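// Wait for the INT8 calibration threads of every tensorrt_engine op to finish,
// then write the calibration table into the model's optimization cache
// directory.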
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor (with TRT) on the real data "
                      "to generate calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

  // TODO(Superjomn) deduce the directory path.
  std::string out_path = inference::analysis::GetMemoryCachePath(
      config_.model_dir(), config_.prog_file());
  if (need_collect_var_shapes_for_memory_optim()) {
    SerializeBatchVarShapes(out_path);
  }
}

std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}

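// Static memory optimization needs per-batch variable shapes: they are
// collected during Run() and serialized to a cache file when the predictor is
// destroyed.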
void AnalysisPredictor::CollectVarShapes() {
  VLOG(4) << "Collecting var shapes";
  if (batch_var_shapes_.size() >= max_shape_collect_count_) return;
  std::map<std::string, std::vector<int>> var_shapes;
  for (auto var_name : inference_program_->Block(0).LocalVarNames()) {
    auto *var = sub_scope_->FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->Type() == framework::VarTypeTrait<framework::LoDTensor>::kId ||
        var->Type() == framework::VarTypeTrait<framework::Tensor>::kId) {
      auto &tensor = var->Get<framework::LoDTensor>();
      auto shape = framework::vectorize(tensor.dims());
      var_shapes[var_name].assign(shape.begin(), shape.end());
    }
  }
  batch_var_shapes_.push_back(var_shapes);
  LOG_FIRST_N(INFO, 1) << "Collected " << batch_var_shapes_.size()
                       << " batch of var shapes for analysis";
}

void AnalysisPredictor::SerializeBatchVarShapes(const std::string &path) {
  LOG(INFO) << "serialize batch var shapes to " << path;
  std::ofstream file(path);
  if (!file.is_open()) {
    LOG(ERROR) << "failed to serialize the var shapes to " << path;
    return;
  }

  // The serialized data format:
  // <tensor_name>:dim0,dim1,dim2,;
  for (auto &batch : batch_var_shapes_) {
    for (auto &ele : batch) {
      file << ele.first << ":";
      for (size_t i = 0; i < ele.second.size() - 1; i++) {
        file << ele.second[i] << ",";
      }
      file << ele.second.back() << ";";
    }
    file << "\n";
  }
}

bool AnalysisPredictor::need_collect_var_shapes_for_memory_optim() {
  if (need_collect_var_shapes_ >= 0) return need_collect_var_shapes_;
  bool need = false;
  // check if the cache exists
  if (!config_.enable_memory_optim()) {
    need = false;
  } else if (config_.static_memory_optim_ &&
             !inference::IsFileExists(inference::analysis::GetMemoryCachePath(
                 config_.model_dir(), config_.prog_file()))) {
    need = true;
  } else if (config_.static_memory_optim_ &&
             config_.static_memory_optim_force_update_) {
    need = true;
  }

  need_collect_var_shapes_ = need ? 1 : 0;
  return need;
}

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

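// A minimal usage sketch (illustrative only; the model path is a placeholder,
// and SetModel/EnableUseGpu are assumed from the public AnalysisConfig API):
//
//   AnalysisConfig config;
//   config.SetModel("/path/to/model_dir");
//   config.EnableUseGpu(100 /*MB pool*/, 0 /*device id*/);  // optional
//   auto predictor = CreatePaddlePredictor(config);
//   std::vector<PaddleTensor> inputs, outputs;
//   // ... fill inputs ...
//   predictor->Run(inputs, &outputs);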
}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
#endif