// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/inference/api/mkldnn_quantizer.h"
#endif

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

#if PADDLE_WITH_ANAKIN
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
#endif

DECLARE_bool(profile);

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
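// Persistable variables, except the feed/fetch containers and RAW-typed
// variables, are the ones that must be created in the scope and loaded from
// the model.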
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

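// Initialization pipeline: prepare the scope (fresh or shared with a parent
// predictor), create the NaiveExecutor, load and optionally IR-optimize the
// program, then prepare the executor and the feed/fetch metadata.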
bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (FLAGS_profile) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    LOG(INFO) << "You can turn it off by setting the gflag '-profile false'";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }

  // Set the CPU math library thread count, with or without MKLDNN.
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    if (config_.use_gpu_) {
      paddle::framework::InitDevices(false, {config_.device_id_});
    } else {
      paddle::framework::InitDevices(false, {});
    }
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
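// Loads the ProgramDesc when none is passed in, creates the persistable
// variables, and then either runs the IR optimization or just loads the
// parameters; a program passed in externally (the clone path) is used
// unchanged.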
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;

    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is False, parameters are loaded in
    // LoadParameters(), and the other persistable variables still need to be
    // created. So in both cases, create the persistable variables first.
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // Optimize the program, and load parameters and modify them in the
    // scope_.
    // This will change the scope_ address.
    if (config_.ir_optim()) {
      status_ir_optim_enabled_ = true;
      OptimizeInferenceProgram();
    } else {
      // Load parameters
      LOG(INFO) << "load parameters";
      LoadParameters();
    }
  } else {
    // If the program is passed in externally, there is no need to optimize
    // it; this logic is used in the clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu_) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.device_id_);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

void AnalysisPredictor::SetMkldnnThreadID(int tid) {
#ifdef PADDLE_WITH_MKLDNN
  platform::set_cur_thread_id(tid);
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
#endif
}

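// Runs one prediction using the feed/fetch interface: copies `inputs` into
// the feed variables, executes the program, and copies the fetch variables
// back into `output_data`. A minimal calling sketch (illustrative only; the
// AnalysisConfig setters shown here are assumed to exist in this Paddle
// version):
//
//   AnalysisConfig config;
//   config.SetModel("/path/to/model_dir");
//   auto predictor = CreatePaddlePredictor(config);
//   std::vector<PaddleTensor> inputs = ...;   // filled by the caller
//   std::vector<PaddleTensor> outputs;
//   predictor->Run(inputs, &outputs);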
bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "fail to set feed";
    return false;
  }

  // Run the inference program.
  // If the variables are shared, we do not need to create them again.
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "fail to get fetches";
    return false;
  }

  // Collect variable shapes for memory optimization.
  if (need_collect_var_shapes_for_memory_optim()) {
    CollectVarShapes();
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope will be held during inference, but the
  // operators assume that the container will be reset after each batch.
  // Here is a bugfix: collect all the container variables and reset them to a
  // bool; the next time, the operator will call MutableData and construct a
  // new container, so that the container is empty for each batch.
  tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  tensor_array_batch_cleaner_.ResetNoTensorVars();
  return true;
}

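// Copies the user-provided PaddleTensors into the "feed" variables of the
// scope: allocates a tensor of the right dtype and shape, copies the data
// (host-to-device when running on GPU), and carries the LoD over.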
bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but got "
               << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    auto &input = feed_tensors_[i];
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::INT32) {
      input_ptr = input.mutable_data<int32_t>(ddim, place_);
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    PADDLE_ENFORCE_NOT_NULL(input_ptr);
    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());

    if (platform::is_cpu_place(place_)) {
      // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
      std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                  inputs[i].data.length());
    } else {
#ifdef PADDLE_WITH_CUDA
      platform::DeviceContextPool &pool =
          platform::DeviceContextPool::Instance();
      auto *dev_ctx =
          static_cast<const platform::CUDADeviceContext *>(pool.Get(place_));
      auto dst_gpu_place = boost::get<platform::CUDAPlace>(place_);
      memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                   platform::CPUPlace(), inputs[i].data.data(),
                   inputs[i].data.length(), dev_ctx->stream());
#else
      PADDLE_THROW("Not compiled with CUDA, should not reach here.");
#endif
    }
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names from program do not have name: [" << name
                   << "] from specified input";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}

template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The fetched tensor is output by the fetch op and should always be in CPU
  // memory, so just copy it.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only support float32, int64 and int32 now.";
    }
  }
  return true;
}

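// Copies every relevant AnalysisConfig field into argument_, which drives the
// analysis passes: device choice, model paths, TensorRT/Anakin/MKLDNN engine
// options, memory optimization, and the IR pass list.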
void AnalysisPredictor::PrepareArgument() {
  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetStaticMemoryOptim(config_.static_memory_optim_);
  argument_.SetStaticMemoryOptimForceUpdate(
      config_.static_memory_optim_force_update_);
  argument_.SetModelFromMemory(config_.model_from_memory_);
  argument_.SetEngineOptInfo(config_.engine_opt_info_);
  // Analyze inference_program
  argument_.SetUseAnakin(config_.anakin_engine_enabled());
  argument_.SetPredictorID(predictor_id_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
  }

  if (config_.anakin_engine_enabled()) {
    argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_);
    argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_);
    argument_.SetAnakinMinSubgraphSize(config_.anakin_min_subgraph_size_);
    argument_.SetAnakinPrecisionMode(config_.anakin_precision_mode_);
    argument_.SetAnakinAutoConfigLayout(config_.anakin_auto_config_layout_);
    argument_.SetAnakinPassesFilter(config_.anakin_passes_filter_);
    argument_.SetAnakinOpsFilter(config_.anakin_ops_filter_);
    LOG(INFO) << "Anakin subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

#ifdef PADDLE_WITH_MKLDNN
  if (config_.mkldnn_quantizer_enabled()) {
    LOG(INFO) << "Quantization is enabled";
    argument_.SetQuantizeEnabledOpTypes(
        config_.mkldnn_quantizer_config()->enabled_op_types());
    argument_.SetQuantizeExcludedOpIds(
        config_.mkldnn_quantizer_config()->excluded_op_ids());
  }
#endif

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  status_program_optimized_ = true;

  PrepareArgument();
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  LOG(INFO) << "== optimize end ==";
}
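
// Factory that builds an AnalysisPredictor from an AnalysisConfig: it checks
// the GPU memory-pool settings, initializes gflags/devices, runs Init(), and,
// when enabled, the MKL-DNN quantization step before returning the predictor.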
template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  VLOG(3) << "create AnalysisConfig";
  if (config.use_gpu()) {
    // 1. GPU memory
    PADDLE_ENFORCE_GE(config.memory_pool_init_size_mb(), 0.f);
    PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d",
                      config.gpu_device_id());
    std::vector<std::string> flags;

    float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
    if (fraction_of_gpu_memory > 0.95f) {
      LOG(ERROR)
          << "Allocate too much memory for the GPU memory pool, assigned "
          << config.memory_pool_init_size_mb() << " MB";
      LOG(ERROR)
          << "Try to shrink the value by setting AnalysisConfig::EnableGpu(...)";
    }

    if (fraction_of_gpu_memory >= 0.0f || fraction_of_gpu_memory <= 0.95f) {
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(fraction_of_gpu_memory);
      flags.push_back(flag);
      flags.push_back("--selected_gpus=" +
                      std::to_string(config.gpu_device_id()));
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  auto predictor_p = dynamic_cast<AnalysisPredictor *>(predictor.get());

  if (!predictor_p->Init(nullptr)) {
    return nullptr;
  }

  if (config.mkldnn_quantizer_enabled() && !predictor_p->MkldnnQuantize()) {
    return nullptr;
  }

  return predictor;
}

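// Runs MKL-DNN post-training quantization on this predictor; only available
// when Paddle is built with MKL-DNN, otherwise it logs an error and fails.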
bool AnalysisPredictor::MkldnnQuantize() {
#if PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_)
    mkldnn_quantizer_ = new AnalysisPredictor::MkldnnQuantizer(
        *this, config_.mkldnn_quantizer_config());
  return mkldnn_quantizer_->Quantize();
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  return false;
#endif
}

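// Scans block 0 for "feed" and "fetch" ops and records their column indices
// and variable names, so inputs and outputs can later be addressed by name.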
void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedFetchList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FeedFetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

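// GetInputTensor/GetOutputTensor wrap a scope variable in a ZeroCopyTensor,
// letting callers fill inputs and read outputs in place instead of going
// through the copying PaddleTensor interface.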
std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

bool AnalysisPredictor::ZeroCopyRun() {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  executor_->Run();
  // Fix the bug that reused TensorArrays are not cleaned between runs.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();
  return true;
}

bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with those used in the Python API
    // `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not valid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // Create a temporary program to load the parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // Append a load op for each separately saved parameter.
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // Sort the parameter list to have a consistent ordering.
    std::sort(params.begin(), params.end());
    // Append just the load_combine op.
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to load the parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

#if PADDLE_WITH_TENSORRT
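// Serializes the TensorRT INT8 calibration tables to disk: for every
// tensorrt_engine op, wait for the calibration thread to finish, fetch the
// calibration table, and write it into the model's optimization cache
// directory so a static engine can be built from it later.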
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor(with trt) on the real data "
                      "to generate calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

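// The destructor flushes side products before tearing the predictor down:
// the TensorRT INT8 calibration table (if calibration ran), the profiler
// report, and the collected variable shapes used by static memory
// optimization; it also releases the sub-scope and the MKL-DNN quantizer.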
AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

#if PADDLE_WITH_MKLDNN
  if (mkldnn_quantizer_) {
    delete mkldnn_quantizer_;
    mkldnn_quantizer_ = nullptr;
  }
#endif

  // TODO(Superjomn) deduce the directory path.
  std::string out_path = inference::analysis::GetMemoryCachePath(
      config_.model_dir(), config_.prog_file());
  if (need_collect_var_shapes_for_memory_optim()) {
    SerializeBatchVarShapes(out_path);
  }
}

std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}

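// Records the shapes of all block-0 LoDTensor/Tensor variables for up to
// max_shape_collect_count_ batches; SerializeBatchVarShapes() writes them to
// the memory-optimization cache consumed by the static memory optimizer.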
void AnalysisPredictor::CollectVarShapes() {
  VLOG(4) << "Collecting var shapes";
  if (batch_var_shapes_.size() >= max_shape_collect_count_) return;
  std::map<std::string, std::vector<int>> var_shapes;
  for (auto var_name : inference_program_->Block(0).LocalVarNames()) {
    auto *var = sub_scope_->FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->Type() == framework::VarTypeTrait<framework::LoDTensor>::kId ||
        var->Type() == framework::VarTypeTrait<framework::Tensor>::kId) {
      auto &tensor = var->Get<framework::LoDTensor>();
      auto shape = framework::vectorize(tensor.dims());
      var_shapes[var_name].assign(shape.begin(), shape.end());
    }
  }
  batch_var_shapes_.push_back(var_shapes);
  LOG_FIRST_N(INFO, 1) << "Collected " << batch_var_shapes_.size()
                       << " batch of var shapes for analysis";
}

void AnalysisPredictor::SerializeBatchVarShapes(const std::string &path) {
  LOG(INFO) << "serialize batch var shapes to " << path;
  std::ofstream file(path);
  if (!file.is_open()) {
    LOG(ERROR) << "failed to serialize the var shapes to " << path;
    return;
  }

  // The serialized data format:
  // <tensor_name>:dim0,dim1,dim2,;
  for (auto &batch : batch_var_shapes_) {
    for (auto &ele : batch) {
      file << ele.first << ":";
      for (size_t i = 0; i < ele.second.size() - 1; i++) {
        file << ele.second[i] << ",";
      }
      file << ele.second.back() << ";";
    }
    file << "\n";
  }
}

bool AnalysisPredictor::need_collect_var_shapes_for_memory_optim() {
  if (need_collect_var_shapes_ >= 0) return need_collect_var_shapes_;
  bool need = false;
  // check if the cache exists
  if (!config_.enable_memory_optim()) {
    need = false;
  } else if (config_.static_memory_optim_ &&
             !inference::IsFileExists(inference::analysis::GetMemoryCachePath(
                 config_.model_dir(), config_.prog_file()))) {
    need = true;
  } else if (config_.static_memory_optim_ &&
             config_.static_memory_optim_force_update_) {
    need = true;
  }

  need_collect_var_shapes_ = need ? 1 : 0;
  return need;
}

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

// Saves the optimized program to `dir/model` and all persistable parameters
// to a single combined file `dir/params`, so the analysis result can be
// reloaded later without re-running the optimization passes.
void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
  // save model
  std::string model_name = dir + "/model";
  std::ofstream outfile;
  outfile.open(model_name, std::ios::out | std::ios::binary);
  std::string inference_prog_desc = GetSerializedProgram();
  outfile << inference_prog_desc;
  // save params
  framework::ProgramDesc save_program;
  auto *save_block = save_program.MutableBlock(0);

  const framework::ProgramDesc &main_program = program();
  const framework::BlockDesc &global_block = main_program.Block(0);
  std::vector<std::string> save_var_list;
  for (framework::VarDesc *var : global_block.AllVars()) {
    if (IsPersistable(var)) {
      framework::VarDesc *new_var = save_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      save_var_list.push_back(new_var->Name());
    }
  }
  std::sort(save_var_list.begin(), save_var_list.end());
  auto *op = save_block->AppendOp();
  op->SetType("save_combine");
  op->SetInput("X", save_var_list);
  op->SetAttr("file_path", dir + "/params");
  op->CheckAttrs();

  platform::CPUPlace place;
  framework::Executor exe(place);
  exe.Run(save_program, scope(), 0, true, true);
}

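// Convenience overload that forwards to the kAnalysis specialization above.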
template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
#endif

#if PADDLE_WITH_ANAKIN
USE_ANAKIN_CONVERTER(mul);
USE_ANAKIN_CONVERTER(fc);
USE_ANAKIN_CONVERTER(conv2d);
USE_ANAKIN_CONVERTER(conv2d_fusion);
USE_ANAKIN_CONVERTER(concat);
USE_ANAKIN_CONVERTER(split);
USE_ANAKIN_CONVERTER(relu);
USE_ANAKIN_CONVERTER(sigmoid);
USE_ANAKIN_CONVERTER(tanh);
USE_ANAKIN_CONVERTER(pool2d);
USE_ANAKIN_CONVERTER(elementwise_add);
USE_ANAKIN_CONVERTER(elementwise_mul);
USE_ANAKIN_CONVERTER(batch_norm);
USE_ANAKIN_CONVERTER(flatten);
USE_ANAKIN_CONVERTER(reshape);
USE_ANAKIN_CONVERTER(transpose);
USE_ANAKIN_CONVERTER(softmax);
USE_ANAKIN_CONVERTER(detection_out);
USE_ANAKIN_CONVERTER(density_prior_box);
USE_ANAKIN_CONVERTER(dropout);
USE_ANAKIN_CONVERTER(sum);
USE_ANAKIN_CONVERTER(prior_box);
USE_ANAKIN_CONVERTER(leaky_relu);
USE_ANAKIN_CONVERTER(affine_channel);
USE_ANAKIN_CONVERTER(relu6);
USE_ANAKIN_CONVERTER(swish);
USE_ANAKIN_CONVERTER(shuffle_channel);
#endif