// analysis_predictor.cc
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/profiler.h"

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

#if PADDLE_WITH_ANAKIN
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
#endif

DECLARE_bool(profile);

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

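// Returns true for variables that must outlive a single run: persistable
// variables other than the feed/fetch bookkeeping vars and RAW-typed vars.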
namespace {
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (FLAGS_profile) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    LOG(INFO) << "You can turn it off by setting the gflag '-profile false'";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }

  // Set the number of math library threads (effective with or without MKLDNN).
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

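// Reuses the parent scope when cloning; otherwise initializes the devices and
// creates a fresh root scope. A per-predictor sub-scope is always created.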
bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;

    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is False, parameters are loaded in
    // LoadParameters(), and the other persistable variables still need to be
    // created. So in both cases, create the persistable variables first.
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // Optimize the program, and load parameters and modify them in the
    // scope_.
    // This will change the scope_ address.
    if (config_.ir_optim()) {
      status_ir_optim_enabled_ = true;
      OptimizeInferenceProgram();
    } else {
      // Load parameters
      LOG(INFO) << "load parameters";
      LoadParameters();
    }
  } else {
    // If the program is passed in from outside, there is no need to optimize
    // it; this logic is used in the Clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
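// Chooses the place (CUDAPlace for GPU, CPUPlace otherwise) from the config
// and builds the NaiveExecutor that runs the inference program.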
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu_) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.device_id_);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

void AnalysisPredictor::SetMkldnnThreadID(int tid) {
#ifdef PADDLE_WITH_MKLDNN
  platform::set_cur_thread_id(tid);
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
#endif
}

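// A minimal sketch of the feed/fetch path implemented by Run() below, as a
// caller might use it (the model path and input name are hypothetical, and the
// shapes are only illustrative):
//
//   AnalysisConfig cfg;
//   cfg.SetModel("/path/to/model_dir");  // hypothetical model directory
//   auto predictor = CreatePaddlePredictor(cfg);
//   PaddleTensor x;
//   x.name = "input";                    // hypothetical feed name
//   x.shape = {1, 3};
//   x.dtype = PaddleDType::FLOAT32;
//   x.data.Resize(3 * sizeof(float));    // fill with real feature values
//   std::vector<PaddleTensor> outputs;
//   predictor->Run({x}, &outputs);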
bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  if (UNLIKELY(config_.cpu_math_library_num_threads() > 1)) {
    paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  }
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "failed to set feed";
    return false;
  }

  // Run the inference program.
  // If variables are shared, there is no need to create them again.
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "failed to get fetches";
    return false;
  }

  // Collect variable shapes for memory optimization.
  if (need_collect_var_shapes_for_memory_optim()) {
    CollectVarShapes();
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope will be held across batches in inference,
  // but the operators assume that the containers are reset after each batch.
  // As a bugfix, collect all the container variables and reset them to a bool;
  // the next time, the operator will call MutableData and construct a new
  // container, so that the container is empty for each batch.
  tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  tensor_array_batch_cleaner_.ResetNoTensorVars();
  return true;
}

bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but got "
               << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    auto &input = feed_tensors_[i];
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::INT32) {
      input_ptr = input.mutable_data<int32_t>(ddim, place_);
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    if (platform::is_cpu_place(place_)) {
      // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
      std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                  inputs[i].data.length());
    } else {
#ifdef PADDLE_WITH_CUDA
      platform::DeviceContextPool &pool =
          platform::DeviceContextPool::Instance();
      auto *dev_ctx =
          static_cast<const platform::CUDADeviceContext *>(pool.Get(place_));
      auto dst_gpu_place = boost::get<platform::CUDAPlace>(place_);
      memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                   platform::CPUPlace(), inputs[i].data.data(),
                   inputs[i].data.length(), dev_ctx->stream());
#else
      PADDLE_THROW("Not compiled with CUDA, should not reach here.");
#endif
    }
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names from program do not have name: [" << name
                   << "] from specified input";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}

template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor produced by the fetch op should always be in CPU memory, so a
  // plain copy is enough.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only float32, int64 and int32 are "
                    "supported now.";
    }
  }
  return true;
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  status_program_optimized_ = true;

  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetStaticMemoryOptim(config_.static_memory_optim_);
  argument_.SetStaticMemoryOptimForceUpdate(
      config_.static_memory_optim_force_update_);
  argument_.SetModelFromMemory(config_.model_from_memory_);
  argument_.SetEngineOptInfo(config_.engine_opt_info_);
  // Analyze inference_program
  argument_.SetUseAnakin(config_.anakin_engine_enabled());
  argument_.SetPredictorID(predictor_id_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
  }

  if (config_.use_gpu() && config_.anakin_engine_enabled()) {
    argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_);
    argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_);
    LOG(INFO) << "Anakin subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  LOG(INFO) << "== optimize end ==";
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  VLOG(3) << "create AnalysisPredictor";
  if (config.use_gpu()) {
    // 1. GPU memory
    PADDLE_ENFORCE_GE(config.memory_pool_init_size_mb(), 0.f);
    PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d",
                      config.gpu_device_id());
    std::vector<std::string> flags;

    float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
    if (fraction_of_gpu_memory > 0.95f) {
      LOG(ERROR)
          << "Allocate too much memory for the GPU memory pool, assigned "
          << config.memory_pool_init_size_mb() << " MB";
      LOG(ERROR)
          << "Try to shrink the value by setting AnalysisConfig::EnableGpu(...)";
    }

    if (fraction_of_gpu_memory >= 0.0f || fraction_of_gpu_memory <= 0.95f) {
      flags.push_back("dummpy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(fraction_of_gpu_memory);
      flags.push_back(flag);
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  if (!dynamic_cast<AnalysisPredictor *>(predictor.get())->Init(nullptr)) {
    return nullptr;
  }
  return predictor;
}

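// Scans block 0 of the inference program for feed/fetch ops and records the
// name <-> column-index mappings used by SetFeed()/GetFetch() and the
// ZeroCopyTensor API.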
void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedFetchList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FeedFetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

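// A minimal sketch of the zero-copy path served by the two accessors below
// (assumes config.SwitchUseFeedFetchOps(false) was set, and "input"/"output"
// are hypothetical variable names obtained from GetInputNames()/GetOutputNames()):
//
//   auto in = predictor->GetInputTensor("input");
//   in->Reshape({1, 3});
//   float *in_data = in->mutable_data<float>(PaddlePlace::kCPU);
//   // ... fill in_data, then:
//   predictor->ZeroCopyRun();
//   auto out = predictor->GetOutputTensor("output");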
std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

bool AnalysisPredictor::ZeroCopyRun() {
  executor_->Run();
  // Fix TensorArray reuse not cleaned bug.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();
  return true;
}

bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not valid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

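// Builds a temporary program that contains one load op per persistable
// variable (or a single load_combine op when params_file is set) and runs it
// with a NaiveExecutor to materialize the parameters in scope_.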
bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // create a temporary program to load parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to Load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

#if PADDLE_WITH_TENSORRT
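// Serializes the INT8 calibration table gathered at run time to the model
// optimization cache directory, so later runs can build the TRT engine
// without repeating calibration.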
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor (with TRT) on real data "
                      "to generate the calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

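// The destructor flushes TRT INT8 calibration data if it was collected,
// stops the profiler, releases the sub-scope, and dumps the collected
// variable shapes used by static memory optimization.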
AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

  // TODO(Superjomn) deduce the directory path.
  std::string out_path = inference::analysis::GetMemoryCachePath(
      config_.model_dir(), config_.prog_file());
  if (need_collect_var_shapes_for_memory_optim()) {
    SerializeBatchVarShapes(out_path);
  }
}

std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}

void AnalysisPredictor::CollectVarShapes() {
  VLOG(4) << "Collecting var shapes";
  if (batch_var_shapes_.size() >= max_shape_collect_count_) return;
  std::map<std::string, std::vector<int>> var_shapes;
  for (auto var_name : inference_program_->Block(0).LocalVarNames()) {
    auto *var = sub_scope_->FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->Type() == framework::VarTypeTrait<framework::LoDTensor>::kId ||
        var->Type() == framework::VarTypeTrait<framework::Tensor>::kId) {
      auto &tensor = var->Get<framework::LoDTensor>();
      auto shape = framework::vectorize(tensor.dims());
      var_shapes[var_name].assign(shape.begin(), shape.end());
    }
  }
  batch_var_shapes_.push_back(var_shapes);
  LOG_FIRST_N(INFO, 1) << "Collected " << batch_var_shapes_.size()
                       << " batch of var shapes for analysis";
}

void AnalysisPredictor::SerializeBatchVarShapes(const std::string &path) {
  LOG(INFO) << "serialize batch var shapes to " << path;
  std::ofstream file(path);
  if (!file.is_open()) {
    LOG(ERROR) << "failed to serialize the var shapes to " << path;
    return;
  }

  // The serialized data format:
  // <tensor_name>:dim0,dim1,dim2,;
  for (auto &batch : batch_var_shapes_) {
    for (auto &ele : batch) {
      file << ele.first << ":";
      for (size_t i = 0; i < ele.second.size() - 1; i++) {
        file << ele.second[i] << ",";
      }
      file << ele.second.back() << ";";
    }
    file << "\n";
  }
}

bool AnalysisPredictor::need_collect_var_shapes_for_memory_optim() {
  if (need_collect_var_shapes_ >= 0) return need_collect_var_shapes_;
  bool need = false;
  // check if the cache exists
  if (!config_.enable_memory_optim()) {
    need = false;
  } else if (config_.static_memory_optim_ &&
             !inference::IsFileExists(inference::analysis::GetMemoryCachePath(
                 config_.model_dir(), config_.prog_file()))) {
    need = true;
  } else if (config_.static_memory_optim_ &&
             config_.static_memory_optim_force_update_) {
    need = true;
  }

  need_collect_var_shapes_ = need ? 1 : 0;
  return need;
}

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
#endif

#if PADDLE_WITH_ANAKIN
USE_ANAKIN_CONVERTER(mul);
USE_ANAKIN_CONVERTER(fc);
USE_ANAKIN_CONVERTER(conv2d);
USE_ANAKIN_CONVERTER(conv2d_fusion);
USE_ANAKIN_CONVERTER(concat);
USE_ANAKIN_CONVERTER(split);
USE_ANAKIN_CONVERTER(relu);
USE_ANAKIN_CONVERTER(sigmoid);
USE_ANAKIN_CONVERTER(tanh);
USE_ANAKIN_CONVERTER(pool2d);
USE_ANAKIN_CONVERTER(elementwise_add);
USE_ANAKIN_CONVERTER(elementwise_mul);
USE_ANAKIN_CONVERTER(batch_norm);
USE_ANAKIN_CONVERTER(flatten);
USE_ANAKIN_CONVERTER(reshape);
USE_ANAKIN_CONVERTER(transpose);
USE_ANAKIN_CONVERTER(softmax);
USE_ANAKIN_CONVERTER(detection_out);
USE_ANAKIN_CONVERTER(density_prior_box);
USE_ANAKIN_CONVERTER(dropout);
USE_ANAKIN_CONVERTER(sum);
#endif