// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/inference/api/mkldnn_quantizer.h"
#endif

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

#if PADDLE_WITH_ANAKIN
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
#endif

DECLARE_bool(profile);

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

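// Initialize the predictor: bind or create the scope, create the
// NaiveExecutor, load (and optionally optimize) the inference program, and
// cache the feed/fetch ops.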
bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (FLAGS_profile) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    LOG(INFO) << "You can turn it off by setting the gflag '-profile false'";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }

  // Set the number of CPU math library threads (applies with or without
  // MKLDNN).
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    if (config_.use_gpu_) {
      paddle::framework::InitDevices(false, {config_.device_id_});
    } else {
      paddle::framework::InitDevices(false, {});
    }
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;
    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, the parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is False, the parameters are loaded in
    // LoadParameters(), and the other persistable variables still need to be
    // created.
    // So in both cases, create the persistable variables first.
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // If enable_ir_optim_ is false, the analysis passes (op fusion, graph
    // analysis, TRT subgraph, MKLDNN, etc.) will not be executed.
    OptimizeInferenceProgram();
  } else {
    // If the program is passed in from outside, there is no need to optimize
    // it; this branch is used in the clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu_) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.device_id_);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

void AnalysisPredictor::SetMkldnnThreadID(int tid) {
#ifdef PADDLE_WITH_MKLDNN
  platform::set_cur_thread_id(tid);
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
#endif
}

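// Run one batch through the inference program using the feed/fetch ops: copy
// `inputs` into the feed variables, execute the program with the
// NaiveExecutor, and copy the fetch variables back into `output_data`.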
bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "failed to set feed";
    return false;
  }

  // Run the inference program.
  // If the variables are shared, there is no need to create them again.
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "failed to get fetches";
    return false;
  }

  // Collect variable shapes for memory optimization.
  if (need_collect_var_shapes_for_memory_optim()) {
    CollectVarShapes();
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope are held during inference, but the
  // operators assume that each container is reset after every batch.
  // As a bugfix, collect all the container variables and reset them to a
  // bool; the next time, the operator will call MutableData and construct a
  // new container, so the container is empty for each batch.
  if (sub_scope_) {
    tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  }
  tensor_array_batch_cleaner_.ResetNoTensorVars();

  // Recover cpu_math_library_num_threads to 1 to avoid thread conflicts when
  // integrating the predictor into a deployment service.
  paddle::platform::SetNumThreads(1);

  return true;
}

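// Copy the user-provided PaddleTensor inputs into the "feed" variables of the
// given scope: allocate the destination tensor, copy the data (to device
// memory when running on GPU), and set shape and LoD.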
bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size()
               << " but got " << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    auto &input = feed_tensors_[i];
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, place_);
    } else if (inputs[i].dtype == PaddleDType::INT32) {
      input_ptr = input.mutable_data<int32_t>(ddim, place_);
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    PADDLE_ENFORCE_NOT_NULL(input_ptr);
    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());

    if (platform::is_cpu_place(place_)) {
      // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
      std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                  inputs[i].data.length());
    } else {
#ifdef PADDLE_WITH_CUDA
      platform::DeviceContextPool &pool =
          platform::DeviceContextPool::Instance();
      auto *dev_ctx =
          static_cast<const platform::CUDADeviceContext *>(pool.Get(place_));
      auto dst_gpu_place = boost::get<platform::CUDAPlace>(place_);
      memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                   platform::CPUPlace(), inputs[i].data.data(),
                   inputs[i].data.length(), dev_ctx->stream());
#else
      PADDLE_THROW("Not compiled with CUDA, should not reach here.");
#endif
    }
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names from the program do not contain the name: ["
                   << name << "] from the specified input";
      }
      idx = feed_names_[name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}

template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The fetched tensor output by the fetch op is always in CPU memory, so a
  // plain copy is enough.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = boost::get<int>(fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only float32, int64 and int32 are "
                    "supported now.";
    }
  }
  return true;
}

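// Copy every user-facing option from AnalysisConfig into argument_, which
// drives the IR analysis passes (TensorRT/Anakin subgraphs, MKLDNN,
// quantization and memory optimization).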
void AnalysisPredictor::PrepareArgument() {
  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableAnalysisOptim(config_.enable_ir_optim_);
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetStaticMemoryOptim(config_.static_memory_optim_);
  argument_.SetStaticMemoryOptimForceUpdate(
      config_.static_memory_optim_force_update_);
  argument_.SetModelFromMemory(config_.model_from_memory_);
  // Analyze inference_program
  argument_.SetUseAnakin(config_.anakin_engine_enabled());
  argument_.SetPredictorID(predictor_id_);
  argument_.SetOptimCacheDir(config_.opt_cache_dir_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
    argument_.SetTensorRtUseCalibMode(config_.trt_use_calib_mode_);
  }

  if (config_.anakin_engine_enabled()) {
    argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_);
    argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_);
    argument_.SetAnakinMinSubgraphSize(config_.anakin_min_subgraph_size_);
    argument_.SetAnakinPrecisionMode(config_.anakin_precision_mode_);
    argument_.SetAnakinAutoConfigLayout(config_.anakin_auto_config_layout_);
    argument_.SetAnakinPassesFilter(config_.anakin_passes_filter_);
    argument_.SetAnakinOpsFilter(config_.anakin_ops_filter_);
    LOG(INFO) << "Anakin subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

#ifdef PADDLE_WITH_MKLDNN
  if (config_.mkldnn_quantizer_enabled()) {
    LOG(INFO) << "Quantization is enabled";
    argument_.SetQuantizeEnabledOpTypes(
        config_.mkldnn_quantizer_config()->enabled_op_types());
    argument_.SetQuantizeExcludedOpIds(
        config_.mkldnn_quantizer_config()->excluded_op_ids());
  }
#endif

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  PrepareArgument();
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  // The config and argument take a lot of storage; once the predictor is
  // fully set up, release these stores.
  argument_.PartiallyRelease();
  config_.PartiallyRelease();
  LOG(INFO) << "== optimize end ==";
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  VLOG(3) << "create AnalysisConfig";
  PADDLE_ENFORCE(config.is_valid(),
                 "Note: Each config can only be used for one predictor.");
  if (config.use_gpu()) {
    // 1. GPU memory
    PADDLE_ENFORCE_GE(config.memory_pool_init_size_mb(), 0.f);
    PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d",
                      config.gpu_device_id());
    std::vector<std::string> flags;

    float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
    if (fraction_of_gpu_memory > 0.95f) {
      LOG(ERROR)
          << "Allocate too much memory for the GPU memory pool, assigned "
          << config.memory_pool_init_size_mb() << " MB";
      LOG(ERROR) << "Try to shrink the value by setting "
                    "AnalysisConfig::EnableUseGpu(...)";
    }

    if (fraction_of_gpu_memory >= 0.0f || fraction_of_gpu_memory <= 0.95f) {
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(fraction_of_gpu_memory);
      flags.push_back(flag);
      flags.push_back("--selected_gpus=" +
                      std::to_string(config.gpu_device_id()));
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  // Each config can only be used for one predictor.
  config.SetInValid();
  auto predictor_p = dynamic_cast<AnalysisPredictor *>(predictor.get());

  if (!predictor_p->Init(nullptr)) {
    return nullptr;
  }

  if (config.mkldnn_quantizer_enabled() && !predictor_p->MkldnnQuantize()) {
    return nullptr;
  }

  return predictor;
}

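// A minimal usage sketch of this factory (illustrative only; the model path
// is hypothetical and the option names follow the AnalysisConfig API of this
// release):
//
//   AnalysisConfig config;
//   config.SetModel("/path/to/model_dir");  // or SetModel(prog_file, params_file)
//   config.EnableUseGpu(100 /* memory pool MB */, 0 /* device id */);
//   auto predictor = CreatePaddlePredictor(config);
//   std::vector<PaddleTensor> inputs, outputs;
//   // ... fill `inputs` with name, shape, dtype and data ...
//   predictor->Run(inputs, &outputs);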
bool AnalysisPredictor::MkldnnQuantize() {
#if PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_)
    mkldnn_quantizer_ = new AnalysisPredictor::MkldnnQuantizer(
        *this, config_.mkldnn_quantizer_config());
  return mkldnn_quantizer_->Quantize();
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  return false;
#endif
}

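// Scan the feed/fetch ops of block 0 and record their column indices and
// variable names, so that inputs and outputs can later be looked up by name.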
void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedFetchList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FeedFetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

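// ZeroCopy API: the returned tensor is bound to the named variable in the
// executor scope, so data written through it is consumed by ZeroCopyRun()
// without the extra copies that Run() performs.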
std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = boost::get<platform::CUDAPlace>(place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

bool AnalysisPredictor::ZeroCopyRun() {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  executor_->Run();
  // Fix TensorArray reuse not cleaned bug.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();

  // Recover cpu_math_library_num_threads to 1 to avoid thread conflicts when
  // integrating the predictor into a deployment service.
  paddle::platform::SetNumThreads(1);

  return true;
}

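// Load the program description (the __model__ file or the user-specified
// program file), either from disk or from an in-memory buffer when
// model_from_memory() is set.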
bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with those used in the Python API
    // `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "invalid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // create a temporary program to load parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to Load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

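// Save the TensorRT INT8 calibration table collected at runtime, so that
// later runs can build the INT8 engine without re-running calibration.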
#if PADDLE_WITH_TENSORRT
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          boost::get<std::string>(op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor (with TRT) on real data "
                      "to generate the calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

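// The destructor flushes the TRT INT8 calibration table if calibration ran,
// stops the profiler, releases the sub-scope and the MKLDNN quantizer, and
// serializes the collected variable shapes for static memory optimization.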
AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

#if PADDLE_WITH_MKLDNN
  if (mkldnn_quantizer_) {
    delete mkldnn_quantizer_;
    mkldnn_quantizer_ = nullptr;
  }
#endif

  // TODO(Superjomn) deduce the directory path.
  std::string out_path = inference::analysis::GetMemoryCachePath(
      config_.model_dir(), config_.prog_file());
  if (need_collect_var_shapes_for_memory_optim()) {
    SerializeBatchVarShapes(out_path);
  }
}

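// Clone shares the scope and the already-optimized program with the new
// predictor, so Init() takes the parent_scope branch and the weights are not
// reloaded.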
std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}

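// Helpers for static memory optimization: collect the shapes of the variables
// seen at runtime and serialize them to the memory cache file consumed by the
// memory optimization pass.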
void AnalysisPredictor::CollectVarShapes() {
  VLOG(4) << "Collecting var shapes";
  if (batch_var_shapes_.size() >= max_shape_collect_count_) return;
  std::map<std::string, std::vector<int>> var_shapes;
  for (auto var_name : inference_program_->Block(0).LocalVarNames()) {
    auto *var = sub_scope_->FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->Type() == framework::VarTypeTrait<framework::LoDTensor>::kId ||
        var->Type() == framework::VarTypeTrait<framework::Tensor>::kId) {
      auto &tensor = var->Get<framework::LoDTensor>();
      auto shape = framework::vectorize(tensor.dims());
      var_shapes[var_name].assign(shape.begin(), shape.end());
    }
  }
  batch_var_shapes_.push_back(var_shapes);
  LOG_FIRST_N(INFO, 1) << "Collected " << batch_var_shapes_.size()
                       << " batch of var shapes for analysis";
}

void AnalysisPredictor::SerializeBatchVarShapes(const std::string &path) {
  LOG(INFO) << "serialize batch var shapes to " << path;
  std::ofstream file(path);
  if (!file.is_open()) {
    LOG(ERROR) << "failed to serialize the var shapes to " << path;
    return;
  }

  // The serialized data format:
  // <tensor_name>:dim0,dim1,dim2,;
  for (auto &batch : batch_var_shapes_) {
    for (auto &ele : batch) {
      file << ele.first << ":";
      for (size_t i = 0; i < ele.second.size() - 1; i++) {
        file << ele.second[i] << ",";
      }
      file << ele.second.back() << ";";
    }
    file << "\n";
  }
}

bool AnalysisPredictor::need_collect_var_shapes_for_memory_optim() {
  if (need_collect_var_shapes_ >= 0) return need_collect_var_shapes_;
  bool need = false;
  // check if the cache exists
  if (!config_.enable_memory_optim()) {
    need = false;
  } else if (config_.static_memory_optim_ &&
             !inference::IsFileExists(inference::analysis::GetMemoryCachePath(
                 config_.model_dir(), config_.prog_file()))) {
    need = true;
  } else if (config_.static_memory_optim_ &&
             config_.static_memory_optim_force_update_) {
    need = true;
  }

  need_collect_var_shapes_ = need ? 1 : 0;
  return need;
}

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

// Add SaveOptimModel
void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
  // save model
  std::string model_name = dir + "/model";
  std::ofstream outfile;
  outfile.open(model_name, std::ios::out | std::ios::binary);
  std::string inference_prog_desc = GetSerializedProgram();
  outfile << inference_prog_desc;
  // save params
  framework::ProgramDesc save_program;
  auto *save_block = save_program.MutableBlock(0);

  const framework::ProgramDesc &main_program = program();
  const framework::BlockDesc &global_block = main_program.Block(0);
  std::vector<std::string> save_var_list;
  for (framework::VarDesc *var : global_block.AllVars()) {
    if (IsPersistable(var)) {
      framework::VarDesc *new_var = save_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      save_var_list.push_back(new_var->Name());
    }
  }
  std::sort(save_var_list.begin(), save_var_list.end());
  auto *op = save_block->AppendOp();
  op->SetType("save_combine");
  op->SetInput("X", save_var_list);
  op->SetAttr("file_path", dir + "/params");
  op->CheckAttrs();

  platform::CPUPlace place;
  framework::Executor exe(place);
  exe.Run(save_program, scope(), 0, true, true);
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
#endif

#if PADDLE_WITH_ANAKIN
USE_ANAKIN_CONVERTER(mul);
USE_ANAKIN_CONVERTER(fc);
USE_ANAKIN_CONVERTER(conv2d);
USE_ANAKIN_CONVERTER(conv2d_fusion);
USE_ANAKIN_CONVERTER(concat);
USE_ANAKIN_CONVERTER(split);
USE_ANAKIN_CONVERTER(relu);
USE_ANAKIN_CONVERTER(sigmoid);
USE_ANAKIN_CONVERTER(tanh);
USE_ANAKIN_CONVERTER(pool2d);
USE_ANAKIN_CONVERTER(elementwise_add);
USE_ANAKIN_CONVERTER(elementwise_mul);
USE_ANAKIN_CONVERTER(batch_norm);
USE_ANAKIN_CONVERTER(flatten);
USE_ANAKIN_CONVERTER(reshape);
USE_ANAKIN_CONVERTER(transpose);
USE_ANAKIN_CONVERTER(softmax);
USE_ANAKIN_CONVERTER(detection_out);
USE_ANAKIN_CONVERTER(density_prior_box);
USE_ANAKIN_CONVERTER(dropout);
USE_ANAKIN_CONVERTER(sum);
USE_ANAKIN_CONVERTER(prior_box);
USE_ANAKIN_CONVERTER(leaky_relu);
USE_ANAKIN_CONVERTER(affine_channel);
USE_ANAKIN_CONVERTER(relu6);
USE_ANAKIN_CONVERTER(swish);
USE_ANAKIN_CONVERTER(shuffle_channel);
#endif