// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sstream>
#include <string>
#include <tuple>
#include "paddle/fluid/inference/api/paddle_analysis_config.h"
#include "paddle/fluid/inference/api/paddle_pass_builder.h"
#include "paddle/fluid/inference/utils/table_printer.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/enforce.h"

#ifdef PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/helper.h"
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
DECLARE_uint64(initial_gpu_memory_in_mb);
#endif

namespace paddle {
struct MkldnnQuantizerConfig;

extern const std::vector<std::string> kTRTSubgraphPasses;
extern const std::vector<std::string> kDlnneSubgraphPasses;
extern const std::vector<std::string> kLiteSubgraphPasses;

PassStrategy *AnalysisConfig::pass_builder() const {
  if (!pass_builder_.get()) {
    if (use_gpu_) {
      LOG(INFO) << "Create GPU IR passes";
      pass_builder_.reset(new GpuPassStrategy);
    } else if (use_xpu_) {
      pass_builder_.reset(new XpuPassStrategy);
    } else if (use_npu_) {
      pass_builder_.reset(new NpuPassStrategy);
    } else if (use_ipu_) {
      LOG(INFO) << "Create IPU IR passes";
      pass_builder_.reset(new IpuPassStrategy);
    } else {
      LOG(INFO) << "Create CPU IR passes";
      pass_builder_.reset(new CpuPassStrategy);
    }
  } else if (pass_builder_->use_gpu() ^ use_gpu()) {
    LOG(WARNING) << "The use_gpu flag is not compatible between Config and "
                    "PassBuilder, the flags are "
                 << use_gpu() << " " << pass_builder_->use_gpu();
    LOG(WARNING) << "Please make them compatible, still use the existing "
                    "PassBuilder.";
  }

  return pass_builder_.get();
}

AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
  model_dir_ = model_dir;

  Update();
}
AnalysisConfig::AnalysisConfig(const std::string &prog_file,
                               const std::string &params_file) {
  prog_file_ = prog_file;
  params_file_ = params_file;

  Update();
}
void AnalysisConfig::SetModel(const std::string &prog_file_path,
                              const std::string &params_file_path) {
  prog_file_ = prog_file_path;
  params_file_ = params_file_path;

  Update();
}
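
// Usage sketch (illustrative paths): a model saved as a single directory
// uses the one-argument constructor, while a combined program/params pair
// uses the two-argument constructor or SetModel().
//
//   AnalysisConfig dir_config("/path/to/model_dir");
//   AnalysisConfig file_config("/path/to/model.pdmodel",
//                              "/path/to/model.pdiparams");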

void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
                                  int device_id) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  use_gpu_ = true;
  memory_pool_init_size_mb_ = memory_pool_init_size_mb;
  FLAGS_initial_gpu_memory_in_mb = memory_pool_init_size_mb_;
  gpu_device_id_ = device_id;
#else
  LOG(ERROR) << "Please compile with GPU support to use EnableUseGpu()";
  use_gpu_ = false;
#endif

  Update();
}
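
// Usage sketch (illustrative values): reserve an initial 100 MB memory pool
// on GPU device 0; the pool size is also mirrored into
// FLAGS_initial_gpu_memory_in_mb above.
//
//   AnalysisConfig config("/path/to/model_dir");
//   config.EnableUseGpu(100 /*memory_pool_init_size_mb*/, 0 /*device_id*/);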

void AnalysisConfig::DisableGpu() {
  use_gpu_ = false;

  Update();
}

void AnalysisConfig::Exp_EnableUseGpuFp16(
    std::unordered_set<std::string> op_list) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  use_gpu_fp16_ = true;
  gpu_fp16_disabled_op_types_.insert(op_list.begin(), op_list.end());
#else
  LOG(ERROR) << "Please compile with gpu to Exp_EnableUseGpuFp16()";
  use_gpu_fp16_ = false;
#endif

  Update();
}

void AnalysisConfig::DisableFCPadding() {
  use_fc_padding_ = false;

  Update();
}

void AnalysisConfig::EnableXpu(int l3_workspace_size, bool locked,
                               bool autotune, const std::string &autotune_file,
                               const std::string &precision,
                               bool adaptive_seqlen) {
  use_xpu_ = true;
  xpu_l3_workspace_size_ = l3_workspace_size;
  xpu_locked_ = locked;
  xpu_autotune_ = autotune;
  xpu_autotune_file_ = autotune_file;
  xpu_precision_ = precision;
  xpu_adaptive_seqlen_ = adaptive_seqlen;
  Update();
}

void AnalysisConfig::SetXpuDeviceId(int device_id) {
  PADDLE_ENFORCE_EQ(use_xpu_, true,
                    platform::errors::PreconditionNotMet(
                        "Should call EnableXpu before SetXpuDeviceId."));
  xpu_device_id_ = device_id;
  Update();
}
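
// Usage sketch (illustrative values, assuming the declaration in
// paddle_analysis_config.h provides defaults for the remaining EnableXpu
// parameters): note the required order, since SetXpuDeviceId() enforces
// that EnableXpu() has already been called.
//
//   config.EnableXpu(10 * 1024 * 1024 /*l3_workspace_size*/);
//   config.SetXpuDeviceId(0);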

void AnalysisConfig::EnableNpu(int device_id) {
#ifdef PADDLE_WITH_ASCEND_CL
  use_npu_ = true;
  npu_device_id_ = device_id;
#else
  LOG(ERROR) << "Please compile with npu to EnableNpu()";
  use_npu_ = false;
#endif

  Update();
}

void AnalysisConfig::EnableIpu(int ipu_device_num, int ipu_micro_batch_size,
                               bool ipu_enable_pipelining,
                               int ipu_batches_per_step) {
  enable_ir_optim_ = true;

  use_ipu_ = true;
  ipu_device_num_ = ipu_device_num;
  ipu_micro_batch_size_ = ipu_micro_batch_size;
  ipu_enable_pipelining_ = ipu_enable_pipelining;
  ipu_batches_per_step_ = ipu_batches_per_step;

  Update();
}

void AnalysisConfig::SetIpuConfig(bool ipu_enable_fp16, int ipu_replica_num,
                                  float ipu_available_memory_proportion,
                                  bool ipu_enable_half_partial) {
  ipu_enable_fp16_ = ipu_enable_fp16;
  ipu_replica_num_ = ipu_replica_num;
  ipu_available_memory_proportion_ = ipu_available_memory_proportion;
  ipu_enable_half_partial_ = ipu_enable_half_partial;

  Update();
}
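
// Usage sketch (illustrative values, assuming the header declaration
// provides defaults for the omitted parameters): enable one IPU, then tune
// the FP16/replica settings with a second call.
//
//   config.EnableIpu(1 /*ipu_device_num*/, 1 /*ipu_micro_batch_size*/);
//   config.SetIpuConfig(true /*ipu_enable_fp16*/, 1 /*ipu_replica_num*/);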

void AnalysisConfig::EnableONNXRuntime() {
#ifdef PADDLE_WITH_ONNXRUNTIME
  use_onnxruntime_ = true;
#else
  LOG(ERROR) << "Please compile with onnxruntime to EnableONNXRuntime()";
  use_onnxruntime_ = false;
#endif

  Update();
}

void AnalysisConfig::DisableONNXRuntime() {
  use_onnxruntime_ = false;
  Update();
}

void AnalysisConfig::EnableORTOptimization() {
#ifdef PADDLE_WITH_ONNXRUNTIME
  enable_ort_optimization_ = true;
#else
  LOG(ERROR) << "Please compile with onnxruntime to EnableORTOptimization()";
  enable_ort_optimization_ = false;
#endif

  Update();
}
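
// Usage sketch: ONNX Runtime is opt-in and only takes effect in builds
// compiled with PADDLE_WITH_ONNXRUNTIME; otherwise these setters log an
// error and reset the flags.
//
//   config.EnableONNXRuntime();
//   config.EnableORTOptimization();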

AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
#define CP_MEMBER(member__) member__ = other.member__;

  // Model related.
  CP_MEMBER(model_dir_);
  CP_MEMBER(model_from_memory_);  // the memory model reuses prog_file_ and
                                  // params_file_ fields.

  CP_MEMBER(opt_cache_dir_);
  CP_MEMBER(prog_file_);
  CP_MEMBER(params_file_);

  CP_MEMBER(use_fc_padding_);
  // GPU related.
  CP_MEMBER(use_gpu_);
  CP_MEMBER(use_cudnn_);
  CP_MEMBER(gpu_device_id_);
  CP_MEMBER(memory_pool_init_size_mb_);
  CP_MEMBER(use_gpu_fp16_);
  CP_MEMBER(gpu_fp16_disabled_op_types_);

  CP_MEMBER(enable_memory_optim_);
  // TensorRT related.
  CP_MEMBER(use_tensorrt_);
  CP_MEMBER(tensorrt_workspace_size_);
  CP_MEMBER(tensorrt_max_batchsize_);
  CP_MEMBER(tensorrt_min_subgraph_size_);
  CP_MEMBER(tensorrt_precision_mode_);
  CP_MEMBER(trt_disabled_ops_);
  CP_MEMBER(trt_use_dla_);
  CP_MEMBER(trt_dla_core_);
  CP_MEMBER(trt_use_static_engine_);
  CP_MEMBER(trt_use_calib_mode_);
  CP_MEMBER(trt_use_oss_);
  CP_MEMBER(trt_with_interleaved_);
  CP_MEMBER(trt_tuned_dynamic_shape_);
  CP_MEMBER(trt_allow_build_at_runtime_);
  CP_MEMBER(collect_shape_range_info_);
  CP_MEMBER(shape_range_info_path_);
  CP_MEMBER(trt_use_inspector_);
  // Dlnne related
  CP_MEMBER(use_dlnne_);
  CP_MEMBER(dlnne_min_subgraph_size_);
  // MKLDNN related.
  CP_MEMBER(use_mkldnn_);
  CP_MEMBER(mkldnn_enabled_op_types_);
  CP_MEMBER(mkldnn_cache_capacity_);
  // Bfloat16 related.
  CP_MEMBER(use_mkldnn_bfloat16_);
  CP_MEMBER(bfloat16_enabled_op_types_);
  // Quantization related.
  CP_MEMBER(use_mkldnn_int8_);
  CP_MEMBER(quantize_enabled_op_types_);
  CP_MEMBER(quantize_excluded_op_ids_);
  CP_MEMBER(use_mkldnn_quantizer_);
  CP_MEMBER(mkldnn_quantizer_config_);
  CP_MEMBER(min_input_shape_);
  CP_MEMBER(max_input_shape_);
  CP_MEMBER(optim_input_shape_);
  CP_MEMBER(disable_trt_plugin_fp16_);

  CP_MEMBER(use_lite_);
  CP_MEMBER(lite_precision_mode_);
  CP_MEMBER(lite_passes_filter_);
  CP_MEMBER(lite_ops_filter_);
  CP_MEMBER(lite_zero_copy_);

  // XPU related.
  CP_MEMBER(use_xpu_);
  CP_MEMBER(xpu_device_id_);
  CP_MEMBER(xpu_l3_workspace_size_);
  CP_MEMBER(xpu_locked_);
  CP_MEMBER(xpu_autotune_);
  CP_MEMBER(xpu_autotune_file_);
  CP_MEMBER(xpu_precision_);
  CP_MEMBER(xpu_adaptive_seqlen_);

  // NPU related.
  CP_MEMBER(use_npu_);
  CP_MEMBER(npu_device_id_);
  CP_MEMBER(nnadapter_config_);

  // profile related.
  CP_MEMBER(with_profile_);

  // glog related.
  CP_MEMBER(with_glog_info_);

  // Ir related.
  CP_MEMBER(enable_ir_optim_);
  CP_MEMBER(use_feed_fetch_ops_);
  CP_MEMBER(ir_debug_);
  CP_MEMBER(specify_input_name_);

  CP_MEMBER(cpu_math_library_num_threads_);

  CP_MEMBER(serialized_info_cache_);

  CP_MEMBER(thread_local_stream_);

  // ipu related
  CP_MEMBER(use_ipu_);
  CP_MEMBER(ipu_device_num_);
  CP_MEMBER(ipu_micro_batch_size_);
  CP_MEMBER(ipu_enable_pipelining_);
  CP_MEMBER(ipu_batches_per_step_);
  CP_MEMBER(ipu_enable_fp16_);
  CP_MEMBER(ipu_replica_num_);
  CP_MEMBER(ipu_available_memory_proportion_);
  CP_MEMBER(ipu_enable_half_partial_);

  // fleet exe related
  CP_MEMBER(dist_config_);

  if (use_gpu_) {
    PADDLE_ENFORCE_EQ(use_xpu_, false,
                      platform::errors::InvalidArgument(
                          "Only one choice can be made between CPU and XPU."));
    pass_builder_.reset(new GpuPassStrategy(
        *static_cast<GpuPassStrategy *>(other.pass_builder())));
  } else if (use_ipu_) {
    pass_builder_.reset(new IpuPassStrategy(
        *static_cast<IpuPassStrategy *>(other.pass_builder())));
  } else if (use_xpu_) {
    pass_builder_.reset(new XpuPassStrategy(
        *static_cast<XpuPassStrategy *>(other.pass_builder())));
  } else if (use_npu_) {
    pass_builder_.reset(new NpuPassStrategy(
        *static_cast<NpuPassStrategy *>(other.pass_builder())));
  } else {
    pass_builder_.reset(new CpuPassStrategy(
        *static_cast<CpuPassStrategy *>(other.pass_builder())));
  }

#undef CP_MEMBER

  Update();
  if (use_tensorrt_) {
    // Update() resets all the passes, so if some TensorRT passes were
    // deleted from other.pass_builder() they would be re-added here;
    // instead, copy other's pass list verbatim.
    pass_builder_->ClearPasses();
    auto other_passes = other.pass_builder()->AllPasses();
    for (auto pass : other_passes) {
      pass_builder_->AppendPass(pass);
    }
  }
  if (use_dlnne_) {
    auto all_passes = kDlnneSubgraphPasses;
    auto other_passes = other.pass_builder()->AllPasses();
    // Sort both lists before diffing, because the user may have called the
    // SwitchIrDebug interface, which changes the pass list.
    std::sort(all_passes.begin(), all_passes.end());
    std::sort(other_passes.begin(), other_passes.end());
    std::vector<std::string> deleted_passes;
    std::set_difference(all_passes.begin(), all_passes.end(),
                        other_passes.begin(), other_passes.end(),
                        std::inserter(deleted_passes, deleted_passes.begin()));
    for (auto ps : deleted_passes) {
      pass_builder_->DeletePass(ps);
    }
  }
}

void AnalysisConfig::EnableCUDNN() {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  use_cudnn_ = use_gpu_;
#else
  LOG(ERROR) << "Please compile with CUDA first to use cuDNN";
  use_cudnn_ = false;
#endif

  Update();
}

void AnalysisConfig::EnableMKLDNN() {
#ifdef PADDLE_WITH_MKLDNN
  use_mkldnn_ = true;
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
  use_mkldnn_ = false;
#endif

  Update();
}

void AnalysisConfig::SetMkldnnCacheCapacity(int capacity) {
#ifdef PADDLE_WITH_MKLDNN
  mkldnn_cache_capacity_ = capacity;
#else
  LOG(ERROR) << "Please compile with MKLDNN first to set the MKLDNN cache "
                "capacity";
  mkldnn_cache_capacity_ = 0;
#endif
}

void AnalysisConfig::EnableMkldnnQuantizer() {
#ifdef PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_config_)
    mkldnn_quantizer_config_.reset(new MkldnnQuantizerConfig());
  use_mkldnn_quantizer_ = true;
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  use_mkldnn_quantizer_ = false;
#endif

  Update();
}
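
// Usage sketch: the quantizer lazily creates its MkldnnQuantizerConfig,
// which can be retrieved afterwards via mkldnn_quantizer_config() below.
//
//   config.EnableMKLDNN();
//   config.EnableMkldnnQuantizer();
//   auto *quant_cfg = config.mkldnn_quantizer_config();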

void AnalysisConfig::EnableMkldnnBfloat16() {
#ifdef PADDLE_WITH_MKLDNN
  if (platform::MayIUse(platform::cpu_isa_t::avx512_core)) {
    use_mkldnn_bfloat16_ = true;
    LOG(INFO) << "Hardware support for BFLOAT16"
              << (platform::MayIUse(platform::cpu_isa_t::avx512_bf16)
                      ? " is enabled"
                      : " is disabled. Simulation will be used");
  } else {
    LOG(INFO) << "CPU does not support BFLOAT16 calculations";
    use_mkldnn_bfloat16_ = false;
  }
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnBfloat16";
  use_mkldnn_bfloat16_ = false;
#endif

  Update();
}

void AnalysisConfig::EnableMkldnnInt8(
    const std::unordered_set<std::string> &op_list) {
#ifdef PADDLE_WITH_MKLDNN
  use_mkldnn_int8_ = true;
  use_fc_padding_ = false;
  if (!op_list.empty()) {
    for (auto &type : op_list) {
      if (!quantize_enabled_op_types_.count(type)) {
        LOG(ERROR) << "There are unsupported operators in the configured "
                      "quantization operator list. The unsupported operator "
                      "is: "
                   << type;
        use_mkldnn_int8_ = false;
        break;
      }
    }
    if (use_mkldnn_int8_) {
      quantize_enabled_op_types_.clear();
      quantize_enabled_op_types_.insert(op_list.begin(), op_list.end());
    }
  }
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnInt8";
  use_mkldnn_int8_ = false;
#endif

  Update();
}
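
// Usage sketch (hypothetical op set): restrict INT8 quantization to a
// subset of operators; any op outside quantize_enabled_op_types_ disables
// INT8 again with an error.
//
//   config.EnableMKLDNN();
//   config.EnableMkldnnInt8({"conv2d", "pool2d"});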

MkldnnQuantizerConfig *AnalysisConfig::mkldnn_quantizer_config() const {
  PADDLE_ENFORCE_NOT_NULL(mkldnn_quantizer_config_,
                          platform::errors::PreconditionNotMet(
                              "MkldnnQuantizer was not enabled yet."));
  return mkldnn_quantizer_config_.get();
}

void AnalysisConfig::EnableTensorRtEngine(
    int workspace_size, int max_batch_size, int min_subgraph_size,
    AnalysisConfig::Precision precision_mode, bool use_static,
    bool use_calib_mode) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (!use_gpu()) {
    LOG(ERROR) << "To use TensorRT engine, please call EnableGpu() first";
    return;
  }

  use_tensorrt_ = true;
  tensorrt_workspace_size_ = workspace_size;
  tensorrt_max_batchsize_ = max_batch_size;
  tensorrt_min_subgraph_size_ = min_subgraph_size;
  tensorrt_precision_mode_ = precision_mode;
  trt_use_static_engine_ = use_static;
  trt_use_calib_mode_ = use_calib_mode;

  Update();
#else
  LOG(ERROR)
      << "To use TensorRT engine, please compile inference lib with GPU first.";
#endif
}
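
// Usage sketch (illustrative values): TensorRT requires EnableUseGpu()
// first; workspace size, batch limits, precision, engine caching and
// calibration are configured in one call.
//
//   config.EnableUseGpu(100, 0);
//   config.EnableTensorRtEngine(1 << 30 /*workspace_size*/,
//                               1 /*max_batch_size*/,
//                               3 /*min_subgraph_size*/,
//                               AnalysisConfig::Precision::kFloat32,
//                               false /*use_static*/,
//                               true /*use_calib_mode*/);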

void AnalysisConfig::EnableDlnne(int min_subgraph_size) {
  use_dlnne_ = true;
  dlnne_min_subgraph_size_ = min_subgraph_size;
  Update();
}

void AnalysisConfig::SetTRTDynamicShapeInfo(
    std::map<std::string, std::vector<int>> min_input_shape,
    std::map<std::string, std::vector<int>> max_input_shape,
    std::map<std::string, std::vector<int>> optim_input_shape,
    bool disable_trt_plugin_fp16) {
  min_input_shape_ = min_input_shape;
  max_input_shape_ = max_input_shape;
  optim_input_shape_ = optim_input_shape;
  disable_trt_plugin_fp16_ = disable_trt_plugin_fp16;
}
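
// Usage sketch (hypothetical input name "x", assuming the header default
// for disable_trt_plugin_fp16): provide a min/max/optimal shape per input
// tensor so TensorRT can build a dynamic-shape engine.
//
//   std::map<std::string, std::vector<int>> min_s{{"x", {1, 3, 224, 224}}};
//   std::map<std::string, std::vector<int>> max_s{{"x", {8, 3, 224, 224}}};
//   std::map<std::string, std::vector<int>> opt_s{{"x", {4, 3, 224, 224}}};
//   config.SetTRTDynamicShapeInfo(min_s, max_s, opt_s);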

void AnalysisConfig::EnableTensorRtDLA(int dla_core) {
  trt_use_dla_ = true;
  trt_dla_core_ = dla_core;
}

void AnalysisConfig::EnableTensorRtInspector() { trt_use_inspector_ = true; }

void AnalysisConfig::Exp_DisableTensorRtOPs(
    const std::vector<std::string> &ops) {
  trt_disabled_ops_.insert(trt_disabled_ops_.end(), ops.begin(), ops.end());
}

void AnalysisConfig::EnableTensorRtOSS() { trt_use_oss_ = true; }

// TODO(Superjomn) refactor this, buggy.
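// Update() is called at the end of most setters above: it rebuilds
// pass_builder_ so the pass list matches the current device and
// optimization flags, and it returns early when SerializeInfoCache()
// reports that nothing has changed.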
void AnalysisConfig::Update() {
  auto info = SerializeInfoCache();
  if (info == serialized_info_cache_) return;

  // Transfer pass_builder and copy the existing compatible passes.
  if (!pass_builder_ || ((use_gpu() ^ pass_builder_->use_gpu())) ||
      ((use_xpu() ^ pass_builder_->use_xpu())) ||
      ((use_npu() ^ pass_builder_->use_npu())) ||
      ((use_ipu() ^ pass_builder_->use_ipu()))) {
    if (use_gpu()) {
      pass_builder_.reset(new GpuPassStrategy);

      if (use_tensorrt_) {
        // Append after the affine_channel_conv_fuse pass.
        pass_builder()->InsertPass(3, "tensorrt_subgraph_pass");
      }
    } else if (use_ipu()) {
      VLOG(1) << "Create new IpuPassStrategy.";
      pass_builder_.reset(new IpuPassStrategy);
    } else if (use_xpu()) {
      PADDLE_ENFORCE_EQ(
          use_gpu(), false,
          platform::errors::InvalidArgument(
              "Only one choice can be made between CPU and XPU."));
      pass_builder_.reset(new XpuPassStrategy);
    } else if (use_npu()) {
      PADDLE_ENFORCE_EQ(
          use_gpu(), false,
          platform::errors::InvalidArgument(
              "Only one choice can be made between GPU and NPU."));
      pass_builder_.reset(new NpuPassStrategy);
    } else {
      pass_builder_.reset(new CpuPassStrategy);
    }

  } else {
    if (use_gpu()) {
      pass_builder_.reset(new GpuPassStrategy(
          *static_cast<GpuPassStrategy *>(pass_builder_.get())));
    } else if (use_ipu()) {
      VLOG(1) << "Copy the existing IpuPassStrategy.";
      pass_builder_.reset(new IpuPassStrategy(
          *static_cast<IpuPassStrategy *>(pass_builder_.get())));
    } else if (use_xpu()) {
      PADDLE_ENFORCE_EQ(
          use_gpu(), false,
          platform::errors::InvalidArgument(
              "Only one choice can be made between CPU and XPU."));
      pass_builder_.reset(new XpuPassStrategy(
          *static_cast<XpuPassStrategy *>(pass_builder_.get())));
    } else if (use_npu()) {
      PADDLE_ENFORCE_EQ(
          use_gpu(), false,
          platform::errors::InvalidArgument(
              "Only one choice can be made between GPU and NPU."));
      pass_builder_.reset(new NpuPassStrategy(
          *static_cast<NpuPassStrategy *>(pass_builder_.get())));
    } else {
      pass_builder_.reset(new CpuPassStrategy(
          *static_cast<CpuPassStrategy *>(pass_builder_.get())));
    }
  }

  if (use_tensorrt_) {
    pass_builder()->ClearPasses();
    for (const auto &pass : kTRTSubgraphPasses) {
      if (tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
          (pass == "conv_bn_fuse_pass")) {
        continue;
      }
      pass_builder()->AppendPass(pass);
    }
  }

  if (use_dlnne_) {
    pass_builder()->ClearPasses();
    for (const auto &pass : kDlnneSubgraphPasses) {
      pass_builder()->AppendPass(pass);
    }
  }

  if (use_gpu() && use_cudnn_) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (!enable_ir_optim_) {
      LOG(ERROR) << "EnableCUDNN() only works when IR optimization is enabled.";
    } else {
      pass_builder()->EnableCUDNN();
    }
#endif
  }

  if (use_gpu_fp16_) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (!enable_ir_optim_) {
      LOG(ERROR) << "Exp_EnableUseGpuFp16() only works when IR optimization is "
                    "enabled.";
    } else if (!use_gpu()) {
      LOG(ERROR)
          << "Exp_EnableUseGpuFp16() only works when use_gpu is enabled.";
    } else {
      pass_builder()->Exp_EnableUseGpuFp16();
    }
#endif
  }

  if (use_mkldnn_) {
#ifdef PADDLE_WITH_MKLDNN
    if (!enable_ir_optim_) {
      LOG(ERROR)
          << "EnableMKLDNN() only works when IR optimization is enabled.";
    } else {
      pass_builder()->EnableMKLDNN();
    }
#endif
  }

  // Quantization passes must come after all other optimization passes
  if (use_mkldnn_quantizer_) {
    if (!enable_ir_optim_) {
      LOG(ERROR) << "EnableMkldnnQuantizer() only works when IR optimization "
                    "is enabled.";
    }
#ifdef PADDLE_WITH_MKLDNN
    pass_builder()->EnableMkldnnQuantizer();
#endif
  }

  if (use_mkldnn_bfloat16_) {
#ifdef PADDLE_WITH_MKLDNN
    pass_builder()->EnableMkldnnBfloat16();
#endif
  }

  if (use_mkldnn_int8_) {
#ifdef PADDLE_WITH_MKLDNN
    if (!enable_ir_optim_) {
      LOG(ERROR) << "EnableMkldnnInt8() only works when IR optimization "
                    "is enabled.";
    } else if (!use_mkldnn_) {
      LOG(ERROR) << "EnableMkldnnInt8() only works when MKLDNN "
                    "is enabled.";
    } else {
      pass_builder()->EnableMkldnnInt8();
    }
#endif
  }

#ifdef PADDLE_WITH_MKLDNN
  // Do not optimize when mkldnn is on
  if (enable_memory_optim_ && !use_mkldnn_) {
#else
  if (enable_memory_optim_) {
#endif
    pass_builder()->AppendAnalysisPass("memory_optimize_pass");
  }

  if (use_lite_) {
#ifndef PADDLE_WITH_LITE
    LOG(WARNING) << "You tried to enable the Lite subgraph, but Paddle was "
                    "not compiled with -DWITH_LITE.";
#endif
    pass_builder()->ClearPasses();
    for (const auto &pass : kLiteSubgraphPasses) {
      if (std::find(lite_passes_filter_.begin(), lite_passes_filter_.end(),
                    pass) == lite_passes_filter_.end()) {
        pass_builder()->AppendPass(pass);
      }
    }
  }

  if (use_xpu_) {
#if (defined LITE_SUBGRAPH_WITH_XPU) || (defined PADDLE_WITH_XPU)
    PADDLE_ENFORCE_EQ(use_gpu_, false,
                      platform::errors::Unavailable(
                          "Currently, XPU and GPU cannot be enabled in the "
                          "same analysis configuration."));
#else
    PADDLE_THROW(platform::errors::Unavailable(
        "You tried to use an XPU device, but Paddle was not compiled "
        "with XPU-runtime."));
#endif
  }

  if (use_npu_) {
#if defined(PADDLE_WITH_ASCEND_CL) || defined(LITE_SUBGRAPH_WITH_NPU)
    PADDLE_ENFORCE_EQ(use_gpu_, false,
                      platform::errors::Unavailable(
                          "Currently, NPU and GPU cannot be enabled in the "
                          "same analysis configuration."));
#else
    PADDLE_THROW(platform::errors::Unavailable(
        "You tried to use an NPU device, but Paddle was not compiled "
        "with NPU-runtime."));
#endif
  }
  if (use_ipu_) {
#ifndef PADDLE_WITH_IPU
    PADDLE_THROW(platform::errors::Unavailable(
        "You tried to enable the IPU, "
        "but Paddle was not compiled with -DWITH_IPU."));
#endif
  }

  if (ir_debug_) {
    pass_builder()->TurnOnDebug();
  }
}

std::string AnalysisConfig::SerializeInfoCache() {
  std::stringstream ss;
  ss << model_dir_;
  ss << prog_file_;
  ss << params_file_;

  ss << use_gpu_;
  ss << use_gpu_fp16_;
  for (auto &item : gpu_fp16_disabled_op_types_) ss << item;
  ss << use_fc_padding_;
  ss << gpu_device_id_;
  ss << xpu_device_id_;
  ss << memory_pool_init_size_mb_;

  ss << use_tensorrt_;
  ss << tensorrt_workspace_size_;
  ss << tensorrt_max_batchsize_;
  ss << tensorrt_min_subgraph_size_;

  ss << use_dlnne_;
  ss << dlnne_min_subgraph_size_;

  for (auto &op : trt_disabled_ops_) ss << op.c_str();
  ss << ";";

  ss << trt_use_dla_;
  ss << trt_dla_core_;

  ss << enable_memory_optim_;

  ss << use_mkldnn_;
  ss << mkldnn_cache_capacity_;
  for (auto &item : mkldnn_enabled_op_types_) ss << item;
  ss << ";";

  ss << use_mkldnn_quantizer_;
  ss << use_mkldnn_bfloat16_;
  for (auto &item : bfloat16_enabled_op_types_) ss << item;
  ss << use_mkldnn_int8_;
  for (auto &item : quantize_enabled_op_types_) ss << item;
  for (auto &item : quantize_excluded_op_ids_) ss << item;
  ss << ";";
  ss << model_from_memory_;

  ss << with_profile_;

  ss << with_glog_info_;

  ss << enable_ir_optim_;
  ss << use_feed_fetch_ops_;
  ss << ir_debug_;

  ss << specify_input_name_;
  ss << cpu_math_library_num_threads_;

  ss << use_lite_;
  ss << use_xpu_;
  ss << xpu_l3_workspace_size_;
  ss << xpu_locked_;
  ss << xpu_autotune_;
  ss << xpu_autotune_file_;
  ss << xpu_precision_;
  ss << xpu_adaptive_seqlen_;

  ss << use_npu_;
  ss << npu_device_id_;

  ss << thread_local_stream_;

  ss << use_ipu_;
  ss << ipu_device_num_;
  ss << ipu_micro_batch_size_;
  ss << ipu_enable_pipelining_;
  ss << ipu_batches_per_step_;
  ss << ipu_enable_fp16_;
  ss << ipu_replica_num_;
  ss << ipu_available_memory_proportion_;
  ss << ipu_enable_half_partial_;

  return ss.str();
}

void AnalysisConfig::SetCpuMathLibraryNumThreads(
    int cpu_math_library_num_threads) {
  cpu_math_library_num_threads_ = cpu_math_library_num_threads;

  Update();
}

float AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // Get the GPU memory details and calculate the fraction of memory for the
  // GPU memory pool.
  size_t gpu_total, gpu_available;
  platform::SetDeviceId(gpu_device_id_);
  platform::GpuMemoryUsage(&gpu_available, &gpu_total);
  double total_gpu_memory = gpu_total / 1024. / 1024.;
  float fraction_of_gpu_memory =
      static_cast<double>(memory_pool_init_size_mb()) / total_gpu_memory;
  VLOG(3) << "total_gpu_memory is " << total_gpu_memory
          << "M, gpu_available is " << gpu_available / 1024. / 1024.
          << "M, memory_pool_init_size is " << memory_pool_init_size_mb()
          << "M.";
  return fraction_of_gpu_memory;
#else
  return 0.;
#endif
}
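
// Worked example (illustrative numbers): with memory_pool_init_size_mb() ==
// 100 on a device reporting 16384 MB in total, the returned fraction is
// 100 / 16384, i.e. roughly 0.006.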

void AnalysisConfig::EnableMemoryOptim(bool x) {
  enable_memory_optim_ = x;
  Update();
}

bool AnalysisConfig::enable_memory_optim() const {
  return enable_memory_optim_;
}

void AnalysisConfig::SetModelBuffer(const char *prog_buffer,
                                    size_t prog_buffer_size,
                                    const char *param_buffer,
                                    size_t param_buffer_size) {
  prog_file_ = std::string(prog_buffer, prog_buffer + prog_buffer_size);
  params_file_ = std::string(param_buffer, param_buffer + param_buffer_size);
  model_from_memory_ = true;
}
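
// Usage sketch (hypothetical buffers, e.g. filled after decrypting a model):
//
//   std::string prog = LoadToString("model.pdmodel");      // hypothetical helper
//   std::string params = LoadToString("model.pdiparams");  // hypothetical helper
//   config.SetModelBuffer(prog.data(), prog.size(),
//                         params.data(), params.size());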

NativeConfig AnalysisConfig::ToNativeConfig() const {
  NativeConfig config;
  config.model_dir = model_dir_;
  config.prog_file = prog_file_;
  config.param_file = params_file_;
  config.use_gpu = use_gpu_;
  config.device = gpu_device_id_;
  config.fraction_of_gpu_memory = fraction_of_gpu_memory_for_pool();
  config.specify_input_name = specify_input_name_;
  return config;
}

void AnalysisConfig::SwitchIrDebug(int x) {
  ir_debug_ = x;
  Update();
}

void AnalysisConfig::EnableProfile() {
  with_profile_ = true;
  Update();
}

void AnalysisConfig::DisableGlogInfo() {
  with_glog_info_ = false;
  Update();
}

void AnalysisConfig::EnableLiteEngine(
    AnalysisConfig::Precision precision_mode, bool zero_copy,
    const std::vector<std::string> &passes_filter,
    const std::vector<std::string> &ops_filter) {
  use_lite_ = true;
  lite_precision_mode_ = precision_mode;
  lite_passes_filter_ = passes_filter;
  lite_ops_filter_ = ops_filter;
  lite_zero_copy_ = zero_copy;
  Update();
}

void AnalysisConfig::PartiallyRelease() {
  prog_file_.clear();
  prog_file_.shrink_to_fit();
  params_file_.clear();
  params_file_.shrink_to_fit();
}

void AnalysisConfig::EnableGpuMultiStream() { thread_local_stream_ = true; }

std::string AnalysisConfig::Summary() {
  const std::vector<std::string> header{"Option", "Value"};
  paddle::inference::TablePrinter os(header);

  if (!model_dir_.empty()) {
    os.InsertRow({"model_dir", model_dir_});
  }
  if (!(prog_file_.empty() && params_file_.empty())) {
    os.InsertRow({"model_file", prog_file_});
    os.InsertRow({"params_file", params_file_});
  }
  if (model_from_memory_) {
    os.InsertRow({"model_from_memory", params_file_});
  }
  os.InsetDivider();

  // cpu info
  os.InsertRow(
      {"cpu_math_thread", std::to_string(cpu_math_library_num_threads_)});
  os.InsertRow({"enable_mkldnn", use_mkldnn_ ? "true" : "false"});
  os.InsertRow(
      {"mkldnn_cache_capacity", std::to_string(mkldnn_cache_capacity_)});
  os.InsetDivider();

  // gpu info
  os.InsertRow({"use_gpu", use_gpu_ ? "true" : "false"});
  if (use_gpu_) {
    os.InsertRow({"gpu_device_id", std::to_string(gpu_device_id_)});
    os.InsertRow({"memory_pool_init_size",
                  std::to_string(memory_pool_init_size_mb_) + "MB"});
    os.InsertRow(
        {"thread_local_stream", thread_local_stream_ ? "true" : "false"});

    os.InsertRow({"use_tensorrt", use_tensorrt_ ? "true" : "false"});
    if (use_tensorrt_) {
#ifdef PADDLE_WITH_TENSORRT
      auto Precision2String =
          [](paddle::AnalysisConfig::Precision prec) -> std::string {
        if (prec == Precision::kFloat32)
          return "fp32";
        else if (prec == Precision::kHalf)
          return "fp16";
        else if (prec == Precision::kInt8)
          return "int8";
        else
          return "None";
      };
      auto version2string =
          [](const std::tuple<int, int, int> &ver) -> std::string {
        std::ostringstream os;
        int major = std::get<0>(ver);
        int minor = std::get<1>(ver);
        int patch = std::get<2>(ver);
        os << major << "." << minor << "." << patch;
        return os.str();
      };
      os.InsertRow(
          {"trt_compile_version",
           version2string(inference::tensorrt::GetTrtCompileVersion())});
      os.InsertRow(
          {"trt_runtime_version",
           version2string(inference::tensorrt::GetTrtRuntimeVersion())});
      os.InsertRow({"tensorrt_precision_mode",
                    Precision2String(tensorrt_precision_mode_)});
      os.InsertRow({"tensorrt_workspace_size",
                    std::to_string(tensorrt_workspace_size_)});
      os.InsertRow(
          {"tensorrt_max_batch_size", std::to_string(tensorrt_max_batchsize_)});
      os.InsertRow({"tensorrt_min_subgraph_size",
                    std::to_string(tensorrt_min_subgraph_size_)});
      os.InsertRow({"tensorrt_use_static_engine",
                    trt_use_static_engine_ ? "true" : "false"});
      os.InsertRow(
          {"tensorrt_use_calib_mode", trt_use_calib_mode_ ? "true" : "false"});

      // dynamic_shape
      os.InsertRow({"tensorrt_enable_dynamic_shape",
                    min_input_shape_.empty() ? "false" : "true"});
      os.InsertRow({"tensorrt_tuned_dynamic_shape", trt_tuned_dynamic_shape_
                                                        ? shape_range_info_path_
                                                        : "false"});

      os.InsertRow({"tensorrt_use_oss", trt_use_oss_ ? "true" : "false"});
      os.InsertRow({"tensorrt_with_interleaved",
                    trt_with_interleaved_ ? "true" : "false"});
      os.InsertRow({"tensorrt_use_dla", trt_use_dla_ ? "true" : "false"});
      if (trt_use_dla_) {
        os.InsertRow({"tensorrt_dla_core", std::to_string(trt_dla_core_)});
      }
#endif
    }
  }
  os.InsetDivider();

  // xpu info
  os.InsertRow({"use_xpu", use_xpu_ ? "true" : "false"});
  if (use_xpu_) {
    os.InsertRow({"xpu_device_id", std::to_string(xpu_device_id_)});
    os.InsertRow(
        {"xpu_l3_workspace_size", std::to_string(xpu_l3_workspace_size_)});
  }
  os.InsetDivider();

  if (use_lite_) {
    os.InsertRow({"use_lite", use_lite_ ? "true" : "false"});
  }

  // ir info
  os.InsertRow({"ir_optim", enable_ir_optim_ ? "true" : "false"});
  os.InsertRow({"ir_debug", ir_debug_ ? "true" : "false"});
  os.InsertRow({"memory_optim", enable_memory_optim_ ? "true" : "false"});
  os.InsertRow({"enable_profile", with_profile_ ? "true" : "false"});
  os.InsertRow({"enable_log", with_glog_info_ ? "true" : "false"});
  os.InsertRow({"collect_shape_range_info",
                collect_shape_range_info_ ? shape_range_info_path_ : "false"});

  return os.PrintTable();
}

LiteNNAdapterConfig &LiteNNAdapterConfig::SetDeviceNames(
    const std::vector<std::string> &names) {
  nnadapter_device_names = names;
  return *this;
}

LiteNNAdapterConfig &LiteNNAdapterConfig::SetContextProperties(
    const std::string &properties) {
  nnadapter_context_properties = properties;
  return *this;
}

LiteNNAdapterConfig &LiteNNAdapterConfig::SetModelCacheDir(
    const std::string &dir) {
  nnadapter_model_cache_dir = dir;
  return *this;
}

LiteNNAdapterConfig &LiteNNAdapterConfig::SetModelCacheBuffers(
    const std::string &model_cache_token,
    const std::vector<char> &model_cache_buffer) {
  PADDLE_ENFORCE_EQ(model_cache_token.empty(), false,
                    platform::errors::InvalidArgument(
                        "model_cache_token should not be empty."));
  PADDLE_ENFORCE_EQ(model_cache_buffer.empty(), false,
                    platform::errors::InvalidArgument(
                        "model_cache_buffer should not be empty."));
  PADDLE_ENFORCE_EQ(nnadapter_model_cache_buffers.count(model_cache_token),
                    false, platform::errors::InvalidArgument(
                               "model_cache_token has already been set."));

  nnadapter_model_cache_buffers[model_cache_token] = model_cache_buffer;
  return *this;
}

LiteNNAdapterConfig &LiteNNAdapterConfig::SetSubgraphPartitionConfigPath(
    const std::string &path) {
  nnadapter_subgraph_partition_config_path = path;
  return *this;
}

LiteNNAdapterConfig &LiteNNAdapterConfig::SetSubgraphPartitionConfigBuffer(
    const std::string &buffer) {
  nnadapter_subgraph_partition_config_buffer = buffer;
  return *this;
}
LiteNNAdapterConfig &LiteNNAdapterConfig::Enable() {
  use_nnadapter = true;
  return *this;
}
LiteNNAdapterConfig &LiteNNAdapterConfig::Disable() {
  use_nnadapter = false;
  return *this;
}
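
// Usage sketch (hypothetical device name and cache dir): every setter
// returns *this, so an NNAdapter configuration can be built fluently.
//
//   LiteNNAdapterConfig adapter_config;
//   adapter_config.Enable()
//       .SetDeviceNames({"huawei_ascend_npu"})
//       .SetModelCacheDir("/tmp/nnadapter_cache");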

void AnalysisConfig::CollectShapeRangeInfo(
    const std::string &shape_range_info_path) {
  LOG(INFO) << "In CollectShapeRangeInfo mode, we will disable optimizations "
               "and collect the shape information of "
            << "all intermediate tensors in the compute graph and calculate "
               "the min_shape, max_shape and opt_shape.";
  collect_shape_range_info_ = true;
  PADDLE_ENFORCE_EQ(shape_range_info_path.empty(), false,
                    platform::errors::InvalidArgument(
                        "The shape_range_info_path should not be empty, please "
                        "re-check the argument."));
  shape_range_info_path_ = shape_range_info_path;
}

const std::string &AnalysisConfig::shape_range_info_path() {
  return shape_range_info_path_;
}

bool AnalysisConfig::shape_range_info_collected() {
  return collect_shape_range_info_;
}

void AnalysisConfig::EnableTunedTensorRtDynamicShape(
    const std::string &shape_range_info_path, bool allow_build_at_runtime) {
  shape_range_info_path_ = shape_range_info_path;
  trt_allow_build_at_runtime_ = allow_build_at_runtime;
  trt_tuned_dynamic_shape_ = true;
}

bool AnalysisConfig::tuned_tensorrt_dynamic_shape() {
  return trt_tuned_dynamic_shape_;
}

bool AnalysisConfig::trt_allow_build_at_runtime() {
  return trt_allow_build_at_runtime_;
}
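
// Usage sketch (hypothetical path): a common two-phase workflow is to first
// run inference with CollectShapeRangeInfo() to record tensor shape ranges,
// then reuse the recorded file to enable tuned TensorRT dynamic shapes.
//
//   // First run:  config.CollectShapeRangeInfo("/tmp/shape_range.pbtxt");
//   // Later runs:
//   config.EnableTunedTensorRtDynamicShape(
//       "/tmp/shape_range.pbtxt", true /*allow_build_at_runtime*/);
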
}  // namespace paddle