/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/inference/tensorrt/engine.h"

#include <NvInfer.h>
#include <glog/logging.h>

#include <string>

#include "NvInferRuntimeCommon.h"
#include "cuda_runtime_api.h"  // NOLINT
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/common/data_type.h"

namespace paddle {
namespace inference {
namespace tensorrt {

int TensorRTEngine::runtime_batch_ = 1;
thread_local int TensorRTEngine::predictor_id_per_thread = -1;

void TensorRTEngine::Weight::SetDataType(phi::DataType type) {
  nvinfer1::DataType nv_type = nvinfer1::DataType::kFLOAT;
  switch (type) {
    case phi::DataType::FLOAT32:
      nv_type = nvinfer1::DataType::kFLOAT;
      break;
    case phi::DataType::FLOAT16:
      nv_type = nvinfer1::DataType::kHALF;
      break;
    case phi::DataType::INT32:
      nv_type = nvinfer1::DataType::kINT32;
      break;
    case phi::DataType::INT8:
      nv_type = nvinfer1::DataType::kINT8;
      break;
#if IS_TRT_VERSION_GE(7000)
    case phi::DataType::BOOL:
      nv_type = nvinfer1::DataType::kBOOL;
      break;
#endif
    default:
      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
          "Paddle-TRT loads weights failed, found unsupported data type %s.",
          type));
      break;
  }
  w_.type = nv_type;
}

void TensorRTEngine::InitNetwork() {
  freshDeviceId();
  infer_builder_.reset(createInferBuilder(&logger_));

  if (with_dynamic_shape_) {
    infer_network_.reset(infer_builder_->createNetworkV2(
        1U << static_cast<int>(
            nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
  } else {
    infer_network_.reset(infer_builder_->createNetworkV2(0U));
  }

  infer_builder_config_.reset(infer_builder_->createBuilderConfig());
  optim_profiles_.resize(max_profile_num_);
  for (int i = 0; i < max_profile_num_; i++)
    optim_profiles_[i] = infer_builder_->createOptimizationProfile();
}

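// Returns the execution context for the calling thread. Contexts are cached
// per predictor (predictor_id_per_thread); with context memory sharing the
// context is created without its own device memory, which is attached later
// in Execute(). In dynamic-shape mode each newly created context is bound to
// its own optimization profile.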
nvinfer1::IExecutionContext *TensorRTEngine::context() {
  std::unique_lock<std::mutex> lock(mutex_);
  if (infer_context_.find(predictor_id_per_thread) == infer_context_.end()) {
    PADDLE_ENFORCE_NOT_NULL(
        infer_engine_,
        platform::errors::InvalidArgument(
            "You should build engine first and then set the context."));
    // We may see trt warning: Profile 0 has been chosen by another
    // IExecutionContext...
    // It's ok. We will set it later.
    nvinfer1::IExecutionContext *infer_context{nullptr};
    if (context_memory_sharing_) {
      infer_context =
          infer_engine_->createExecutionContextWithoutDeviceMemory();
    } else {
      infer_context = infer_engine_->createExecutionContext();
    }
    PADDLE_ENFORCE_NOT_NULL(
        infer_context,
        platform::errors::InvalidArgument(
            "TensorRT engine can not build execution context."));
    if (with_dynamic_shape_) {
      // need new profile if it's not the first
      if (cur_profile_num_ > 0) {
        infer_context->setOptimizationProfile(cur_profile_num_);
      }
      profile_index_[predictor_id_per_thread] = cur_profile_num_;
      ++cur_profile_num_;
    }
    infer_context_[predictor_id_per_thread].reset(infer_context);
  }
  return infer_context_[predictor_id_per_thread].get();
}

void TensorRTEngine::Execute(int batch_size,
                             std::vector<void *> *buffers,
                             cudaStream_t stream) {
  freshDeviceId();
  auto infer_context = context();
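  // With context memory sharing enabled, the context owns no device memory;
  // borrow a workspace from the TRTEngineManager pool for this predictor and
  // stream before enqueueing.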
  if (context_memory_sharing_) {
    void *context_memory{nullptr};
    context_memory =
        inference::Singleton<inference::tensorrt::TRTEngineManager>::Global()
            .getContextMemory(
                predictor_id_per_thread,
                phi::GPUPlace(device_id_),
                phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
    infer_context->setDeviceMemory(context_memory);
  }
  if (!with_dynamic_shape()) {
    infer_context->enqueue(batch_size, buffers->data(), stream, nullptr);
  } else {
    infer_context->enqueueV2(buffers->data(), stream, nullptr);
  }
  SetRuntimeBatch(batch_size);
}

void TensorRTEngine::FreezeNetwork() {
  freshDeviceId();
  VLOG(3) << "TRT to freeze network";
  PADDLE_ENFORCE_NOT_NULL(infer_builder_,
                          platform::errors::InvalidArgument(
                              "Inference builder of TRT is null. Please make "
                              "sure you call InitNetwork first."));
  PADDLE_ENFORCE_NOT_NULL(network(),
                          platform::errors::InvalidArgument(
                              "Call InitNetwork first to initialize network."));
  // build engine.
  if (!with_dynamic_shape_) {
    infer_builder_->setMaxBatchSize(max_batch_);
  }
#if IS_TRT_VERSION_GE(8300)
  infer_builder_config_->setMemoryPoolLimit(
      nvinfer1::MemoryPoolType::kWORKSPACE, max_workspace_);
#else
  infer_builder_config_->setMaxWorkspaceSize(max_workspace_);
#endif

#if IS_TRT_VERSION_GE(8500)
  infer_builder_config_->setPreviewFeature(
      nvinfer1::PreviewFeature::kFASTER_DYNAMIC_SHAPES_0805, true);
#else
#endif

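  // Select the builder precision below: the FP16 flag is set when kHalf is
  // requested, and INT8 additionally uses either a calibrator or the
  // per-tensor dynamic ranges collected during quantization.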
  bool enable_fp16 = (precision_ == AnalysisConfig::Precision::kHalf);
  if (enable_fp16) {
    bool support_fp16 = infer_builder_->platformHasFastFp16();
    infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
    if (!support_fp16) {
      LOG(INFO) << "You specified FP16 mode, but the hardware does not "
                   "support FP16 speedup; FP32 will be used instead.";
    } else {
      LOG(INFO) << "Run Paddle-TRT FP16 mode";
    }
  }

  bool enable_int8 = (precision_ == AnalysisConfig::Precision::kInt8);
  if (enable_int8) {
    if (!use_dla_) {
      infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
    }
    infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kINT8);

    if (calibrator_) {
      infer_builder_config_->setInt8Calibrator(calibrator_);
    } else {
      infer_builder_config_->setInt8Calibrator(nullptr);

      for (auto &quant_range : quant_dynamic_range_) {
        auto tensor = quant_range.first;
        float range = quant_range.second;
        tensor->setDynamicRange(-range, range);
      }

      std::unordered_set<nvinfer1::ITensor *> all_t;
      for (int i = 0; i < network()->getNbLayers(); i++) {
        auto layer = network()->getLayer(i);
        for (int j = 0; j < layer->getNbOutputs(); j++) {
          all_t.insert(layer->getOutput(j));
        }
      }

      for (int i = 0; i < network()->getNbInputs(); i++) {
        all_t.insert(network()->getInput(i));
      }

      for (auto &t : all_t) {
        if (!quant_dynamic_range_.count(t)) {
          VLOG(3) << "We are in TRT int8 mode (not calibration); scale is not "
                  << "set for tensor " << t->getName()
                  << ", which may be fine if TRT does not need this range.";
        }
      }
    }
  }

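  // Offload supported layers to DLA when requested; DLA only works together
  // with int8/fp16, and unsupported layers fall back to the GPU
  // (kGPU_FALLBACK).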
  if (use_dla_) {
    if (!enable_int8 && !enable_fp16) {
      LOG(WARNING) << "TensorRT DLA must be used with int8 or fp16, but you "
                      "set float32, so DLA is not used.";
    } else if (infer_builder_->getNbDLACores() == 0) {
      LOG(WARNING)
          << "TensorRT DLA is set by config, but your device does not have "
             "DLA, so DLA is not used.";
    } else {
      if (dla_core_ < 0 || dla_core_ >= infer_builder_->getNbDLACores()) {
        LOG(WARNING) << "Invalid DLACore, must satisfy 0 <= DLACore < "
                     << infer_builder_->getNbDLACores() << ", but got "
                     << dla_core_ << ", so 0 is used as the default.";
        dla_core_ = 0;
      }
      infer_builder_config_->setDefaultDeviceType(nvinfer1::DeviceType::kDLA);
      infer_builder_config_->setDLACore(dla_core_);
      infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK);
      LOG(INFO) << "TensorRT DLA enabled in FreezeNetwork(), DLACore "
                << dla_core_;
    }
  }

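  // In dynamic-shape mode, register one optimization profile per
  // max_profile_num_: each profile records min/max/opt dimensions for every
  // input and, for shape-tensor inputs, min/max/opt shape values.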
  if (with_dynamic_shape_) {
    LOG(INFO) << "Run Paddle-TRT Dynamic Shape mode.";
    for (int i = 0; i < max_profile_num_; i++) {
      for (auto &input : min_input_shape_) {
#if IS_TRT_VERSION_LT(7100)
        // trt6/trt7011 will check all_of input > 0
        if (!(std::all_of(input.second.begin(),
                          input.second.end(),
                          [](int x) { return x > 0; }) &&
              std::all_of(max_input_shape_[input.first].begin(),
                          max_input_shape_[input.first].end(),
                          [](int x) { return x > 0; }) &&
              std::all_of(optim_input_shape_[input.first].begin(),
                          optim_input_shape_[input.first].end(),
                          [](int x) { return x > 0; }))) {
          continue;
        }
#endif
        VLOG(4) << "TRT dynamic_shape set " << input.first
                << " min: " << Vec2Str(input.second)
                << ", max: " << Vec2Str(max_input_shape_[input.first])
                << ", opt: " << Vec2Str(optim_input_shape_[input.first]);

        optim_profiles_[i]->setDimensions(
            input.first.c_str(),
            nvinfer1::OptProfileSelector::kMIN,
            Vec2TRT_Dims(input.second, input.first, true));
        optim_profiles_[i]->setDimensions(
            input.first.c_str(),
            nvinfer1::OptProfileSelector::kMAX,
            Vec2TRT_Dims(max_input_shape_[input.first], input.first, true));
        optim_profiles_[i]->setDimensions(
            input.first.c_str(),
            nvinfer1::OptProfileSelector::kOPT,
            Vec2TRT_Dims(optim_input_shape_[input.first], input.first, true));
      }

      for (int input_id = 0; input_id < network()->getNbInputs(); input_id++) {
        auto input_name = network()->getInput(input_id)->getName();
        if (!itensor_map_.count(input_name)) continue;
        if (!GetITensor(input_name)->isShapeTensor()) continue;
        PADDLE_ENFORCE_EQ(min_shape_tensor_.count(input_name) &&
                              max_shape_tensor_.count(input_name) &&
                              optim_shape_tensor_.count(input_name),
                          true,
                          platform::errors::InvalidArgument(
                              "Fail to find min/max/optim shape value for TRT "
                              "network's shape tensor input named %s.",
                              input_name));
        auto min_vec = min_shape_tensor_.at(input_name);
        optim_profiles_[i]->setShapeValues(input_name,
                                           nvinfer1::OptProfileSelector::kMIN,
                                           min_vec.data(),
                                           min_vec.size());
        optim_profiles_[i]->setShapeValues(input_name,
                                           nvinfer1::OptProfileSelector::kMAX,
                                           max_shape_tensor_[input_name].data(),
                                           min_vec.size());
        optim_profiles_[i]->setShapeValues(
            input_name,
            nvinfer1::OptProfileSelector::kOPT,
            optim_shape_tensor_[input_name].data(),
            min_vec.size());
      }

      infer_builder_config_->addOptimizationProfile(optim_profiles_[i]);
    }
    if (WithFp16() && disable_trt_plugin_fp16()) {
      LOG(INFO) << "NOTE: In order to achieve higher accuracy, you have "
                   "disabled the fp16 mode of TRT Plugin.\n"
                << "You can re-enable it with "
                   "'config.SetDynamicShapeInfo(min_shape, max_shape, "
                   "opt_shape, false /*disable_trt_plugin_fp16*/)'";
    }
  }
#if IS_TRT_VERSION_GE(8200)
  if (use_inspector_) {
    infer_builder_config_->setProfilingVerbosity(
        nvinfer1::ProfilingVerbosity::kDETAILED);
  }
#endif

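  // Build the engine. For TRT < 8.0 the engine is built directly; for newer
  // versions a serialized network is built first (also kept in ihost_memory_
  // for later serialization) and then deserialized into the engine.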
#if IS_TRT_VERSION_LT(8000)
  infer_engine_.reset(infer_builder_->buildEngineWithConfig(
      *network(), *infer_builder_config_));
#else
  infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kSPARSE_WEIGHTS);
  ihost_memory_.reset(infer_builder_->buildSerializedNetwork(
      *network(), *infer_builder_config_));
  infer_ptr<nvinfer1::IRuntime> runtime(createInferRuntime(&logger_));
  infer_engine_.reset(runtime->deserializeCudaEngine(ihost_memory_->data(),
                                                     ihost_memory_->size()));
#endif

  PADDLE_ENFORCE_NOT_NULL(
      infer_engine_,
      platform::errors::Fatal(
          "Build TensorRT cuda engine failed! Please recheck "
          "your configurations related to Paddle-TensorRT."));

  binding_num_ = infer_engine_->getNbBindings();
  // reset status for dynamic shape clone
  if (max_profile_num_ > 1) {
    infer_context_.clear();
    cur_profile_num_ = 0;
  }
  // for engine context memory sharing
  if (context_memory_sharing_) {
    inference::Singleton<inference::tensorrt::TRTEngineManager>::Global()
        .updateContextMemorySize(infer_engine_->getDeviceMemorySize(),
                                 predictor_id_per_thread);
  }
  if (use_inspector_) {
    GetEngineInfo();
  }
}

nvinfer1::ITensor *TensorRTEngine::DeclareInput(const std::string &name,
                                                nvinfer1::DataType dtype,
                                                const nvinfer1::Dims &dims) {
  PADDLE_ENFORCE_EQ(network() != nullptr,
                    true,
                    platform::errors::InvalidArgument(
                        "The TRT network should be initialized first."));
  auto *input = network()->addInput(name.c_str(), dtype, dims);
  PADDLE_ENFORCE_NOT_NULL(
      input,
      platform::errors::InvalidArgument("Adding input %s failed in "
                                        "TensorRT inference network. "
                                        "Please recheck your input.",
                                        name));
  PADDLE_ENFORCE_EQ(input->isNetworkInput(),
                    true,
                    platform::errors::InvalidArgument(
                        "Input %s is not the input of TRT inference network. "
                        "Please recheck your input.",
                        name));
  TensorRTEngine::SetITensor(name, input);
  return input;
}

void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer *layer,
                                   int offset,
                                   const std::string &name) {
  auto *output = layer->getOutput(offset);
  PADDLE_ENFORCE_NOT_NULL(
      output,
      platform::errors::InvalidArgument(
          "The output %s of TRT engine should not be null.", name));
  SetITensor(name, output);
  output->setName(name.c_str());
  PADDLE_ENFORCE_EQ(output->isNetworkInput(),
                    false,
                    platform::errors::InvalidArgument(
                        "The output %s of TRT engine should not be the input "
                        "of the network at the same time.",
                        name));
  network()->markOutput(*output);
  PADDLE_ENFORCE_EQ(
      output->isNetworkOutput(),
      true,
      platform::errors::InvalidArgument(
          "The output %s of TRT engine should be the output of the network.",
          name));
}

void TensorRTEngine::DeclareOutput(const std::string &name) {
  auto *output = TensorRTEngine::GetITensor(name);
  PADDLE_ENFORCE_NOT_NULL(
      output,
      platform::errors::InvalidArgument(
          "The output %s of TRT engine should not be null.", name));
  output->setName(name.c_str());
  PADDLE_ENFORCE_EQ(output->isNetworkInput(),
                    false,
                    platform::errors::InvalidArgument(
                        "The output %s of TRT engine should not be the input "
                        "of the network at the same time.",
                        name));
  network()->markOutput(*output);
}

void TensorRTEngine::DeclareOutput(const std::string &name,
                                   nvinfer1::DataType dtype) {
  auto *output = TensorRTEngine::GetITensor(name);
  DeclareOutput(name);
  output->setType(dtype);
}

void TensorRTEngine::DeleteITensor(const std::string &name,
                                   nvinfer1::ITensor *tensor) {
  PADDLE_ENFORCE_NOT_NULL(
      tensor,
      platform::errors::InvalidArgument(
          "Tensor named %s of TRT engine should not be null.", name));
  PADDLE_ENFORCE_EQ(
      true,
      itensor_map_.count(name),
      platform::errors::InvalidArgument(
          "Tensor named %s is not found in the TRT engine's tensor map.",
          name));
  itensor_map_.erase(name);
}

void TensorRTEngine::SetITensor(const std::string &name,
                                nvinfer1::ITensor *tensor) {
  PADDLE_ENFORCE_NOT_NULL(
      tensor,
      platform::errors::InvalidArgument(
          "Tensor named %s of TRT engine should not be null.", name));
  PADDLE_ENFORCE_EQ(
      0,
      itensor_map_.count(name),
      platform::errors::InvalidArgument(
          "Tensor named %s of TRT engine should not be duplicated", name));
  itensor_map_[name] = tensor;
}

nvinfer1::ITensor *TensorRTEngine::GetITensor(const std::string &name,
                                              bool scalar) {
  if (scalar) {
    return ConvertWeight2ITensor(name, true);
  }
  if (itensor_map_.count(name)) {
    return itensor_map_[name];
  } else {
    ConvertWeight2ITensor(name);
    return itensor_map_[name];
  }
}

// For cases when the input is not an intermediate tensor but a persistable
// (weight) tensor, call this to convert it.
nvinfer1::ITensor *TensorRTEngine::ConvertWeight2ITensor(
    const std::string &name, bool scalar) {
  auto *var_v = scope_->FindVar(name);
  PADDLE_ENFORCE_NOT_NULL(
      var_v,
      platform::errors::NotFound("You are converting a persistable weight to a "
                                 "tensor, but there is no "
                                 "persistable variable called %s in scope.",
                                 name));
  auto *var_t = var_v->GetMutable<phi::DenseTensor>();
  auto weight = this->GetTrtWeight(name, *var_t);

  // Now that we have created the weights, we need to create an ITensor.
  auto var_dims = var_t->dims();
  nvinfer1::Dims trt_in_shape;
  trt_in_shape.nbDims = var_t->dims().size();
  for (int64_t i = 0; i < trt_in_shape.nbDims; i++) {
    trt_in_shape.d[i] = var_dims[i];
  }
  // In fact, this is not always right, because we can't determine whether the
  // 0th dimension is the batch dimension. Kept just to run chenqu's model.
  if (!this->with_dynamic_shape()) {
    trt_in_shape.nbDims--;
    for (int i = 0; i < trt_in_shape.nbDims; i++) {
      trt_in_shape.d[i] = trt_in_shape.d[i + 1];
    }
  }
  if (scalar) {
    trt_in_shape.nbDims = 0;
    trt_in_shape.d[0] = var_dims[0];
  }
  nvinfer1::ILayer *layer =
      TRT_ENGINE_ADD_LAYER(this, Constant, trt_in_shape, weight.get());
  if (!scalar) {
    this->SetITensor(name, layer->getOutput(0));
  }
  return layer->getOutput(0);
}

std::unordered_map<std::string, nvinfer1::ITensor *>
    *TensorRTEngine::GetITensorMap() {
  return &itensor_map_;
}

void TensorRTEngine::Deserialize(const std::string &engine_serialized_data) {
  freshDeviceId();
  infer_ptr<nvinfer1::IRuntime> runtime(createInferRuntime(&logger_));

  if (use_dla_) {
    if (precision_ != AnalysisConfig::Precision::kInt8 &&
        precision_ != AnalysisConfig::Precision::kHalf) {
      LOG(WARNING) << "TensorRT DLA must be used with int8 or fp16, but you "
                      "set float32, so DLA is not used.";
    } else if (runtime->getNbDLACores() == 0) {
      LOG(WARNING)
          << "TensorRT DLA is set by config, but your device does not have "
             "DLA, so DLA is not used.";
    } else {
      if (dla_core_ < 0 || dla_core_ >= runtime->getNbDLACores()) {
        LOG(WARNING) << "Invalid DLACore, must satisfy 0 <= DLACore < "
                     << runtime->getNbDLACores() << ", but got " << dla_core_
                     << ", so 0 is used as the default.";
        dla_core_ = 0;
      }
      runtime->setDLACore(dla_core_);
      LOG(INFO) << "TensorRT DLA enabled in Deserialize(), DLACore "
                << dla_core_;
    }
  }

  infer_engine_.reset(runtime->deserializeCudaEngine(
      engine_serialized_data.c_str(), engine_serialized_data.size()));

  PADDLE_ENFORCE_NOT_NULL(
      infer_engine_,
      platform::errors::Fatal(
          "Building TRT cuda engine failed when deserializing engine info. "
          "Please check:\n1. Your TRT serialization is generated and loaded "
          "on the same GPU architecture;\n2. The Paddle Inference version for "
          "generating the serialization file and doing inference are "
          "consistent."));

  binding_num_ = infer_engine_->getNbBindings();
  // for engine context memory sharing
  if (context_memory_sharing_) {
    inference::Singleton<inference::tensorrt::TRTEngineManager>::Global()
        .updateContextMemorySize(infer_engine_->getDeviceMemorySize(),
                                 predictor_id_per_thread);
  }
  if (use_inspector_) {
    GetEngineInfo();
  }
}

void TensorRTEngine::SetRuntimeBatch(size_t batch_size) {
  runtime_batch_ = batch_size;
}

// Note: Only to support plugins.
TensorRTEngine::Weight TensorRTEngine::GetFp16TrtWeight(
    const std::string &name, const phi::DenseTensor &weight_tensor) {
  static int name_suffix_counter = 0;
  std::string name_suffix = std::to_string(name_suffix_counter);
  std::string splitter = "__";
  std::string name_with_suffix = name + splitter + name_suffix;
  platform::CPUPlace cpu_place;
  PADDLE_ENFORCE_EQ(weight_map.count(name_with_suffix),
                    0,
                    platform::errors::AlreadyExists(
                        "The weight named %s is set into the weight map "
                        "twice in TRT OP converter.",
                        name_with_suffix));
  weight_map[name_with_suffix].reset(new phi::DenseTensor());
  weight_map[name_with_suffix]->Resize(weight_tensor.dims());

  TensorRTEngine::Weight weight;
  weight.SetCount(weight_tensor.numel());

  // If TRT does not support the dtype, cast it to fp16.
  if (weight_tensor.dtype() == phi::DataType::BFLOAT16) {
    phi::DenseTensor bf16_tensor;
    bf16_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &bf16_tensor);
    weight_map[name_with_suffix]->set_type(phi::DataType::FLOAT16);
    auto *fp16_data = weight_map[name_with_suffix]->mutable_data<float16>(
        platform::CPUPlace());
    auto *bf16_data = bf16_tensor.mutable_data<bfloat16>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      fp16_data[i] = static_cast<float16>(bf16_data[i]);
    }
    weight.SetDataType(phi::DataType::FLOAT16);
    weight.SetValues(fp16_data);
  } else if (weight_tensor.dtype() == phi::DataType::FLOAT32) {
    phi::DenseTensor fp32_tensor;
    fp32_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &fp32_tensor);
    weight_map[name_with_suffix]->set_type(phi::DataType::FLOAT16);
    auto *fp16_data = weight_map[name_with_suffix]->mutable_data<float16>(
        platform::CPUPlace());
    auto *fp32_data = fp32_tensor.mutable_data<float>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      fp16_data[i] = static_cast<float16>(fp32_data[i]);
    }
    weight.SetDataType(phi::DataType::FLOAT16);
    weight.SetValues(fp16_data);
  } else if (weight_tensor.dtype() == phi::DataType::INT64) {
    phi::DenseTensor int64_tensor;
    int64_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &int64_tensor);
    weight_map[name_with_suffix]->set_type(phi::DataType::INT32);
    auto *int32_data = weight_map[name_with_suffix]->mutable_data<int32_t>(
        platform::CPUPlace());
    auto *int64_data = int64_tensor.mutable_data<int64_t>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      int32_data[i] = int64_data[i];
    }
    weight.SetDataType(phi::DataType::INT32);
    weight.SetValues(int32_data);
  } else {
    paddle::framework::TensorCopySync(
        weight_tensor, cpu_place, weight_map[name_with_suffix].get());
    weight.SetDataType(weight_tensor.dtype());
    weight.SetValues(weight_map[name_with_suffix]->data());
  }
  name_suffix_counter += 1;
  return weight;
}

// Note: Only to support plugins.
TensorRTEngine::Weight TensorRTEngine::GetFp32TrtWeight(
    const std::string &name, const phi::DenseTensor &weight_tensor) {
  static int name_suffix_counter = 0;
  std::string name_suffix = std::to_string(name_suffix_counter);
  std::string splitter = "__";
  std::string name_with_suffix = name + splitter + name_suffix;
  platform::CPUPlace cpu_place;
  PADDLE_ENFORCE_EQ(weight_map.count(name_with_suffix),
                    0,
                    platform::errors::AlreadyExists(
                        "The weight named %s is set into the weight map "
                        "twice in TRT OP converter.",
                        name_with_suffix));
  weight_map[name_with_suffix].reset(new phi::DenseTensor());
  weight_map[name_with_suffix]->Resize(weight_tensor.dims());

  TensorRTEngine::Weight weight;
  weight.SetCount(weight_tensor.numel());

  // If TRT does not support the dtype, cast it to fp32.
  if (weight_tensor.dtype() == phi::DataType::BFLOAT16) {
    phi::DenseTensor bf16_tensor;
    bf16_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &bf16_tensor);
    weight_map[name_with_suffix]->set_type(phi::DataType::FLOAT32);
    auto *fp32_data =
        weight_map[name_with_suffix]->mutable_data<float>(platform::CPUPlace());
    auto *bf16_data = bf16_tensor.mutable_data<bfloat16>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      fp32_data[i] = static_cast<float>(bf16_data[i]);
    }
    weight.SetDataType(phi::DataType::FLOAT32);
    weight.SetValues(fp32_data);
  } else if (weight_tensor.dtype() == phi::DataType::FLOAT16) {
    phi::DenseTensor fp16_tensor;
    fp16_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &fp16_tensor);
    weight_map[name_with_suffix]->set_type(phi::DataType::FLOAT32);
    auto *fp32_data =
        weight_map[name_with_suffix]->mutable_data<float>(platform::CPUPlace());
    auto *fp16_data = fp16_tensor.mutable_data<float16>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      fp32_data[i] = static_cast<float>(fp16_data[i]);
    }
    weight.SetDataType(phi::DataType::FLOAT32);
    weight.SetValues(fp32_data);
  } else if (weight_tensor.dtype() == phi::DataType::INT64) {
    phi::DenseTensor int64_tensor;
    int64_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &int64_tensor);
    weight_map[name_with_suffix]->set_type(phi::DataType::INT32);
    auto *int32_data = weight_map[name_with_suffix]->mutable_data<int32_t>(
        platform::CPUPlace());
    auto *int64_data = int64_tensor.mutable_data<int64_t>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      int32_data[i] = int64_data[i];
    }
    weight.SetDataType(phi::DataType::INT32);
    weight.SetValues(int32_data);
  } else {
    paddle::framework::TensorCopySync(
        weight_tensor, cpu_place, weight_map[name_with_suffix].get());
    weight.SetDataType(weight_tensor.dtype());
    weight.SetValues(weight_map[name_with_suffix]->data());
  }
  name_suffix_counter += 1;
  return weight;
}

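// Returns the weight in a TRT-compatible form: bf16 is cast to fp32 and int64
// to int32 (TRT does not support them); other dtypes are referenced in place
// when already on CPU, or copied to a CPU tensor held in weight_map when the
// source tensor lives on the GPU.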
TensorRTEngine::Weight TensorRTEngine::GetTrtWeight(
    const std::string &name, const phi::DenseTensor &weight_tensor) {
  static int name_suffix_counter = 0;
  std::string name_suffix = std::to_string(name_suffix_counter);
  std::string splitter = "__";
  std::string name_with_suffix = name + splitter + name_suffix;
  platform::CPUPlace cpu_place;
  PADDLE_ENFORCE_EQ(weight_map.count(name_with_suffix),
                    0,
                    platform::errors::AlreadyExists(
                        "The weight named %s is set into the weight map "
                        "twice in TRT OP converter.",
                        name_with_suffix));

  if (weight_tensor.place() == PlaceType::kGPU ||
      weight_tensor.dtype() != phi::DataType::FLOAT32) {
    weight_map[name_with_suffix].reset(new phi::DenseTensor());
    weight_map[name_with_suffix]->Resize(weight_tensor.dims());
  }

  TensorRTEngine::Weight weight;
  weight.SetCount(weight_tensor.numel());

  // If TRT does not support the dtype, cast it to fp32.
  if (weight_tensor.dtype() == phi::DataType::BFLOAT16) {
    phi::DenseTensor bf16_tensor;
    bf16_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &bf16_tensor);
    weight_map[name_with_suffix]->set_type(phi::DataType::FLOAT32);
    auto *fp32_data =
        weight_map[name_with_suffix]->mutable_data<float>(platform::CPUPlace());
    auto *bf16_data = bf16_tensor.mutable_data<bfloat16>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      fp32_data[i] = static_cast<float>(bf16_data[i]);
    }
    weight.SetDataType(phi::DataType::FLOAT32);
    weight.SetValues(fp32_data);
  } else if (weight_tensor.dtype() == phi::DataType::INT64) {
    phi::DenseTensor int64_tensor;
    int64_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &int64_tensor);
    weight_map[name_with_suffix]->set_type(phi::DataType::INT32);
    auto *int32_data = weight_map[name_with_suffix]->mutable_data<int32_t>(
        platform::CPUPlace());
    auto *int64_data = int64_tensor.mutable_data<int64_t>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      int32_data[i] = int64_data[i];
    }
    weight.SetDataType(phi::DataType::INT32);
    weight.SetValues(int32_data);
  } else {
    if (weight_tensor.place() == PlaceType::kGPU) {
      paddle::framework::TensorCopySync(
          weight_tensor, cpu_place, weight_map[name_with_suffix].get());
      weight.SetDataType(weight_tensor.dtype());
      weight.SetValues(weight_map[name_with_suffix]->data());
    } else {
      weight.SetDataType(weight_tensor.dtype());
      weight.SetValues(weight_tensor.data());
    }
  }

  name_suffix_counter += 1;
  return weight;
}

int TensorRTEngine::GetRuntimeBatch() { return runtime_batch_; }

nvinfer1::IPluginV2Layer *TensorRTEngine::AddPlugin(
    nvinfer1::ITensor *const *inputs,
    int num_inputs,
    plugin::PluginTensorRT *plugin) {
  owned_plugin_.emplace_back(plugin);
  return network()->addPluginV2(inputs, num_inputs, *plugin);
}

nvinfer1::IPluginV2Layer *TensorRTEngine::AddPluginV2Ext(
    nvinfer1::ITensor *const *inputs,
    int num_inputs,
    plugin::PluginTensorRTV2Ext *plugin) {
  owned_plugin_v2ext_.emplace_back(plugin);
  return network()->addPluginV2(inputs, num_inputs, *plugin);
}

nvinfer1::IPluginV2Layer *TensorRTEngine::AddPluginV2IOExt(
    nvinfer1::ITensor *const *inputs,
    int num_inputs,
    nvinfer1::IPluginV2IOExt *plugin) {
  owned_plugin_v2ioext_.emplace_back(plugin);
  return network()->addPluginV2(inputs, num_inputs, *plugin);
}

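// Binds the calling thread to the GPU this engine was configured for
// (device_id_), after checking it against the number of visible devices.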
void TensorRTEngine::freshDeviceId() {
  int count;
  cudaGetDeviceCount(&count);
  PADDLE_ENFORCE_LT(device_id_,
                    count,
                    platform::errors::OutOfRange(
                        "Device id %d exceeds the current device count: %d.",
                        device_id_,
                        count));
  platform::SetDeviceId(device_id_);
}

void TensorRTEngine::GetEngineInfo() {
#if IS_TRT_VERSION_GE(8200)
  LOG(INFO) << "====== engine info ======";
  std::unique_ptr<nvinfer1::IEngineInspector> infer_inspector(
      infer_engine_->createEngineInspector());
  auto *infer_context = context();
  infer_inspector->setExecutionContext(infer_context);
  LOG(INFO) << infer_inspector->getEngineInformation(
      nvinfer1::LayerInformationFormat::kJSON);
  LOG(INFO) << "====== engine info end ======";
#else
  LOG(INFO) << "The engine inspector requires TensorRT version 8.2 or later.";
#endif
}

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle