/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/inference/tensorrt/engine.h"

#include <NvInfer.h>
#include <glog/logging.h>

#include <string>

#include "NvInferRuntimeCommon.h"
#include "cuda_runtime_api.h"  // NOLINT
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/common/data_type.h"

namespace paddle {
namespace inference {
namespace tensorrt {

int TensorRTEngine::runtime_batch_ = 1;
thread_local int TensorRTEngine::predictor_id_per_thread = -1;

void TensorRTEngine::Weight::SetDataType(phi::DataType type) {
  nvinfer1::DataType nv_type = nvinfer1::DataType::kFLOAT;
  switch (type) {
    case phi::DataType::FLOAT32:
      nv_type = nvinfer1::DataType::kFLOAT;
      break;
    case phi::DataType::FLOAT16:
      nv_type = nvinfer1::DataType::kHALF;
      break;
    case phi::DataType::INT32:
      nv_type = nvinfer1::DataType::kINT32;
      break;
    case phi::DataType::INT8:
      nv_type = nvinfer1::DataType::kINT8;
      break;
#if IS_TRT_VERSION_GE(7000)
    case phi::DataType::BOOL:
      nv_type = nvinfer1::DataType::kBOOL;
      break;
#endif
    default:
      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
          "Paddle-TRT failed to load weights: unsupported data type %s.",
          type));
      break;
  }
  w_.type = nv_type;
}

void TensorRTEngine::InitNetwork() {
  freshDeviceId();
  infer_builder_.reset(createInferBuilder(&logger_));

  if (with_dynamic_shape_) {
    infer_network_.reset(infer_builder_->createNetworkV2(
        1U << static_cast<int>(
            nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
  } else {
    infer_network_.reset(infer_builder_->createNetworkV2(0U));
  }

  infer_builder_config_.reset(infer_builder_->createBuilderConfig());
  optim_profiles_.resize(max_profile_num_);
  for (int i = 0; i < max_profile_num_; i++)
    optim_profiles_[i] = infer_builder_->createOptimizationProfile();
}

nvinfer1::IExecutionContext *TensorRTEngine::context() {
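  // Each predictor thread gets its own IExecutionContext, stored in
  // infer_context_ and keyed by predictor_id_per_thread.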
  std::unique_lock<std::mutex> lock(mutex_);
  if (infer_context_.find(predictor_id_per_thread) == infer_context_.end()) {
    PADDLE_ENFORCE_NOT_NULL(
        infer_engine_,
        platform::errors::InvalidArgument(
            "You should build engine first and then set the context."));
    // We may see a TRT warning: "Profile 0 has been chosen by another
    // IExecutionContext...". That is fine; the profile is set again below.
    nvinfer1::IExecutionContext *infer_context{nullptr};
    if (context_memory_sharing_) {
      infer_context =
          infer_engine_->createExecutionContextWithoutDeviceMemory();
    } else {
      infer_context = infer_engine_->createExecutionContext();
    }
    PADDLE_ENFORCE_NOT_NULL(
        infer_context,
        platform::errors::InvalidArgument(
            "TensorRT engine can not build execution context."));
    if (with_dynamic_shape_) {
      // Switch to a new optimization profile if this is not the first context.
      if (cur_profile_num_ > 0) {
        infer_context->setOptimizationProfile(cur_profile_num_);
      }
      profile_index_[predictor_id_per_thread] = cur_profile_num_;
      ++cur_profile_num_;
    }
    infer_context_[predictor_id_per_thread].reset(infer_context);
  }
  return infer_context_[predictor_id_per_thread].get();
}

void TensorRTEngine::Execute(int batch_size,
                             std::vector<void *> *buffers,
                             cudaStream_t stream) {
  freshDeviceId();
  auto infer_context = context();
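  // With context memory sharing, the execution context was created without
  // device memory, so bind a shared scratch buffer managed by
  // TRTEngineManager before enqueueing.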
  if (context_memory_sharing_) {
    void *context_memory{nullptr};
    context_memory =
        inference::Singleton<inference::tensorrt::TRTEngineManager>::Global()
            .getContextMemory(
                predictor_id_per_thread,
                phi::GPUPlace(device_id_),
                phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
    infer_context->setDeviceMemory(context_memory);
  }
  if (!with_dynamic_shape()) {
    infer_context->enqueue(batch_size, buffers->data(), stream, nullptr);
  } else {
    infer_context->enqueueV2(buffers->data(), stream, nullptr);
  }
  SetRuntimeBatch(batch_size);
}

void TensorRTEngine::FreezeNetwork() {
  freshDeviceId();
  VLOG(3) << "TRT to freeze network";
  PADDLE_ENFORCE_NOT_NULL(infer_builder_,
                          platform::errors::InvalidArgument(
                              "Inference builder of TRT is null. Please make "
                              "sure you call InitNetwork first."));
  PADDLE_ENFORCE_NOT_NULL(network(),
                          platform::errors::InvalidArgument(
                              "Call InitNetwork first to initialize network."));
  // build engine.
  infer_builder_->setMaxBatchSize(max_batch_);
#if IS_TRT_VERSION_GE(8300)
  infer_builder_config_->setMemoryPoolLimit(
      nvinfer1::MemoryPoolType::kWORKSPACE, max_workspace_);
#else
  infer_builder_config_->setMaxWorkspaceSize(max_workspace_);
#endif
  bool enable_fp16 = (precision_ == AnalysisConfig::Precision::kHalf);
  if (enable_fp16) {
    bool support_fp16 = infer_builder_->platformHasFastFp16();
    infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
    if (!support_fp16) {
      LOG(INFO) << "You specified FP16 mode, but the hardware does not "
                   "support FP16 speedup, so FP32 is used instead.";
    } else {
      LOG(INFO) << "Run Paddle-TRT FP16 mode";
    }
  }

  bool enable_int8 = (precision_ == AnalysisConfig::Precision::kInt8);
  if (enable_int8) {
    if (!use_dla_) {
      infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
    }
    infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kINT8);

    if (calibrator_) {
      infer_builder_config_->setInt8Calibrator(calibrator_);
    } else {
      infer_builder_config_->setInt8Calibrator(nullptr);

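      // Without a calibrator, apply the per-tensor dynamic ranges collected
      // during conversion; tensors with no recorded range are only logged.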
      for (auto &quant_range : quant_dynamic_range_) {
        auto tensor = quant_range.first;
        float range = quant_range.second;
        tensor->setDynamicRange(-range, range);
      }

      std::unordered_set<nvinfer1::ITensor *> all_t;
      for (int i = 0; i < network()->getNbLayers(); i++) {
        auto layer = network()->getLayer(i);
        for (int j = 0; j < layer->getNbOutputs(); j++) {
          all_t.insert(layer->getOutput(j));
        }
      }

      for (int i = 0; i < network()->getNbInputs(); i++) {
        all_t.insert(network()->getInput(i));
      }

      for (auto &t : all_t) {
        if (!quant_dynamic_range_.count(t)) {
          VLOG(3) << "We are in TRT int8 mode (not calibration); no scale is"
                  << " set for tensor " << t->getName()
                  << ". This may be ok if TRT does not need this range.";
        }
      }
    }
  }

  if (use_dla_) {
    if (!enable_int8 && !enable_fp16) {
      LOG(WARNING) << "TensorRT DLA must be used with int8 or fp16, but you "
                      "set float32, so DLA is not used.";
    } else if (infer_builder_->getNbDLACores() == 0) {
      LOG(WARNING)
          << "TensorRT DLA is set by config, but your device does not have "
             "DLA, so DLA is not used.";
    } else {
      if (dla_core_ < 0 || dla_core_ >= infer_builder_->getNbDLACores()) {
        LOG(WARNING) << "Invalid DLACore, must satisfy 0 <= DLACore < "
                     << infer_builder_->getNbDLACores() << ", but got "
                     << dla_core_ << ", so 0 is used as the default.";
        dla_core_ = 0;
      }
      infer_builder_config_->setDefaultDeviceType(nvinfer1::DeviceType::kDLA);
      infer_builder_config_->setDLACore(dla_core_);
      infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK);
      LOG(INFO) << "TensorRT DLA enabled in FreezeNetwork(), DLACore "
                << dla_core_;
    }
  }

  if (with_dynamic_shape_) {
    LOG(INFO) << "Run Paddle-TRT Dynamic Shape mode.";
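    // Fill every optimization profile with the min/max/opt shape of each
    // dynamic input.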
    for (int i = 0; i < max_profile_num_; i++) {
      for (auto &input : min_input_shape_) {
#if IS_TRT_VERSION_LT(7000)
        // TRT 6 checks that all input dimensions are > 0.
        if (!(std::all_of(input.second.begin(),
                          input.second.end(),
                          [](int x) { return x > 0; }) &&
              std::all_of(max_input_shape_[input.first].begin(),
                          max_input_shape_[input.first].end(),
                          [](int x) { return x > 0; }) &&
              std::all_of(optim_input_shape_[input.first].begin(),
                          optim_input_shape_[input.first].end(),
                          [](int x) { return x > 0; }))) {
          continue;
        }
#endif
        VLOG(4) << "TRT dynamic_shape set " << input.first
                << " min: " << Vec2Str(input.second)
                << ", max: " << Vec2Str(max_input_shape_[input.first])
                << ", opt: " << Vec2Str(optim_input_shape_[input.first]);

        optim_profiles_[i]->setDimensions(
            input.first.c_str(),
            nvinfer1::OptProfileSelector::kMIN,
            Vec2TRT_Dims(input.second, input.first, true));
        optim_profiles_[i]->setDimensions(
            input.first.c_str(),
            nvinfer1::OptProfileSelector::kMAX,
            Vec2TRT_Dims(max_input_shape_[input.first], input.first, true));
        optim_profiles_[i]->setDimensions(
            input.first.c_str(),
            nvinfer1::OptProfileSelector::kOPT,
            Vec2TRT_Dims(optim_input_shape_[input.first], input.first, true));
      }

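      // Shape tensor inputs (whose values describe shapes) also need explicit
      // min/max/opt values in each profile.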
      for (int input_id = 0; input_id < network()->getNbInputs(); input_id++) {
        auto input_name = network()->getInput(input_id)->getName();
        if (!itensor_map_.count(input_name)) continue;
        if (!GetITensor(input_name)->isShapeTensor()) continue;
        PADDLE_ENFORCE_EQ(min_shape_tensor_.count(input_name) &&
                              max_shape_tensor_.count(input_name) &&
                              optim_shape_tensor_.count(input_name),
                          true,
                          platform::errors::InvalidArgument(
                              "Fail to find min/max/optim shape value for TRT "
                              "network's shape tensor input named %s.",
                              input_name));
        auto min_vec = min_shape_tensor_.at(input_name);
        optim_profiles_[i]->setShapeValues(input_name,
                                           nvinfer1::OptProfileSelector::kMIN,
                                           min_vec.data(),
                                           min_vec.size());
        optim_profiles_[i]->setShapeValues(input_name,
                                           nvinfer1::OptProfileSelector::kMAX,
                                           max_shape_tensor_[input_name].data(),
                                           min_vec.size());
        optim_profiles_[i]->setShapeValues(
            input_name,
            nvinfer1::OptProfileSelector::kOPT,
            optim_shape_tensor_[input_name].data(),
            min_vec.size());
      }

      infer_builder_config_->addOptimizationProfile(optim_profiles_[i]);
    }
    if (WithFp16() && disable_trt_plugin_fp16()) {
      LOG(INFO) << "NOTE: In order to achieve higher accuracy, you have "
                   "disabled the fp16 mode of the TRT plugins.\n"
                << "You can re-enable it with "
                   "'config.SetDynamicShapeInfo(min_shape, max_shape, "
                   "opt_shape, false /*disable_trt_plugin_fp16*/)'.";
    }
  }
#if IS_TRT_VERSION_GE(8200)
  if (use_inspector_) {
    infer_builder_config_->setProfilingVerbosity(
        nvinfer1::ProfilingVerbosity::kDETAILED);
  }
#endif

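  // TRT < 8.0 builds the engine directly; TRT >= 8.0 builds a serialized
  // network and deserializes it, keeping the serialized blob in ihost_memory_.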
#if IS_TRT_VERSION_LT(8000)
  infer_engine_.reset(infer_builder_->buildEngineWithConfig(
      *network(), *infer_builder_config_));
#else
  infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kSPARSE_WEIGHTS);
  ihost_memory_.reset(infer_builder_->buildSerializedNetwork(
      *network(), *infer_builder_config_));
  infer_ptr<nvinfer1::IRuntime> runtime(createInferRuntime(&logger_));
  infer_engine_.reset(runtime->deserializeCudaEngine(ihost_memory_->data(),
                                                     ihost_memory_->size()));
#endif

  PADDLE_ENFORCE_NOT_NULL(
      infer_engine_,
      platform::errors::Fatal(
          "Building the TensorRT CUDA engine failed! Please recheck "
          "your configuration related to Paddle-TensorRT."));

  binding_num_ = infer_engine_->getNbBindings();
  // reset status for dynamic shape clone
  if (max_profile_num_ > 1) {
    infer_context_.clear();
    cur_profile_num_ = 0;
  }
  // for engine context memory sharing
  if (context_memory_sharing_) {
    inference::Singleton<inference::tensorrt::TRTEngineManager>::Global()
        .updateContextMemorySize(infer_engine_->getDeviceMemorySize(),
                                 predictor_id_per_thread);
  }

  GetEngineInfo();
}

nvinfer1::ITensor *TensorRTEngine::DeclareInput(const std::string &name,
                                                nvinfer1::DataType dtype,
                                                const nvinfer1::Dims &dims) {
  PADDLE_ENFORCE_EQ(network() != nullptr,
                    true,
                    platform::errors::InvalidArgument(
                        "The TRT network should be initialized first."));
  auto *input = network()->addInput(name.c_str(), dtype, dims);
  PADDLE_ENFORCE_NOT_NULL(
      input,
      platform::errors::InvalidArgument("Adding input %s failed in "
                                        "TensorRT inference network. "
                                        "Please recheck your input.",
                                        name));
  PADDLE_ENFORCE_EQ(input->isNetworkInput(),
                    true,
                    platform::errors::InvalidArgument(
                        "Input %s is not the input of TRT inference network. "
                        "Please recheck your input.",
                        name));
  TensorRTEngine::SetITensor(name, input);
  return input;
}

void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer *layer,
                                   int offset,
                                   const std::string &name) {
  auto *output = layer->getOutput(offset);
  SetITensor(name, output);
  PADDLE_ENFORCE_NOT_NULL(
      output,
      platform::errors::InvalidArgument(
          "The output %s of TRT engine should not be null.", name));
  output->setName(name.c_str());
  PADDLE_ENFORCE_EQ(output->isNetworkInput(),
                    false,
                    platform::errors::InvalidArgument(
                        "The output %s of TRT engine should not be the input "
                        "of the network at the same time.",
                        name));
  network()->markOutput(*output);
  PADDLE_ENFORCE_EQ(
      output->isNetworkOutput(),
      true,
      platform::errors::InvalidArgument(
          "The output %s of TRT engine should be the output of the network.",
          name));
}

void TensorRTEngine::DeclareOutput(const std::string &name) {
  auto *output = TensorRTEngine::GetITensor(name);
  PADDLE_ENFORCE_NOT_NULL(
      output,
      platform::errors::InvalidArgument(
          "The output %s of TRT engine should not be null.", name));
  output->setName(name.c_str());
  PADDLE_ENFORCE_EQ(output->isNetworkInput(),
                    false,
                    platform::errors::InvalidArgument(
                        "The output %s of TRT engine should not be the input "
                        "of the network at the same time.",
                        name));
  network()->markOutput(*output);
}

void TensorRTEngine::DeclareOutput(const std::string &name,
                                   nvinfer1::DataType dtype) {
  auto *output = TensorRTEngine::GetITensor(name);
  DeclareOutput(name);
  output->setType(dtype);
}

void TensorRTEngine::DeleteITensor(const std::string &name,
                                   nvinfer1::ITensor *tensor) {
  PADDLE_ENFORCE_NOT_NULL(
      tensor,
      platform::errors::InvalidArgument(
          "Tensor named %s of TRT engine should not be null.", name));
  PADDLE_ENFORCE_EQ(
      true,
      itensor_map_.count(name),
      platform::errors::InvalidArgument(
          "Tensor named %s is not found in the TRT engine's tensor map.",
          name));
  itensor_map_.erase(name);
}

void TensorRTEngine::SetITensor(const std::string &name,
                                nvinfer1::ITensor *tensor) {
  PADDLE_ENFORCE_NOT_NULL(
      tensor,
      platform::errors::InvalidArgument(
          "Tensor named %s of TRT engine should not be null.", name));
  PADDLE_ENFORCE_EQ(
      0,
      itensor_map_.count(name),
      platform::errors::InvalidArgument(
          "Tensor named %s of TRT engine should not be duplicated", name));
  itensor_map_[name] = tensor;
}

nvinfer1::ITensor *TensorRTEngine::GetITensor(const std::string &name,
                                              bool scalar) {
  if (scalar) {
    return ConvertWeight2ITensor(name, true);
  }
  if (itensor_map_.count(name)) {
    return itensor_map_[name];
  } else {
    ConvertWeight2ITensor(name);
    return itensor_map_[name];
  }
}

// For cases where the input is not an intermediate tensor but a persistable
// (weight) tensor, call this.
nvinfer1::ITensor *TensorRTEngine::ConvertWeight2ITensor(
    const std::string &name, bool scalar) {
  auto *var_v = scope_->FindVar(name);
  PADDLE_ENFORCE_NOT_NULL(
      var_v,
      platform::errors::NotFound("You are converting a persistable weight to a "
                                 "tensor, but there is no "
                                 "persistable variable called %s in scope.",
                                 name));
  auto *var_t = var_v->GetMutable<phi::DenseTensor>();
  auto weight = this->GetTrtWeight(name, *var_t);

  // Now that the weights have been created, we need to create an ITensor.
  auto var_dims = var_t->dims();
  nvinfer1::Dims trt_in_shape;
  trt_in_shape.nbDims = var_t->dims().size();
  for (int64_t i = 0; i < trt_in_shape.nbDims; i++) {
    trt_in_shape.d[i] = var_dims[i];
  }
  // In fact, this is not always correct, because we cannot determine whether
  // the 0th dimension is the batch dimension. This is done just to run
  // chenqu's model.
  if (!this->with_dynamic_shape()) {
    trt_in_shape.nbDims--;
    for (int i = 0; i < trt_in_shape.nbDims; i++) {
      trt_in_shape.d[i] = trt_in_shape.d[i + 1];
    }
  }
  if (scalar) {
    trt_in_shape.nbDims = 0;
    trt_in_shape.d[0] = var_dims[0];
  }
  nvinfer1::ILayer *layer =
      TRT_ENGINE_ADD_LAYER(this, Constant, trt_in_shape, weight.get());
  if (!scalar) {
    this->SetITensor(name, layer->getOutput(0));
  }
  return layer->getOutput(0);
}

std::unordered_map<std::string, nvinfer1::ITensor *>
    *TensorRTEngine::GetITensorMap() {
  return &itensor_map_;
}

void TensorRTEngine::Deserialize(const std::string &engine_serialized_data) {
  freshDeviceId();
  infer_ptr<nvinfer1::IRuntime> runtime(createInferRuntime(&logger_));

  if (use_dla_) {
    if (precision_ != AnalysisConfig::Precision::kInt8 &&
        precision_ != AnalysisConfig::Precision::kHalf) {
      LOG(WARNING) << "TensorRT DLA must be used with int8 or fp16, but you "
                      "set float32, so DLA is not used.";
    } else if (runtime->getNbDLACores() == 0) {
      LOG(WARNING)
          << "TensorRT DLA is set by config, but your device does not have "
             "DLA, so DLA is not used.";
    } else {
      if (dla_core_ < 0 || dla_core_ >= runtime->getNbDLACores()) {
        LOG(WARNING) << "Invalid DLACore, must satisfy 0 <= DLACore < "
                     << runtime->getNbDLACores() << ", but got " << dla_core_
                     << ", so 0 is used as the default.";
        dla_core_ = 0;
      }
      runtime->setDLACore(dla_core_);
      LOG(INFO) << "TensorRT DLA enabled in Deserialize(), DLACore "
                << dla_core_;
    }
  }

  infer_engine_.reset(runtime->deserializeCudaEngine(
      engine_serialized_data.c_str(), engine_serialized_data.size()));

  PADDLE_ENFORCE_NOT_NULL(
      infer_engine_,
      platform::errors::Fatal(
          "Building TRT cuda engine failed when deserializing engine info. "
          "Please check:\n1. Your TRT serialization is generated and loaded "
          "on the same GPU architecture;\n2. The Paddle Inference version of "
          "generating serialization file and doing inference are "
          "consistent."));

  binding_num_ = infer_engine_->getNbBindings();
  // for engine context memory sharing
  if (context_memory_sharing_) {
    inference::Singleton<inference::tensorrt::TRTEngineManager>::Global()
        .updateContextMemorySize(infer_engine_->getDeviceMemorySize(),
                                 predictor_id_per_thread);
  }

  GetEngineInfo();
}

void TensorRTEngine::SetRuntimeBatch(size_t batch_size) {
  runtime_batch_ = batch_size;
}

// Note: Only used to support plugins.
TensorRTEngine::Weight TensorRTEngine::GetFp16TrtWeight(
    const std::string &name, const phi::DenseTensor &weight_tensor) {
  static int name_suffix_counter = 0;
  std::string name_suffix = std::to_string(name_suffix_counter);
  std::string splitter = "__";
  std::string name_with_suffix = name + splitter + name_suffix;
  platform::CPUPlace cpu_place;
  PADDLE_ENFORCE_EQ(weight_map.count(name_with_suffix),
                    0,
                    platform::errors::AlreadyExists(
                        "The weight named %s is set into the weight map "
                        "twice in TRT OP converter.",
                        name_with_suffix));
  weight_map[name_with_suffix].reset(new phi::DenseTensor());
  weight_map[name_with_suffix]->Resize(weight_tensor.dims());

  TensorRTEngine::Weight weight;
  weight.SetCount(weight_tensor.numel());

  // If TRT does not support the dtype, cast it to fp16.
  if (weight_tensor.dtype() == phi::DataType::BFLOAT16) {
    phi::DenseTensor bf16_tensor;
    bf16_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &bf16_tensor);
    weight_map[name_with_suffix]->set_type(
        paddle::experimental::DataType::FLOAT16);
    auto *fp16_data = weight_map[name_with_suffix]->mutable_data<float16>(
        platform::CPUPlace());
    auto *bf16_data = bf16_tensor.mutable_data<bfloat16>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      fp16_data[i] = static_cast<float16>(bf16_data[i]);
    }
    weight.SetDataType(phi::DataType::FLOAT16);
    weight.SetValues(fp16_data);
  } else if (weight_tensor.dtype() == phi::DataType::FLOAT32) {
    phi::DenseTensor fp32_tensor;
    fp32_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &fp32_tensor);
    weight_map[name_with_suffix]->set_type(
        paddle::experimental::DataType::FLOAT16);
    auto *fp16_data = weight_map[name_with_suffix]->mutable_data<float16>(
        platform::CPUPlace());
    auto *fp32_data = fp32_tensor.mutable_data<float>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      fp16_data[i] = static_cast<float16>(fp32_data[i]);
    }
    weight.SetDataType(phi::DataType::FLOAT16);
    weight.SetValues(fp16_data);
  } else if (weight_tensor.dtype() == phi::DataType::INT64) {
    phi::DenseTensor int64_tensor;
    int64_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &int64_tensor);
    weight_map[name_with_suffix]->set_type(
        paddle::experimental::DataType::INT32);
    auto *int32_data = weight_map[name_with_suffix]->mutable_data<int32_t>(
        platform::CPUPlace());
    auto *int64_data = int64_tensor.mutable_data<int64_t>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      int32_data[i] = int64_data[i];
    }
    weight.SetDataType(phi::DataType::INT32);
    weight.SetValues(int32_data);
  } else {
    paddle::framework::TensorCopySync(
        weight_tensor, cpu_place, weight_map[name_with_suffix].get());
    weight.SetDataType(weight_tensor.dtype());
    weight.SetValues(weight_map[name_with_suffix]->data());
  }
  name_suffix_counter += 1;
  return weight;
}

// Note: Only used to support plugins.
TensorRTEngine::Weight TensorRTEngine::GetFp32TrtWeight(
    const std::string &name, const phi::DenseTensor &weight_tensor) {
  static int name_suffix_counter = 0;
  std::string name_suffix = std::to_string(name_suffix_counter);
  std::string splitter = "__";
  std::string name_with_suffix = name + splitter + name_suffix;
  platform::CPUPlace cpu_place;
  PADDLE_ENFORCE_EQ(weight_map.count(name_with_suffix),
                    0,
                    platform::errors::AlreadyExists(
                        "The weight named %s is set into the weight map "
                        "twice in TRT OP converter.",
                        name_with_suffix));
  weight_map[name_with_suffix].reset(new phi::DenseTensor());
  weight_map[name_with_suffix]->Resize(weight_tensor.dims());

  TensorRTEngine::Weight weight;
  weight.SetCount(weight_tensor.numel());

  // If TRT does not support the dtype, cast it to fp32.
  if (weight_tensor.dtype() == phi::DataType::BFLOAT16) {
    phi::DenseTensor bf16_tensor;
    bf16_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &bf16_tensor);
    weight_map[name_with_suffix]->set_type(
        paddle::experimental::DataType::FLOAT32);
    auto *fp32_data =
        weight_map[name_with_suffix]->mutable_data<float>(platform::CPUPlace());
    auto *bf16_data = bf16_tensor.mutable_data<bfloat16>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      fp32_data[i] = static_cast<float>(bf16_data[i]);
    }
    weight.SetDataType(phi::DataType::FLOAT32);
    weight.SetValues(fp32_data);
  } else if (weight_tensor.dtype() == phi::DataType::FLOAT16) {
    phi::DenseTensor fp16_tensor;
    fp16_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &fp16_tensor);
    weight_map[name_with_suffix]->set_type(
        paddle::experimental::DataType::FLOAT32);
    auto *fp32_data =
        weight_map[name_with_suffix]->mutable_data<float>(platform::CPUPlace());
    auto *fp16_data = fp16_tensor.mutable_data<float16>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      fp32_data[i] = static_cast<float>(fp16_data[i]);
    }
    weight.SetDataType(phi::DataType::FLOAT32);
    weight.SetValues(fp32_data);
  } else if (weight_tensor.dtype() == phi::DataType::INT64) {
    phi::DenseTensor int64_tensor;
    int64_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &int64_tensor);
    weight_map[name_with_suffix]->set_type(
        paddle::experimental::DataType::INT32);
    auto *int32_data = weight_map[name_with_suffix]->mutable_data<int32_t>(
        platform::CPUPlace());
    auto *int64_data = int64_tensor.mutable_data<int64_t>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      int32_data[i] = int64_data[i];
    }
    weight.SetDataType(phi::DataType::INT32);
    weight.SetValues(int32_data);
  } else {
    paddle::framework::TensorCopySync(
        weight_tensor, cpu_place, weight_map[name_with_suffix].get());
    weight.SetDataType(weight_tensor.dtype());
    weight.SetValues(weight_map[name_with_suffix]->data());
  }
  name_suffix_counter += 1;
  return weight;
}

TensorRTEngine::Weight TensorRTEngine::GetTrtWeight(
    const std::string &name, const phi::DenseTensor &weight_tensor) {
  static int name_suffix_counter = 0;
  std::string name_suffix = std::to_string(name_suffix_counter);
  std::string splitter = "__";
  std::string name_with_suffix = name + splitter + name_suffix;
  platform::CPUPlace cpu_place;
  PADDLE_ENFORCE_EQ(weight_map.count(name_with_suffix),
                    0,
                    platform::errors::AlreadyExists(
                        "The weight named %s is set into the weight map "
                        "twice in TRT OP converter.",
                        name_with_suffix));

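  // Weights that live on the GPU or are not already fp32 may need a CPU-side
  // (possibly converted) copy held in weight_map; others are used in place.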
  if (weight_tensor.place() == PlaceType::kGPU ||
      weight_tensor.dtype() != phi::DataType::FLOAT32) {
    weight_map[name_with_suffix].reset(new phi::DenseTensor());
    weight_map[name_with_suffix]->Resize(weight_tensor.dims());
  }

  TensorRTEngine::Weight weight;
  weight.SetCount(weight_tensor.numel());

  // If TRT does not support the dtype, cast it to fp32.
  if (weight_tensor.dtype() == phi::DataType::BFLOAT16) {
    phi::DenseTensor bf16_tensor;
    bf16_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &bf16_tensor);
    weight_map[name_with_suffix]->set_type(
        paddle::experimental::DataType::FLOAT32);
    auto *fp32_data =
        weight_map[name_with_suffix]->mutable_data<float>(platform::CPUPlace());
    auto *bf16_data = bf16_tensor.mutable_data<bfloat16>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      fp32_data[i] = static_cast<float>(bf16_data[i]);
    }
    weight.SetDataType(phi::DataType::FLOAT32);
    weight.SetValues(fp32_data);
  } else if (weight_tensor.dtype() == phi::DataType::INT64) {
    phi::DenseTensor int64_tensor;
    int64_tensor.clear();
    paddle::framework::TensorCopySync(
        weight_tensor, platform::CPUPlace(), &int64_tensor);
    weight_map[name_with_suffix]->set_type(
        paddle::experimental::DataType::INT32);
    auto *int32_data = weight_map[name_with_suffix]->mutable_data<int32_t>(
        platform::CPUPlace());
    auto *int64_data = int64_tensor.mutable_data<int64_t>(platform::CPUPlace());
    for (int i = 0; i < weight_tensor.numel(); i++) {
      int32_data[i] = int64_data[i];
    }
    weight.SetDataType(phi::DataType::INT32);
    weight.SetValues(int32_data);
  } else {
    if (weight_tensor.place() == PlaceType::kGPU) {
      paddle::framework::TensorCopySync(
          weight_tensor, cpu_place, weight_map[name_with_suffix].get());
      weight.SetDataType(weight_tensor.dtype());
      weight.SetValues(weight_map[name_with_suffix]->data());
    } else {
      weight.SetDataType(weight_tensor.dtype());
      weight.SetValues(weight_tensor.data());
    }
  }

  name_suffix_counter += 1;
  return weight;
}

int TensorRTEngine::GetRuntimeBatch() { return runtime_batch_; }

nvinfer1::IPluginV2Layer *TensorRTEngine::AddPlugin(
    nvinfer1::ITensor *const *inputs,
    int num_inputs,
    plugin::PluginTensorRT *plugin) {
  owned_plugin_.emplace_back(plugin);
  return network()->addPluginV2(inputs, num_inputs, *plugin);
}

nvinfer1::IPluginV2Layer *TensorRTEngine::AddPluginV2Ext(
    nvinfer1::ITensor *const *inputs,
    int num_inputs,
    plugin::PluginTensorRTV2Ext *plugin) {
  owned_plugin_v2ext_.emplace_back(plugin);
  return network()->addPluginV2(inputs, num_inputs, *plugin);
}

nvinfer1::IPluginV2Layer *TensorRTEngine::AddPluginV2IOExt(
    nvinfer1::ITensor *const *inputs,
    int num_inputs,
    nvinfer1::IPluginV2IOExt *plugin) {
  owned_plugin_v2ioext_.emplace_back(plugin);
  return network()->addPluginV2(inputs, num_inputs, *plugin);
}

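// Bind the calling thread to the device this engine was created on before
// issuing CUDA/TensorRT calls.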
void TensorRTEngine::freshDeviceId() {
  int count;
  cudaGetDeviceCount(&count);
  PADDLE_ENFORCE_LT(device_id_,
                    count,
                    platform::errors::OutOfRange(
                        "Device id %d exceeds the current device count: %d.",
                        device_id_,
                        count));
  platform::SetDeviceId(device_id_);
}

void TensorRTEngine::GetEngineInfo() {
#if IS_TRT_VERSION_GE(8200)
  LOG(INFO) << "====== engine info ======";
  std::unique_ptr<nvinfer1::IEngineInspector> infer_inspector(
      infer_engine_->createEngineInspector());
  auto *infer_context = context();
  infer_inspector->setExecutionContext(infer_context);
  LOG(INFO) << infer_inspector->getEngineInformation(
      nvinfer1::LayerInformationFormat::kONELINE);
  LOG(INFO) << "====== engine info end ======";
#else
  LOG(INFO) << "Inspector requires TensorRT version 8.2 or later.";
#endif
}

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle