/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <NvInfer.h>

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "NvInferRuntimeCommon.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/inference/api/paddle_analysis_config.h"
#include "paddle/fluid/inference/engine.h"
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/stream.h"
#include "paddle/utils/any.h"

DECLARE_bool(trt_ibuilder_cache);

namespace paddle {
namespace inference {
namespace tensorrt {

namespace plugin {
class PluginTensorRT;
}  // namespace plugin

using FluidDT = framework::proto::VarType_Type;
using TRT_DT = nvinfer1::DataType;

namespace {  // NOLINT

TRT_DT FluidDataType2TRT(FluidDT type) {
  switch (type) {
    case FluidDT::VarType_Type_FP32:
      return TRT_DT::kFLOAT;
    case FluidDT::VarType_Type_INT32:
    case FluidDT::VarType_Type_INT64:
      return TRT_DT::kINT32;
    case FluidDT::VarType_Type_FP16:
      return TRT_DT::kHALF;
#if IS_TRT_VERSION_GE(8400)
    case FluidDT::VarType_Type_BOOL:
      return TRT_DT::kBOOL;
#endif
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "unknown fluid datatype in TRT op converter"));
  }
  return TRT_DT::kINT32;
}

// The T can be int32 or int64 type.
template <typename T>
nvinfer1::Dims Vec2TRT_Dims(const std::vector<T>& shape,
                            std::string input,
                            bool with_dynamic_shape = false) {
  PADDLE_ENFORCE_GT(shape.size(),
                    0UL,
                    platform::errors::InvalidArgument(
                        "TensorRT's tensor input requires at least 1 "
                        "dimension, but input %s has %d dims.",
                        input,
                        shape.size()));

  auto ShapeStr = [](const std::vector<T>& shape) {
    std::ostringstream os;
    os << "[";
    for (size_t i = 0; i < shape.size(); ++i) {
      if (i == shape.size() - 1) {
        os << shape[i];
      } else {
        os << shape[i] << ",";
      }
    }
    os << "]";
    return os.str();
  };
  if (!with_dynamic_shape) {
    if (shape.size() == 4UL) {
      if (shape[2] == -1 || shape[3] == -1) {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The input [%s] shape of trt subgraph is %s, please enable "
            "trt dynamic_shape mode by SetTRTDynamicShapeInfo.",
            input,
            ShapeStr(shape)));
      }
      return nvinfer1::Dims3(shape[1], shape[2], shape[3]);
    } else if (shape.size() == 5UL) {
      if (shape[2] == -1 || shape[3] == -1 || shape[4] == -1) {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The input [%s] shape of trt subgraph is %s, please enable "
            "trt dynamic_shape mode by SetTRTDynamicShapeInfo.",
            input,
            ShapeStr(shape)));
      }
      return nvinfer1::Dims4(shape[1], shape[2], shape[3], shape[4]);
    } else if (shape.size() == 3UL) {
      if (shape[1] == -1 || shape[2] == -1) {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The input [%s] shape of trt subgraph is %s, please enable "
            "trt dynamic_shape mode by SetTRTDynamicShapeInfo.",
            input,
            ShapeStr(shape)));
      }
      return nvinfer1::Dims2(shape[1], shape[2]);
    } else if (shape.size() == 2UL) {
      if (shape[1] == -1) {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The input [%s] shape of trt subgraph is %s, please enable "
            "trt dynamic_shape mode by SetTRTDynamicShapeInfo.",
            input,
            ShapeStr(shape)));
      }
      nvinfer1::Dims dims;
      dims.nbDims = 1;
      dims.d[0] = shape[1];
      return dims;
    }
    // Static shape mode doesn't support 1D ops so far.
    PADDLE_ENFORCE_NE(shape.size(),
                      1UL,
                      platform::errors::InvalidArgument(
                          "The input [%s] shape of trt subgraph is %s, "
                          "which is not supported by trt so far.",
                          input,
                          ShapeStr(shape)));

    nvinfer1::Dims dims;
    dims.nbDims = shape.size() - 1;
    for (size_t i = 1; i < shape.size(); i++) {
      dims.d[i - 1] = shape[i];
    }
    return dims;
  } else {
    if (shape.size() == 4UL) {
      return nvinfer1::Dims4(shape[0], shape[1], shape[2], shape[3]);
    } else if (shape.size() == 3UL) {
      return nvinfer1::Dims3(shape[0], shape[1], shape[2]);
    }
    nvinfer1::Dims dims;
    dims.nbDims = shape.size();
    for (size_t i = 0; i < shape.size(); i++) {
      dims.d[i] = shape[i];
    }
    return dims;
  }
}
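
// For example (a hedged illustration): in static-shape mode the leading batch
// dimension is stripped, so Vec2TRT_Dims({8, 3, 224, 224}, "x") yields
// Dims3(3, 224, 224), while with with_dynamic_shape = true all four
// dimensions are kept and Dims4(8, 3, 224, 224) is returned.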
}  // namespace

class TRTInt8Calibrator;

/*
 * TensorRT Engine.
 *
 * There are two alternative ways to use it: one is to build it from a Paddle
 * protobuf model, the other is to construct the network manually.
 */
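// A minimal usage sketch for manual construction (a hedged illustration, not
// taken from this codebase; the layer choice is arbitrary and
// TRT_ENGINE_ADD_LAYER is defined near the end of this header):
//
//   TensorRTEngine engine(/*max_batch=*/1, /*max_workspace=*/1 << 30);
//   engine.InitNetwork();
//   auto* x = engine.DeclareInput(
//       "x", nvinfer1::DataType::kFLOAT, nvinfer1::Dims3(3, 224, 224));
//   auto* act = TRT_ENGINE_ADD_LAYER(
//       &engine, Activation, *x, nvinfer1::ActivationType::kRELU);
//   engine.DeclareOutput(act, 0, "y");
//   engine.FreezeNetwork();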
class TensorRTEngine {
  using DescType = ::paddle::framework::proto::BlockDesc;
  using ShapeMapType = std::map<std::string, std::vector<int>>;
  using PredictorID = int;

 public:
  // Weight is a model parameter.
  class Weight {
   public:
    Weight() = default;
    Weight(nvinfer1::DataType dtype, void* value, size_t num_elem) {
      w_.type = dtype;
      w_.values = value;
      w_.count = num_elem;
    }
    const nvinfer1::Weights& get() { return w_; }

    void SetDataType(nvinfer1::DataType type) { w_.type = type; }

    void SetDataType(phi::DataType type);

    void SetValues(const void* values) { w_.values = values; }

    void SetCount(int64_t num) { w_.count = num; }

    std::vector<int64_t> dims;

   private:
    nvinfer1::Weights w_;
  };
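
  // A hedged example of wrapping a raw fp32 buffer (illustrative caller code;
  // nvinfer1::Weights stores only a pointer, so the buffer must stay alive
  // until the engine is built):
  //
  //   std::vector<float> data(num, 0.f);
  //   TensorRTEngine::Weight w(nvinfer1::DataType::kFLOAT, data.data(), num);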

  TensorRTEngine(
      int max_batch,
      int64_t max_workspace,
      AnalysisConfig::Precision precision = AnalysisConfig::Precision::kFloat32,
      TRTInt8Calibrator* calibrator = nullptr,
      int device_id = 0,
      const ShapeMapType min_input_shape = {},
      const ShapeMapType max_input_shape = {},
      const ShapeMapType optim_input_shape = {},
      const ShapeMapType min_shape_tensor = {},
      const ShapeMapType max_shape_tensor = {},
      const ShapeMapType optim_shape_tensor = {},
      bool disable_trt_plugin_fp16 = false,
      phi::DataType model_precision = phi::DataType::FLOAT32,
      nvinfer1::ILogger& logger = NaiveLogger::Global())
      : max_batch_(max_batch),
        max_workspace_(max_workspace),
        precision_(precision),
        calibrator_(calibrator),
        device_id_(device_id),
        min_input_shape_(min_input_shape),
        max_input_shape_(max_input_shape),
        optim_input_shape_(optim_input_shape),
        min_shape_tensor_(min_shape_tensor),
        max_shape_tensor_(max_shape_tensor),
        optim_shape_tensor_(optim_shape_tensor),
        disable_trt_plugin_fp16_(disable_trt_plugin_fp16),
        model_precision_(model_precision),
        logger_(logger) {
    if (min_input_shape_.size() != 0 && max_input_shape_.size() != 0 &&
        optim_input_shape_.size() != 0) {
      PADDLE_ENFORCE_EQ(
          min_input_shape_.size(),
          max_input_shape_.size(),
          platform::errors::InvalidArgument(
              "The min_input_shape_'s size(%d) should be equal to the "
              "size(%d) of max_input_shape_",
              min_input_shape_.size(),
              max_input_shape_.size()));
      PADDLE_ENFORCE_EQ(
          min_input_shape_.size(),
          optim_input_shape_.size(),
          platform::errors::InvalidArgument(
              "The min_input_shape_'s size(%d) should be equal to the "
              "size(%d) of optim_input_shape_",
              min_input_shape_.size(),
              optim_input_shape_.size()));
#if IS_TRT_VERSION_GE(6000)
      with_dynamic_shape_ = true;
#else
      LOG(WARNING) << "Using TRT dynamic shape requires a TRT version of at "
                      "least 6.";
#endif
    }
    dy::initLibNvInferPlugins(&logger, "");
  }

  ~TensorRTEngine() {
    for (auto& attr : attrs_) {
      if (attr_dels_.find(attr.first) != attr_dels_.end()) {
        attr_dels_[attr.first]();
      }
    }
    attrs_.clear();
    attr_dels_.clear();
  }

  // Add an input and set its name, data type and dimension.
  nvinfer1::ITensor* DeclareInput(const std::string& name,
                                  nvinfer1::DataType dtype,
                                  const nvinfer1::Dims& dim);
  // Set the offset-th output from a layer as the network's output, and set its
  // name.
  void DeclareOutput(const nvinfer1::ILayer* layer,
                     int offset,
                     const std::string& name);
  // Set the itensor_map_[name] as the network's output, and set its name.
  void DeclareOutput(const std::string& name);
  // Set the itensor_map_[name] as the network's output, and set its name and
  // data type.
  void DeclareOutput(const std::string& name, nvinfer1::DataType dtype);
  void ClearTensorMap() { itensor_map_.clear(); }

  void DeleteITensor(const std::string& name, nvinfer1::ITensor* tensor);
  void SetITensor(const std::string& name, nvinfer1::ITensor* tensor);
  // Get an ITensor called name.
  nvinfer1::ITensor* GetITensor(const std::string& name, bool scalar = false);
  nvinfer1::ITensor* ConvertWeight2ITensor(const std::string& name,
                                           bool scalar = false);
  std::unordered_map<std::string, nvinfer1::ITensor*>* GetITensorMap();

  nvinfer1::ICudaEngine* engine() { return infer_engine_.get(); }
  nvinfer1::IExecutionContext* context();

  int GetProfileIndex() {
    if (max_profile_num_ > 1) {
      std::unique_lock<std::mutex> lock(mutex_);
      return profile_index_[predictor_id_per_thread];
    } else {
      return 0;
    }
  }
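
  // Note: with several optimization profiles, TensorRT lays the bindings out
  // profile-major, so the bindings of profile i start at offset
  // (binding_num_ / max_profile_num_) * i.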

  int GetBindingsOffset() {
    return (binding_num_ / max_profile_num_) * GetProfileIndex();
  }

  int GetNbBindings() { return binding_num_; }

  void ResetContext() {
    PADDLE_ENFORCE_NOT_NULL(
        infer_engine_,
        platform::errors::InvalidArgument(
            "You should build engine first and then set the context."));
    std::unique_lock<std::mutex> lock(mutex_);
    infer_context_[predictor_id_per_thread].reset(nullptr);
    infer_context_.erase(predictor_id_per_thread);
    cur_profile_num_ = 0;
  }

  nvinfer1::IHostMemory* Serialize() {
    PADDLE_ENFORCE_NOT_NULL(
        infer_engine_,
        platform::errors::InvalidArgument(
            "The TensorRT engine must be built first before serialization"));
#if IS_TRT_VERSION_LT(8000)
    ihost_memory_.reset(infer_engine_->serialize());
#else
    PADDLE_ENFORCE_NOT_NULL(
        ihost_memory_,
        platform::errors::InvalidArgument(
            "TensorRT >= 8.0 requires that buildSerializedNetwork is called"));
#endif
    return ihost_memory_.get();
  }

  void Deserialize(const std::string& engine_serialized_data);
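  // A hedged round-trip sketch (illustrative caller code):
  //
  //   nvinfer1::IHostMemory* blob = engine->Serialize();
  //   std::string buf(static_cast<const char*>(blob->data()), blob->size());
  //   other_engine->Deserialize(buf);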

  void SetRuntimeBatch(size_t batch_size);
  int GetRuntimeBatch();

  bool WithFp16() {
    bool enable_fp16 = (precision_ == AnalysisConfig::Precision::kHalf);
    bool support_fp16 = infer_builder_->platformHasFastFp16();
    return enable_fp16 && support_fp16;
  }

  int GetDeviceId() { return device_id_; }

  nvinfer1::IPluginV2Layer* AddPlugin(nvinfer1::ITensor* const* inputs,
                                      int num_inputs,
                                      plugin::PluginTensorRT*);

  nvinfer1::IPluginV2Layer* AddPluginV2Ext(nvinfer1::ITensor* const* inputs,
                                           int num_inputs,
                                           plugin::PluginTensorRTV2Ext* plugin);

  nvinfer1::IPluginV2Layer* AddPluginV2IOExt(nvinfer1::ITensor* const* inputs,
                                             int num_inputs,
                                             nvinfer1::IPluginV2IOExt* plugin);

  void SetTensorDynamicRange(nvinfer1::ITensor* tensor, float range) {
    quant_dynamic_range_[tensor] = range;
  }

  // Get the fp16 trt weight. If the src weight is not fp16, we will cast it.
  Weight GetFp16TrtWeight(const std::string& name,
                          const phi::DenseTensor& weight_tensor);

  // Get the fp32 trt weight. If the src weight is not fp32, we will cast it.
  Weight GetFp32TrtWeight(const std::string& name,
                          const phi::DenseTensor& weight_tensor);

  // If the src weight type is fp16, return a fp16 trt weight, etc.
  Weight GetTrtWeight(const std::string& name,
                      const phi::DenseTensor& weight_tensor);

  float GetTensorDynamicRange(nvinfer1::ITensor* tensor) {
    return quant_dynamic_range_[tensor];
  }

  bool DynamicRangeIsSet(nvinfer1::ITensor* tensor) {
    return quant_dynamic_range_.count(tensor);
  }

  // TRT weights need a pointer to CPU memory, but before TRT runs, fluid
  // loads the weights into GPU storage, so we have to copy them from GPU
  // back to CPU in the op converter. We use a map to keep these weights
  // alive, so that their memory is not released prematurely, which would
  // break the construction of the TRT op.
  std::unordered_map<std::string /*name*/, std::unique_ptr<phi::DenseTensor>>
      weight_map;

  // When setting weight_map, a self-incrementing suffix is appended to the
  // name to avoid setting weights with the same name twice.
  void SetWeights(std::string w_name,
                  std::unique_ptr<phi::DenseTensor> w_tensor) {
    static int suffix_counter = 0;
    std::string suffix = std::to_string(suffix_counter);
    std::string splitter = "__";
    std::string name_with_suffix = w_name + splitter + suffix;
    PADDLE_ENFORCE_EQ(weight_map.count(name_with_suffix),
                      0,
                      platform::errors::AlreadyExists(
                          "The weight named %s is set into the weight map "
                          "twice in TRT OP converter.",
                          name_with_suffix));
    weight_map[name_with_suffix] = std::move(w_tensor);
    suffix_counter += 1;
  }
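  // Note: suffix_counter is shared across names, so e.g. the first two calls
  // SetWeights("fc.w", ...) and SetWeights("fc.b", ...) store their tensors
  // under the keys "fc.w__0" and "fc.b__1".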

  void SetUseOSS(bool use_varseqlen) { use_varseqlen_ = use_varseqlen; }
  void SetUseDLA(bool use_dla) { use_dla_ = use_dla; }
  void SetDLACore(int dla_core) { dla_core_ = dla_core; }
  void SetWithErnie(bool with_ernie) { with_ernie_ = with_ernie; }
  void SetWithInterleaved(bool with_interleaved) {
    with_interleaved_ = with_interleaved;
  }
  void SetTransformerPosid(std::string tensorrt_transformer_posid) {
    tensorrt_transformer_posid_ = tensorrt_transformer_posid;
  }
  void SetTransformerMaskid(std::string tensorrt_transformer_maskid) {
    tensorrt_transformer_maskid_ = tensorrt_transformer_maskid;
  }
  void ClearWeights() {
    for (auto& weight_pair : weight_map) {
      weight_pair.second.reset(nullptr);
    }
  }

  // NOTE: The functions below were modified to adapt to dynamic shape.
  // Initialize the inference network, so that TensorRT layers can be added
  // to it.
  void InitNetwork();
  // After finishing adding ops, freeze this network and create the execution
  // environment.
  void FreezeNetwork();
  void Execute(int batch_size,
               std::vector<void*>* buffers,
               cudaStream_t stream = nullptr);

  nvinfer1::INetworkDefinition* network() { return infer_network_.get(); }

  ShapeMapType min_input_shape() { return min_input_shape_; }
  ShapeMapType max_input_shape() { return max_input_shape_; }
  ShapeMapType optim_input_shape() { return optim_input_shape_; }
  ShapeMapType min_shape_tensor() { return min_shape_tensor_; }
  ShapeMapType max_shape_tensor() { return max_shape_tensor_; }
  ShapeMapType optim_shape_tensor() { return optim_shape_tensor_; }

  bool AdjustDynamicShapeRange(const ShapeMapType& runtime_input_shape,
                               std::vector<std::string>* changed) {
    bool ret = false;
    changed->clear();
    for (const auto& it : runtime_input_shape) {
      auto name = it.first;
      auto input_shape = it.second;
      PADDLE_ENFORCE_EQ(
          min_input_shape_.count(name),
          true,
          platform::errors::InvalidArgument(
              "TRT dynamic_shape min_input_shape %s not found.", name));
      PADDLE_ENFORCE_EQ(min_input_shape_[name].size(),
                        input_shape.size(),
                        platform::errors::InvalidArgument(
                            "TRT dynamic_shape min_input_shape %s size not "
                            "equal, the min_input_shape[%s].size()=%d"
                            ", but the runtime_input_shape[%s].size()=%d.",
                            name,
                            name,
                            min_input_shape_[name].size(),
                            name,
                            input_shape.size()));
      auto bak_min_shape = min_input_shape_[name];
      auto bak_max_shape = max_input_shape_[name];
      bool min_change = false;
      bool max_change = false;
      for (size_t d = 0; d < input_shape.size(); ++d) {
        if (input_shape[d] < min_input_shape_[name][d]) {
          ret = true;
          min_change = true;
          min_input_shape_[name][d] = input_shape[d];
        }
        if (input_shape[d] > max_input_shape_[name][d]) {
          ret = true;
          max_change = true;
          max_input_shape_[name][d] = input_shape[d];
        }
      }

      if (min_change)
        LOG(INFO) << "refreshed shape range: " << name << ", min_shape from "
                  << Vec2Str(bak_min_shape) << " to "
                  << Vec2Str(min_input_shape_[name]);
      if (max_change)
        LOG(INFO) << "refreshed shape range: " << name << ", max_shape from "
                  << Vec2Str(bak_max_shape) << " to "
                  << Vec2Str(max_input_shape_[name]);
      if (min_change || max_change) changed->push_back(name);
    }
    return ret;
  }

  bool use_varseqlen() { return use_varseqlen_; }
  bool with_ernie() { return with_ernie_; }
  bool with_interleaved() { return with_interleaved_; }
  std::string tensorrt_transformer_posid() {
    return tensorrt_transformer_posid_;
  }
  std::string tensorrt_transformer_maskid() {
    return tensorrt_transformer_maskid_;
  }
  bool disable_trt_plugin_fp16() { return disable_trt_plugin_fp16_; }
  bool with_dynamic_shape() { return with_dynamic_shape_; }
  AnalysisConfig::Precision precision() { return precision_; }

#if IS_TRT_VERSION_GE(6000)
  nvinfer1::IPluginV2Layer* AddDynamicPlugin(
      nvinfer1::ITensor* const* inputs,
      int num_inputs,
      plugin::DynamicPluginTensorRT* plugin) {
    owned_pluginv2_.emplace_back(plugin);
    return network()->addPluginV2(inputs, num_inputs, *plugin);
  }
#endif

  bool Has(const std::string& attr_name) const {
    return attrs_.count(attr_name) > 0;
  }

  void Erase(const std::string& attr_name) {
    if (!Has(attr_name)) {
      return;
    }
    if (attr_dels_.find(attr_name) != attr_dels_.end()) {
      attr_dels_[attr_name]();
      attr_dels_.erase(attr_name);
    }
    attrs_.erase(attr_name);
  }

  // Set a pointer to the attribute. Engine takes ownership of the attribute.
  template <typename AttrType>
  void Set(const std::string& attr_name, AttrType* attr) {
    if (attrs_.count(attr_name) == 0) {
      PADDLE_ENFORCE_EQ(
          attrs_.count(attr_name),
          0,
          platform::errors::AlreadyExists(
              "Attribute %s already set in trt engine.", attr_name));
    } else {
      VLOG(3) << "Setting the attribute " << attr_name << " for trt engine "
              << this;
    }
    attrs_[attr_name] = attr;
    attr_dels_[attr_name] = [attr, attr_name]() {
      VLOG(3) << "deleting " << attr_name;
      delete attr;
    };
  }

  // Set a pointer to the attribute. Engine doesn't take ownership. Caller
  // should delete the attribute.
  template <typename AttrType>
  void SetNotOwned(const std::string& attr_name, AttrType* attr) {
    PADDLE_ENFORCE_EQ(
        attrs_.count(attr_name),
        0,
        platform::errors::AlreadyExists(
            "Attribute %s already set in trt engine.", attr_name));
    attrs_[attr_name] = attr;
  }

  // Get a reference to an attribute previously set.
  template <typename AttrType>
  AttrType& Get(const std::string& attr_name) const {
    PADDLE_ENFORCE_NE(attrs_.find(attr_name),
                      attrs_.end(),
                      platform::errors::InvalidArgument(
                          "Attribute %s not found in trt engine.", attr_name));
    try {
      return *paddle::any_cast<AttrType*>(attrs_.at(attr_name));
    } catch (paddle::bad_any_cast&) {
      auto TypeToString = [](const std::type_info& info) -> std::string {
        if (std::type_index(info) == std::type_index(typeid(bool*))) {
          return "bool";
        } else if (std::type_index(info) == std::type_index(typeid(int*))) {
          return "int";
        } else if (std::type_index(info) ==
                   std::type_index(typeid(const int*))) {
          return "const int";
        } else if (std::type_index(info) ==
                   std::type_index(typeid(std::string*))) {
          return "std::string";
        }
        return info.name();
      };

      PADDLE_THROW(platform::errors::InvalidArgument(
          "Invalid type for attribute %s, expected: %s, actual: %s.",
          attr_name,
          TypeToString(typeid(AttrType*)),
          TypeToString(attrs_.at(attr_name).type())));
    }
  }

  void SetProfileNum(int num) { max_profile_num_ = num; }

  void GetEngineInfo();

  void SetUseInspector(bool use_inspector) { use_inspector_ = use_inspector; }
  void SetScope(const framework::Scope& scope) { scope_ = &scope; }

  void SetContextMemorySharing(bool context_memory_sharing) {
    context_memory_sharing_ = context_memory_sharing;
  }

 private:
  // Each ICudaEngine object is bound to a specific GPU when it is
  // instantiated, so ensure that the thread is associated with the correct
  // device by calling freshDeviceId().
  void freshDeviceId();
  // Used for converting weights into ITensors
  const framework::Scope* scope_;

  // the max batch size
  int max_batch_;
  // the runtime batch size
  static int runtime_batch_;
  // the max memory size the engine uses
  int64_t max_workspace_;

  AnalysisConfig::Precision precision_;
  TRTInt8Calibrator* calibrator_;
  // batch size of the current data; will be updated on each execution
  int batch_size_{-1};

  // used for engine context memory sharing
  bool context_memory_sharing_{false};

  int device_id_;
  int max_profile_num_{1};
  int cur_profile_num_{0};
  std::unordered_map<PredictorID, int> profile_index_;
  ShapeMapType min_input_shape_;
  ShapeMapType max_input_shape_;
  ShapeMapType optim_input_shape_;
  ShapeMapType min_shape_tensor_;
  ShapeMapType max_shape_tensor_;
  ShapeMapType optim_shape_tensor_;
  bool disable_trt_plugin_fp16_{false};
  phi::DataType model_precision_{phi::DataType::FLOAT32};
  bool use_varseqlen_{false};
  bool use_dla_{false};
  int dla_core_{0};
  bool with_ernie_{false};
  bool with_interleaved_{false};
  std::string tensorrt_transformer_posid_;
  std::string tensorrt_transformer_maskid_;
  nvinfer1::ILogger& logger_;

  // max data size for the buffers.
  std::unordered_map<std::string /*name*/, nvinfer1::ITensor* /*ITensor*/>
      itensor_map_;

  std::vector<std::unique_ptr<plugin::PluginTensorRT>> owned_plugin_;
  std::vector<std::unique_ptr<plugin::PluginTensorRTV2Ext>> owned_plugin_v2ext_;
  std::vector<std::unique_ptr<nvinfer1::IPluginV2IOExt>> owned_plugin_v2ioext_;

  // TensorRT related internal members
  infer_ptr<nvinfer1::IBuilder> infer_builder_;
  infer_ptr<nvinfer1::INetworkDefinition> infer_network_;
  infer_ptr<nvinfer1::ICudaEngine> infer_engine_;
  std::unordered_map<PredictorID, infer_ptr<nvinfer1::IExecutionContext>>
      infer_context_;
  infer_ptr<nvinfer1::IHostMemory> ihost_memory_;
  std::unordered_map<nvinfer1::ITensor*, float> quant_dynamic_range_;

  std::unordered_map<std::string, paddle::any> attrs_;
  std::unordered_map<std::string, std::function<void(void)>> attr_dels_;

  // For dynamic shape
  bool with_dynamic_shape_{false};
#if IS_TRT_VERSION_GE(6000)
  int binding_num_;
  infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_;
  std::vector<nvinfer1::IOptimizationProfile*> optim_profiles_;
  std::vector<std::unique_ptr<plugin::DynamicPluginTensorRT>> owned_pluginv2_;
#endif
  std::mutex mutex_;
  bool use_inspector_;

 public:
  thread_local static int predictor_id_per_thread;
};  // class TensorRTEngine

// Add a layer__ into engine__ with args ARGS.
// For example, following the charRNN sample in the TensorRT developer guide
// (https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#charRNN_define_network),
// TRT_ENGINE_ADD_LAYER can add a fully connected layer into the engine.
// TensorRT has so many layers that it is not wise to add a member function
// for each of them; a macro like this is more extensible as the underlying
// TensorRT library adds support for new layers.
#define TRT_ENGINE_ADD_LAYER(engine__, layer__, ...) \
  engine__->network()->add##layer__(__VA_ARGS__)
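
// For instance (a hedged illustration; `engine` is a TensorRTEngine* and
// `x`, `y` are nvinfer1::ITensor*), the call
//
//   auto* layer = TRT_ENGINE_ADD_LAYER(engine, ElementWise, *x, *y,
//                                      nvinfer1::ElementWiseOperation::kSUM);
//
// expands to engine->network()->addElementWise(*x, *y, ...kSUM).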

class TRTEngineManager {
  using PredictorID = int;
  using AllocationPtr = phi::Allocator::AllocationPtr;

 public:
  TRTEngineManager() {
    // createInferBuilder loads the TRT kernels and takes a few seconds, but
    // as long as one IBuilder stays alive the kernels are not unloaded.
    // Hence we keep a persistent IBuilder so that TensorRT does not
    // repeatedly unload and reload its kernels.
    if (FLAGS_trt_ibuilder_cache) {
      holder_.reset(createInferBuilder(&NaiveLogger::Global()));
    }
  }

  bool Empty() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return engines_.size() == 0;
  }

  bool Has(const std::string& name) const {
    std::lock_guard<std::mutex> lock(mutex_);
    if (engines_.count(name) == 0) return false;
    return engines_.at(name).get() != nullptr;
  }

  TensorRTEngine* Get(const std::string& name) const {
    std::lock_guard<std::mutex> lock(mutex_);
    return engines_.at(name).get();
  }

  TensorRTEngine* Create(
      std::string name,
      int max_batch,
      int64_t max_workspace,
      AnalysisConfig::Precision precision = AnalysisConfig::Precision::kFloat32,
      TRTInt8Calibrator* calibrator = nullptr,
      int device_id = 0,
      const std::map<std::string, std::vector<int>> min_input_shape = {},
      const std::map<std::string, std::vector<int>> max_input_shape = {},
      const std::map<std::string, std::vector<int>> optim_input_shape = {},
      const std::map<std::string, std::vector<int>> min_shape_tensor = {},
      const std::map<std::string, std::vector<int>> max_shape_tensor = {},
      const std::map<std::string, std::vector<int>> optim_shape_tensor = {},
      bool disable_trt_plugin_fp16 = false,
      phi::DataType model_precision = phi::DataType::FLOAT32,
      nvinfer1::ILogger& logger = NaiveLogger::Global()) {
    auto* p = new TensorRTEngine(max_batch,
                                 max_workspace,
                                 precision,
                                 calibrator,
                                 device_id,
                                 min_input_shape,
                                 max_input_shape,
                                 optim_input_shape,
                                 min_shape_tensor,
                                 max_shape_tensor,
                                 optim_shape_tensor,
                                 disable_trt_plugin_fp16,
                                 model_precision,
                                 logger);
    std::lock_guard<std::mutex> lock(mutex_);
    engines_[name].reset(p);
    return p;
  }

  void DeleteAll() {
    std::lock_guard<std::mutex> lock(mutex_);
    for (auto& item : engines_) {
      item.second.reset(nullptr);
    }
    engines_.clear();
  }

  void DeleteKey(const std::string& key) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto iter = engines_.find(key);
    if (iter != engines_.end()) {
      iter->second.reset(nullptr);
      engines_.erase(iter);
    }
  }
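
  // Context memory sharing: each engine reports the context memory it needs
  // via updateContextMemorySize(); the manager tracks the maximum and hands
  // out a single shared allocation per predictor through getContextMemory(),
  // releasing a predictor's allocation when the required size grows so it is
  // re-created at the new size.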

  void updateContextMemorySize(size_t mem_size, PredictorID predictor_id) {
    VLOG(3) << "TensorRT engine context memory size is "
            << mem_size / 1024.0 / 1024.0 << "MiB in predictor id "
            << predictor_id;
    bool size_updated{false};

    {
      std::lock_guard<std::mutex> lock(mutex_);
      if (max_ctx_mem_size_ < mem_size) {
        max_ctx_mem_size_ = mem_size;
        size_updated = true;
      }
    }

    if (size_updated) {
      releaseContextMemory(predictor_id);
    }
  }

  void* getContextMemory(PredictorID predictor_id,
                         const phi::GPUPlace& place,
                         const phi::Stream& stream) {
    std::lock_guard<std::mutex> lock(mutex_);
    static auto alignment = getAlignmentSize(place);
    if (context_memorys_.count(predictor_id) == 0) {
      auto context_memory =
          memory::Alloc(place, max_ctx_mem_size_ + alignment, stream);
      context_memorys_[predictor_id] = std::move(context_memory);
    }
    return getAlignedMemory(context_memorys_[predictor_id]->ptr(), alignment);
  }

  void releaseContextMemory(PredictorID predictor_id) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (context_memorys_.count(predictor_id)) {
      context_memorys_[predictor_id].reset(nullptr);
      context_memorys_.erase(predictor_id);
    }
  }

 private:
  size_t getAlignmentSize(const phi::GPUPlace& place) {
    const auto& prop = platform::GetDeviceProperties(place.GetDeviceId());
    return prop.textureAlignment;
  }

  void* getAlignedMemory(void* addr, size_t alignment) {
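    // Rounds addr down to the nearest multiple of alignment (assumed to be a
    // power of two, e.g. the device's textureAlignment).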
    return reinterpret_cast<void*>(uintptr_t(addr) & (~(alignment - 1)));
  }

  mutable std::mutex mutex_;
  size_t max_ctx_mem_size_{0};
  std::unordered_map<PredictorID, AllocationPtr> context_memorys_;
  std::unordered_map<std::string, std::unique_ptr<TensorRTEngine>> engines_;
  infer_ptr<nvinfer1::IBuilder> holder_;
};

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle