/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <NvInfer.h>

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "NvInferRuntimeCommon.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/inference/api/paddle_analysis_config.h"
#include "paddle/fluid/inference/engine.h"
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/stream.h"
#include "paddle/utils/any.h"

namespace paddle {
namespace inference {
namespace tensorrt {

namespace plugin {
class PluginTensorRT;
}  // namespace plugin

using FluidDT = framework::proto::VarType_Type;
using TRT_DT = nvinfer1::DataType;

namespace {  // NOLINT

TRT_DT FluidDataType2TRT(FluidDT type) {
  switch (type) {
    case FluidDT::VarType_Type_FP32:
      return TRT_DT::kFLOAT;
    case FluidDT::VarType_Type_INT32:
      return TRT_DT::kINT32;
    case FluidDT::VarType_Type_FP16:
      return TRT_DT::kHALF;
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "unknown fluid datatype in TRT op converter"));
  }
  return TRT_DT::kINT32;
}

// The T can be int32 or int64 type.
template <typename T>
nvinfer1::Dims Vec2TRT_Dims(const std::vector<T>& shape,
                            std::string input,
                            bool with_dynamic_shape = false) {
  PADDLE_ENFORCE_GT(shape.size(),
                    0UL,
                    platform::errors::InvalidArgument(
                        "TensorRT's tensor input requires at least 1 "
                        "dimension, but input %s has %d dims.",
                        input,
                        shape.size()));

  auto ShapeStr = [](const std::vector<T>& shape) {
    std::ostringstream os;
    os << "[";
    for (size_t i = 0; i < shape.size(); ++i) {
      if (i == shape.size() - 1) {
        os << shape[i];
      } else {
        os << shape[i] << ",";
      }
    }
    os << "]";
    return os.str();
  };
  if (!with_dynamic_shape) {
    if (shape.size() == 4UL) {
      if (shape[2] == -1 || shape[3] == -1) {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The input [%s] shape of trt subgraph is %s, please enable "
            "trt dynamic_shape mode by SetTRTDynamicShapeInfo.",
            input,
            ShapeStr(shape)));
      }
      return nvinfer1::Dims3(shape[1], shape[2], shape[3]);
    } else if (shape.size() == 5UL) {
      if (shape[2] == -1 || shape[3] == -1 || shape[4] == -1) {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The input [%s] shape of trt subgraph is %s, please enable "
            "trt dynamic_shape mode by SetTRTDynamicShapeInfo.",
            input,
            ShapeStr(shape)));
      }
      return nvinfer1::Dims4(shape[1], shape[2], shape[3], shape[4]);
    } else if (shape.size() == 3UL) {
      if (shape[1] == -1 || shape[2] == -1) {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The input [%s] shape of trt subgraph is %s, please enable "
            "trt dynamic_shape mode by SetTRTDynamicShapeInfo.",
            input,
            ShapeStr(shape)));
      }
      return nvinfer1::Dims2(shape[1], shape[2]);
    } else if (shape.size() == 2UL) {
      if (shape[1] == -1) {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The input [%s] shape of trt subgraph is %s, please enable "
            "trt dynamic_shape mode by SetTRTDynamicShapeInfo.",
            input,
            ShapeStr(shape)));
      }
      nvinfer1::Dims dims;
      dims.nbDims = 1;
      dims.d[0] = shape[1];
      return dims;
    }
    // static shape doesn't support 1D op so far.
    PADDLE_ENFORCE_NE(shape.size(),
                      1UL,
                      platform::errors::InvalidArgument(
                          "The input [%s] shape of trt subgraph is %s, "
                          "which is not supported by trt so far.",
147 148
                          input,
                          ShapeStr(shape)));

    nvinfer1::Dims dims;
    dims.nbDims = shape.size() - 1;
    for (size_t i = 1; i < shape.size(); i++) {
      dims.d[i - 1] = shape[i];
    }
    return dims;
  } else {
    if (shape.size() == 4UL) {
      return nvinfer1::Dims4(shape[0], shape[1], shape[2], shape[3]);
    } else if (shape.size() == 3UL) {
      return nvinfer1::Dims3(shape[0], shape[1], shape[2]);
    }
    nvinfer1::Dims dims;
    dims.nbDims = shape.size();
    for (size_t i = 0; i < shape.size(); i++) {
      dims.d[i] = shape[i];
    }
    return dims;
  }
}
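// Worked example for Vec2TRT_Dims (hypothetical shapes): with
// with_dynamic_shape == false, a paddle shape {8, 3, 224, 224} becomes
// Dims3(3, 224, 224); the batch dimension is dropped because static-shape
// TRT supplies it implicitly. With with_dynamic_shape == true, the same
// shape is kept whole and becomes Dims4(8, 3, 224, 224).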
}  // namespace

class TRTInt8Calibrator;

/*
 * TensorRT Engine.
 *
 * There are two ways to use it: build the network from a paddle protobuf
 * model, or construct the network manually.
 */
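//
// A minimal manual-construction sketch (illustrative only; the tensor name,
// dims, and layer choice below are hypothetical):
//
//   TensorRTEngine engine(1 /*max_batch*/, 1 << 30 /*max_workspace*/);
//   engine.InitNetwork();
//   auto* x = engine.DeclareInput("x", nvinfer1::DataType::kFLOAT,
//                                 nvinfer1::Dims3(3, 224, 224));
//   auto* relu = TRT_ENGINE_ADD_LAYER(&engine, Activation, *x,
//                                     nvinfer1::ActivationType::kRELU);
//   engine.DeclareOutput(relu, 0, "y");
//   engine.FreezeNetwork();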
class TensorRTEngine {
  using DescType = ::paddle::framework::proto::BlockDesc;
  using ShapeMapType = std::map<std::string, std::vector<int>>;
  using PredictorID = int;

 public:
  // Weight is model parameter.
  class Weight {
   public:
    Weight() = default;
    Weight(nvinfer1::DataType dtype, void* value, size_t num_elem) {
      w_.type = dtype;
      w_.values = value;
      w_.count = num_elem;
    }
    const nvinfer1::Weights& get() { return w_; }

    void SetDataType(nvinfer1::DataType type) { w_.type = type; }

    void SetDataType(phi::DataType type);

    void SetValues(const void* values) { w_.values = values; }

    void SetCount(int64_t num) { w_.count = num; }

    std::vector<int64_t> dims;

   private:
    nvinfer1::Weights w_;
  };

  TensorRTEngine(
      int max_batch,
      int64_t max_workspace,
      AnalysisConfig::Precision precision = AnalysisConfig::Precision::kFloat32,
      TRTInt8Calibrator* calibrator = nullptr,
      int device_id = 0,
      const ShapeMapType min_input_shape = {},
      const ShapeMapType max_input_shape = {},
      const ShapeMapType optim_input_shape = {},
      const ShapeMapType min_shape_tensor = {},
      const ShapeMapType max_shape_tensor = {},
      const ShapeMapType optim_shape_tensor = {},
      bool disable_trt_plugin_fp16 = false,
      phi::DataType model_precision = phi::DataType::FLOAT32,
      nvinfer1::ILogger& logger = NaiveLogger::Global())
      : max_batch_(max_batch),
        max_workspace_(max_workspace),
        precision_(precision),
        calibrator_(calibrator),
        device_id_(device_id),
        min_input_shape_(min_input_shape),
        max_input_shape_(max_input_shape),
        optim_input_shape_(optim_input_shape),
        min_shape_tensor_(min_shape_tensor),
        max_shape_tensor_(max_shape_tensor),
        optim_shape_tensor_(optim_shape_tensor),
        disable_trt_plugin_fp16_(disable_trt_plugin_fp16),
        model_precision_(model_precision),
        logger_(logger) {
    if (min_input_shape_.size() != 0 && max_input_shape_.size() != 0 &&
        optim_input_shape_.size() != 0) {
      PADDLE_ENFORCE_EQ(
          min_input_shape_.size(),
          max_input_shape_.size(),
          platform::errors::InvalidArgument(
              "The min_input_shape_'s size(%d) should be equal to the "
              "size(%d) of max_input_shape_",
              min_input_shape_.size(),
              max_input_shape_.size()));
      PADDLE_ENFORCE_EQ(
          min_input_shape_.size(),
          optim_input_shape_.size(),
          platform::errors::InvalidArgument(
              "The min_input_shape_'s size(%d) should be equal to the "
              "size(%d) of optim_input_shape_",
              min_input_shape_.size(),
              optim_input_shape_.size()));
#if IS_TRT_VERSION_GE(6000)
      with_dynamic_shape_ = true;
#else
      LOG(WARNING) << "Using TRT dynamic shape requires a TRT version of at "
                      "least 6.";
#endif
    }
    dy::initLibNvInferPlugins(&logger, "");
  }
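  // Construction sketch with dynamic shape enabled (the shape values are
  // hypothetical):
  //
  //   std::map<std::string, std::vector<int>> min{{"x", {1, 3, 224, 224}}};
  //   std::map<std::string, std::vector<int>> max{{"x", {8, 3, 224, 224}}};
  //   std::map<std::string, std::vector<int>> opt{{"x", {4, 3, 224, 224}}};
  //   TensorRTEngine engine(8, 1 << 30,
  //                         AnalysisConfig::Precision::kFloat32,
  //                         nullptr /*calibrator*/, 0 /*device_id*/,
  //                         min, max, opt);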

  ~TensorRTEngine() {
    for (auto& attr : attrs_) {
      if (attr_dels_.find(attr.first) != attr_dels_.end()) {
        attr_dels_[attr.first]();
      }
    }
    attrs_.clear();
    attr_dels_.clear();
  }

  // Add an input and set its name, data type and dimension.
  nvinfer1::ITensor* DeclareInput(const std::string& name,
                                  nvinfer1::DataType dtype,
                                  const nvinfer1::Dims& dim);
  // Set the offset-th output from a layer as the network's output, and set its
  // name.
  void DeclareOutput(const nvinfer1::ILayer* layer,
                     int offset,
                     const std::string& name);
  // Set the itensor_map_[name] as the network's output, and set its name.
  void DeclareOutput(const std::string& name);
  void ClearTensorMap() { itensor_map_.clear(); }

  void DeleteITensor(const std::string& name, nvinfer1::ITensor* tensor);
  void SetITensor(const std::string& name, nvinfer1::ITensor* tensor);
  // Get an ITensor called name.
  nvinfer1::ITensor* GetITensor(const std::string& name);
  nvinfer1::ITensor* ConvertWeight2ITensor(const std::string& name);
  std::unordered_map<std::string, nvinfer1::ITensor*>* GetITensorMap();

  nvinfer1::ICudaEngine* engine() { return infer_engine_.get(); }
  nvinfer1::IExecutionContext* context();

  int GetProfileIndex() {
    if (max_profile_num_ > 1) {
      std::unique_lock<std::mutex> lock(mutex_);
      return profile_index_[predictor_id_per_thread];
    } else {
      return 0;
    }
  }

  int GetBindingsOffset() {
    return (binding_num_ / max_profile_num_) * GetProfileIndex();
  }

  int GetNbBindings() { return binding_num_; }
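  // Example (illustrative numbers): with max_profile_num_ = 2 and
  // binding_num_ = 8, bindings [0, 3] belong to profile 0 and [4, 7] to
  // profile 1, so GetBindingsOffset() returns 4 on a thread whose predictor
  // is mapped to profile 1.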

  void ResetContext() {
    PADDLE_ENFORCE_NOT_NULL(
        infer_engine_,
        platform::errors::InvalidArgument(
            "You should build engine first and then set the context."));
    std::unique_lock<std::mutex> lock(mutex_);
    infer_context_[predictor_id_per_thread].reset(nullptr);
    infer_context_.erase(predictor_id_per_thread);
  }

  nvinfer1::IHostMemory* Serialize() {
    PADDLE_ENFORCE_NOT_NULL(
        infer_engine_,
        platform::errors::InvalidArgument(
            "The TensorRT engine must be built first before serialization"));
#if IS_TRT_VERSION_LT(8000)
    ihost_memory_.reset(infer_engine_->serialize());
#else
    PADDLE_ENFORCE_NOT_NULL(
        ihost_memory_,
        platform::errors::InvalidArgument(
            "TensorRT >= 8.0 requires that buildSerializedNetwork is called"));
#endif
    return ihost_memory_.get();
  }

  void Deserialize(const std::string& engine_serialized_data);
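  // A typical save/load round trip (sketch; persisting the bytes is up to
  // the caller):
  //
  //   nvinfer1::IHostMemory* buf = engine->Serialize();
  //   std::string data(static_cast<const char*>(buf->data()), buf->size());
  //   // ... store `data`, then later, on a freshly constructed engine:
  //   engine->Deserialize(data);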

  void SetRuntimeBatch(size_t batch_size);
  int GetRuntimeBatch();

  bool WithFp16() {
    bool enable_fp16 = (precision_ == AnalysisConfig::Precision::kHalf);
    bool support_fp16 = infer_builder_->platformHasFastFp16();
    return enable_fp16 && support_fp16;
  }

  int GetDeviceId() { return device_id_; }

  nvinfer1::IPluginV2Layer* AddPlugin(nvinfer1::ITensor* const* inputs,
                                      int num_inputs,
                                      plugin::PluginTensorRT*);

  nvinfer1::IPluginV2Layer* AddPluginV2Ext(nvinfer1::ITensor* const* inputs,
                                           int num_inputs,
                                           plugin::PluginTensorRTV2Ext* plugin);

  nvinfer1::IPluginV2Layer* AddPluginV2IOExt(nvinfer1::ITensor* const* inputs,
                                             int num_inputs,
                                             nvinfer1::IPluginV2IOExt* plugin);

  void SetTensorDynamicRange(nvinfer1::ITensor* tensor, float range) {
    quant_dynamic_range_[tensor] = range;
  }

  // Get fp16 trt weight. If src weight is not fp16, we will cast.
  Weight GetFp16TrtWeight(const std::string& name,
                          const phi::DenseTensor& weight_tensor);

  // Get fp32 trt weight. If src weight is not fp32, we will cast.
  Weight GetFp32TrtWeight(const std::string& name,
                          const phi::DenseTensor& weight_tensor);

  // Return the trt weight in the same data type as the src weight, e.g. an
  // fp16 src weight yields an fp16 trt weight.
  Weight GetTrtWeight(const std::string& name,
                      const phi::DenseTensor& weight_tensor);

  float GetTensorDynamicRange(nvinfer1::ITensor* tensor) {
    return quant_dynamic_range_[tensor];
  }

  bool DynamicRangeIsSet(nvinfer1::ITensor* tensor) {
    return quant_dynamic_range_.count(tensor);
  }

  // TRT weights must be accessible through a pointer to CPU memory, but
  // before TRT runs, fluid loads the weights into GPU storage, so our op
  // converter has to copy them from GPU back to CPU. We keep these copies in
  // a map so that the weight memory is not released prematurely, which would
  // break the construction of the TRT op.
  std::unordered_map<std::string /*name*/, std::unique_ptr<phi::DenseTensor>>
      weight_map;

  // When setting weight_map, a self-increasing suffix is appended to the
  // names to avoid setting two weights under the same name.
  void SetWeights(std::string w_name,
                  std::unique_ptr<phi::DenseTensor> w_tensor) {
    static int suffix_counter = 0;
    std::string suffix = std::to_string(suffix_counter);
    std::string splitter = "__";
    std::string name_with_suffix = w_name + splitter + suffix;
    PADDLE_ENFORCE_EQ(weight_map.count(name_with_suffix),
                      0,
                      platform::errors::AlreadyExists(
                          "The weight named %s is set into the weight map "
                          "twice in TRT OP converter.",
                          name_with_suffix));
    weight_map[name_with_suffix] = std::move(w_tensor);
    suffix_counter += 1;
  }
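  // For example (illustrative name): two consecutive calls to
  // SetWeights("conv1_w", ...) store the tensors as "conv1_w__0" and
  // "conv1_w__1". The counter is shared across all names, so the suffix is
  // unique per call rather than per name.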

  void SetUseOSS(bool use_varseqlen) { use_varseqlen_ = use_varseqlen; }
  void SetUseDLA(bool use_dla) { use_dla_ = use_dla; }
  void SetDLACore(int dla_core) { dla_core_ = dla_core; }
  void SetWithErnie(bool with_ernie) { with_ernie_ = with_ernie; }
  void SetWithInterleaved(bool with_interleaved) {
    with_interleaved_ = with_interleaved;
  }
  void SetTransformerPosid(std::string tensorrt_transformer_posid) {
    tensorrt_transformer_posid_ = tensorrt_transformer_posid;
  }
  void SetTransformerMaskid(std::string tensorrt_transformer_maskid) {
    tensorrt_transformer_maskid_ = tensorrt_transformer_maskid;
  }
  void ClearWeights() {
    for (auto& weight_pair : weight_map) {
      weight_pair.second.reset(nullptr);
    }
  }

  // NOTE: The functions below were modified to adapt to dynamic shape.
  // Initialize the inference network, so that TensorRT layers can be added
  // to it.
  void InitNetwork();
  // After finishing adding ops, freeze this network and create the execution
  // environment.
  void FreezeNetwork();
  void Execute(int batch_size,
               std::vector<void*>* buffers,
               cudaStream_t stream = nullptr);

  nvinfer1::INetworkDefinition* network() { return infer_network_.get(); }

  ShapeMapType min_input_shape() { return min_input_shape_; }
  ShapeMapType max_input_shape() { return max_input_shape_; }
  ShapeMapType optim_input_shape() { return optim_input_shape_; }
  ShapeMapType min_shape_tensor() { return min_shape_tensor_; }
  ShapeMapType max_shape_tensor() { return max_shape_tensor_; }
  ShapeMapType optim_shape_tensor() { return optim_shape_tensor_; }

  bool AdjustDynamicShapeRange(const ShapeMapType& runtime_input_shape,
                               std::vector<std::string>* changed) {
    bool ret = false;
    changed->clear();
    for (const auto& it : runtime_input_shape) {
      auto name = it.first;
      auto input_shape = it.second;
      PADDLE_ENFORCE_EQ(
          min_input_shape_.count(name),
          true,
          platform::errors::InvalidArgument(
              "TRT dynamic_shape min_input_shape %s not found.", name));
      PADDLE_ENFORCE_EQ(min_input_shape_[name].size(),
                        input_shape.size(),
                        platform::errors::InvalidArgument(
                            "TRT dynamic_shape min_input_shape %s size not "
                            "equal, the min_input_shape[%s].size()=%d"
                            ", but the runtime_input_shape[%s].size()=%d.",
                            name,
                            name,
                            min_input_shape_[name].size(),
                            name,
                            input_shape.size()));
      auto bak_min_shape = min_input_shape_[name];
      auto bak_max_shape = max_input_shape_[name];
      bool min_change = false;
      bool max_change = false;
      for (size_t d = 0; d < input_shape.size(); ++d) {
        if (input_shape[d] < min_input_shape_[name][d]) {
          ret = true;
          min_change = true;
          min_input_shape_[name][d] = input_shape[d];
        }
        if (input_shape[d] > max_input_shape_[name][d]) {
          ret = true;
          max_change = true;
          max_input_shape_[name][d] = input_shape[d];
        }
      }

      if (min_change)
        LOG(INFO) << "adjust shape range: " << name << ", min_shape from "
                  << Vec2Str(bak_min_shape) << " to "
                  << Vec2Str(min_input_shape_[name]);
      if (max_change)
        LOG(INFO) << "adjust shape range: " << name << ", max_shape from "
                  << Vec2Str(bak_max_shape) << " to "
                  << Vec2Str(max_input_shape_[name]);
      if (min_change || max_change) changed->push_back(name);
    }
    return ret;
  }
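  // Worked example for AdjustDynamicShapeRange (hypothetical shapes): if
  // min_input_shape_["x"] is {1, 3, 224, 224}, max_input_shape_["x"] is
  // {4, 3, 224, 224}, and a runtime shape {8, 3, 224, 224} arrives, the
  // batch dimension exceeds the recorded maximum, so max_input_shape_["x"]
  // is widened to {8, 3, 224, 224}, "x" is appended to *changed, and true is
  // returned to signal that the recorded range was updated.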

  bool use_varseqlen() { return use_varseqlen_; }
  bool with_ernie() { return with_ernie_; }
  bool with_interleaved() { return with_interleaved_; }
  std::string tensorrt_transformer_posid() {
    return tensorrt_transformer_posid_;
  }
  std::string tensorrt_transformer_maskid() {
    return tensorrt_transformer_maskid_;
  }
  bool disable_trt_plugin_fp16() { return disable_trt_plugin_fp16_; }
  bool with_dynamic_shape() { return with_dynamic_shape_; }
  AnalysisConfig::Precision precision() { return precision_; }

#if IS_TRT_VERSION_GE(6000)
  nvinfer1::IPluginV2Layer* AddDynamicPlugin(
      nvinfer1::ITensor* const* inputs,
      int num_inputs,
      plugin::DynamicPluginTensorRT* plugin) {
    owned_pluginv2_.emplace_back(plugin);
    return network()->addPluginV2(inputs, num_inputs, *plugin);
  }
#endif

  bool Has(const std::string& attr_name) const {
    return attrs_.count(attr_name) > 0;
  }

  void Erase(const std::string& attr_name) {
    if (!Has(attr_name)) {
      return;
    }
    if (attr_dels_.find(attr_name) != attr_dels_.end()) {
      attr_dels_[attr_name]();
      attr_dels_.erase(attr_name);
    }
    attrs_.erase(attr_name);
  }

  // Set a pointer to the attribute. Engine takes ownership of the attribute.
  template <typename AttrType>
  void Set(const std::string& attr_name, AttrType* attr) {
    if (attrs_.count(attr_name) > 0) {
      VLOG(3) << "Resetting the attribute " << attr_name << " for trt engine "
              << this;
      // Release the previously owned attribute first so it does not leak.
      Erase(attr_name);
    }
    attrs_[attr_name] = attr;
    attr_dels_[attr_name] = [attr, attr_name]() {
      VLOG(3) << "deleting " << attr_name;
      delete attr;
    };
  }

  // Set a pointer to the attribute. Engine doesn't take ownership. Caller
  // should delete the attribute.
  template <typename AttrType>
  void SetNotOwned(const std::string& attr_name, AttrType* attr) {
    PADDLE_ENFORCE_EQ(
        attrs_.count(attr_name),
        0,
        platform::errors::AlreadyExists(
            "Attribute %s already set in trt engine.", attr_name));
    attrs_[attr_name] = attr;
  }

  // Get a reference to the attribute previously set.
  template <typename AttrType>
  AttrType& Get(const std::string& attr_name) const {
    PADDLE_ENFORCE_NE(attrs_.find(attr_name),
                      attrs_.end(),
                      platform::errors::InvalidArgument(
                          "Attribute %s not found in trt engine.", attr_name));
    try {
      return *paddle::any_cast<AttrType*>(attrs_.at(attr_name));
    } catch (paddle::bad_any_cast&) {
      auto TypeToString = [](const std::type_info& info) -> std::string {
        if (std::type_index(info) == std::type_index(typeid(bool*))) {
          return "bool";
        } else if (std::type_index(info) == std::type_index(typeid(int*))) {
          return "int";
        } else if (std::type_index(info) ==
                   std::type_index(typeid(const int*))) {
          return "const int";
        } else if (std::type_index(info) ==
                   std::type_index(typeid(std::string*))) {
          return "std::string";
        }
        return info.name();
      };

      PADDLE_THROW(platform::errors::InvalidArgument(
          "Invalid type for attribute %s, expected: %s, actual: %s.",
          attr_name,
          TypeToString(typeid(AttrType*)),
          TypeToString(attrs_.at(attr_name).type())));
    }
  }
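  // Usage sketch for the attribute store (the attribute name and type are
  // illustrative):
  //
  //   engine->Set("use_fp16", new bool(true));     // engine owns the bool
  //   bool& flag = engine->Get<bool>("use_fp16");  // typed access
  //   engine->Erase("use_fp16");                   // runs the deleter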

  void SetProfileNum(int num) { max_profile_num_ = num; }

  void GetEngineInfo();

  void SetUseInspector(bool use_inspector) { use_inspector_ = use_inspector; }
  void SetScope(const framework::Scope& scope) { scope_ = &scope; }

  void SetContextMemorySharing(bool context_memory_sharing) {
    context_memory_sharing_ = context_memory_sharing;
  }

 private:
  // Each ICudaEngine object is bound to a specific GPU when it is
  // instantiated, so ensure the calling thread is associated with the
  // correct device by calling freshDeviceId().
  void freshDeviceId();
  // Used to convert weights into ITensors.
  const framework::Scope* scope_{nullptr};

  // the max batch size
  int max_batch_;
  // the runtime batch size
  static int runtime_batch_;
  // the max memory size the engine uses
  int64_t max_workspace_;

  AnalysisConfig::Precision precision_;
  TRTInt8Calibrator* calibrator_;
  // batch size of the current data, updated on each Execute() call.
  int batch_size_{-1};

  // used for engine context memory sharing
  bool context_memory_sharing_{false};

  int device_id_;
  int max_profile_num_{1};
  int cur_profile_num_{0};
  std::unordered_map<PredictorID, int> profile_index_;
  ShapeMapType min_input_shape_;
  ShapeMapType max_input_shape_;
  ShapeMapType optim_input_shape_;
  ShapeMapType min_shape_tensor_;
  ShapeMapType max_shape_tensor_;
  ShapeMapType optim_shape_tensor_;
  bool disable_trt_plugin_fp16_{false};
  phi::DataType model_precision_{phi::DataType::FLOAT32};
  bool use_varseqlen_{false};
  bool use_dla_{false};
  int dla_core_{0};
  bool with_ernie_{false};
  bool with_interleaved_{false};
  std::string tensorrt_transformer_posid_;
  std::string tensorrt_transformer_maskid_;
  nvinfer1::ILogger& logger_;

  // map from a tensor's name to the ITensor that represents it in the network.
  std::unordered_map<std::string /*name*/, nvinfer1::ITensor* /*ITensor*/>
      itensor_map_;

  std::vector<std::unique_ptr<plugin::PluginTensorRT>> owned_plugin_;
  std::vector<std::unique_ptr<plugin::PluginTensorRTV2Ext>> owned_plugin_v2ext_;
  std::vector<std::unique_ptr<nvinfer1::IPluginV2IOExt>> owned_plugin_v2ioext_;

  // TensorRT related internal members
  template <typename T>
  struct Destroyer {
    void operator()(T* x) {
      if (x) {
        x->destroy();
      }
    }
  };
  template <typename T>
  using infer_ptr = std::unique_ptr<T, Destroyer<T>>;
  infer_ptr<nvinfer1::IBuilder> infer_builder_;
  infer_ptr<nvinfer1::INetworkDefinition> infer_network_;
  infer_ptr<nvinfer1::ICudaEngine> infer_engine_;
  std::unordered_map<PredictorID, infer_ptr<nvinfer1::IExecutionContext>>
      infer_context_;
  infer_ptr<nvinfer1::IHostMemory> ihost_memory_;
  std::unordered_map<nvinfer1::ITensor*, float> quant_dynamic_range_;

  std::unordered_map<std::string, paddle::any> attrs_;
  std::unordered_map<std::string, std::function<void(void)>> attr_dels_;

  // For dynamic shape
  bool with_dynamic_shape_{false};
#if IS_TRT_VERSION_GE(6000)
  int binding_num_{0};
  infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_;
  std::vector<nvinfer1::IOptimizationProfile*> optim_profiles_;
  std::vector<std::unique_ptr<plugin::DynamicPluginTensorRT>> owned_pluginv2_;
#endif
  std::mutex mutex_;
  bool use_inspector_{false};

 public:
  thread_local static int predictor_id_per_thread;
};  // class TensorRTEngine

// Add a layer__ into engine__ with args ARGS.
// For example, adding a fully connected layer to the engine (see the
// network-definition reference:
// https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#charRNN_define_network)
// can be written with this macro, as sketched after its definition below.
// TensorRT has too many layer types to wrap each in a member function, and a
// macro like this stays extensible as the underlying TensorRT library adds
// new layer support.
#define TRT_ENGINE_ADD_LAYER(engine__, layer__, ...) \
  engine__->network()->add##layer__(__VA_ARGS__)
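
// A sketch of the expansion (identifiers are illustrative):
//
//   TRT_ENGINE_ADD_LAYER(engine, FullyConnected, *x, n_out, weights, bias)
//
// becomes
//
//   engine->network()->addFullyConnected(*x, n_out, weights, bias)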

class TRTEngineManager {
  using PredictorID = int;
  using AllocationPtr = phi::Allocator::AllocationPtr;

 public:
  bool Empty() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return engines_.size() == 0;
  }

  bool Has(const std::string& name) const {
    std::lock_guard<std::mutex> lock(mutex_);
    if (engines_.count(name) == 0) return false;
    return engines_.at(name).get() != nullptr;
  }

  TensorRTEngine* Get(const std::string& name) const {
    std::lock_guard<std::mutex> lock(mutex_);
    return engines_.at(name).get();
  }

  TensorRTEngine* Create(
      std::string name,
      int max_batch,
      int64_t max_workspace,
      AnalysisConfig::Precision precision = AnalysisConfig::Precision::kFloat32,
      TRTInt8Calibrator* calibrator = nullptr,
      int device_id = 0,
      const std::map<std::string, std::vector<int>> min_input_shape = {},
      const std::map<std::string, std::vector<int>> max_input_shape = {},
      const std::map<std::string, std::vector<int>> optim_input_shape = {},
      const std::map<std::string, std::vector<int>> min_shape_tensor = {},
      const std::map<std::string, std::vector<int>> max_shape_tensor = {},
      const std::map<std::string, std::vector<int>> optim_shape_tensor = {},
      bool disable_trt_plugin_fp16 = false,
      phi::DataType model_precision = phi::DataType::FLOAT32,
      nvinfer1::ILogger& logger = NaiveLogger::Global()) {
    auto* p = new TensorRTEngine(max_batch,
                                 max_workspace,
                                 precision,
                                 calibrator,
                                 device_id,
                                 min_input_shape,
                                 max_input_shape,
                                 optim_input_shape,
                                 min_shape_tensor,
                                 max_shape_tensor,
                                 optim_shape_tensor,
                                 disable_trt_plugin_fp16,
                                 model_precision,
                                 logger);
    std::lock_guard<std::mutex> lock(mutex_);
    engines_[name].reset(p);
    return p;
  }

  void DeleteAll() {
    std::lock_guard<std::mutex> lock(mutex_);
    for (auto& item : engines_) {
      item.second.reset(nullptr);
    }
    engines_.clear();
  }

  void DeleteKey(const std::string& key) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto iter = engines_.find(key);
    if (iter != engines_.end()) {
      iter->second.reset(nullptr);
      engines_.erase(iter);
    }
  }

  void updateContextMemorySize(size_t mem_size, PredictorID predictor_id) {
    bool size_updated{false};

    {
      std::lock_guard<std::mutex> lock(mutex_);
      if (max_ctx_mem_size_ < mem_size) {
        max_ctx_mem_size_ = mem_size;
        size_updated = true;
      }
    }

    if (size_updated) {
      releaseContextMemory(predictor_id);
    }
  }

  void* getContextMemory(PredictorID predictor_id,
                         const phi::GPUPlace& place,
                         const phi::Stream& stream) {
    std::lock_guard<std::mutex> lock(mutex_);
    static auto alignment = getAlignmentSize(place);
    if (context_memorys_.count(predictor_id) == 0) {
      auto context_memory =
          memory::Alloc(place, max_ctx_mem_size_ + alignment, stream);
      context_memorys_[predictor_id] = std::move(context_memory);
    }
    return getAlignedMemory(context_memorys_[predictor_id]->ptr(), alignment);
  }

  void releaseContextMemory(PredictorID predictor_id) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (context_memorys_.count(predictor_id)) {
      context_memorys_[predictor_id].reset(nullptr);
      context_memorys_.erase(predictor_id);
    }
  }
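
  // Typical flow (sketch): each engine reports its context-memory
  // requirement through updateContextMemorySize() after building; at run
  // time a predictor calls getContextMemory() to obtain one shared,
  // texture-aligned block sized for the largest reported requirement,
  // instead of allocating a separate block per engine.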

 private:
  size_t getAlignmentSize(const phi::GPUPlace& place) {
    const auto& prop = platform::GetDeviceProperties(place.GetDeviceId());
    return prop.textureAlignment;
  }

  void* getAlignedMemory(void* addr, size_t alignment) {
    // Round the address up to the next multiple of `alignment`; rounding
    // down could return a pointer before the start of the allocation.
    return reinterpret_cast<void*>((uintptr_t(addr) + alignment - 1) &
                                   ~(alignment - 1));
  }

  mutable std::mutex mutex_;
  size_t max_ctx_mem_size_{0};
  std::unordered_map<PredictorID, AllocationPtr> context_memorys_;
  std::unordered_map<std::string, std::unique_ptr<TensorRTEngine>> engines_;
};

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle