/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/inference/tensorrt/engine.h"

#include <NvInfer.h>
#include <cuda.h>
#include <glog/logging.h>
#include <string>
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace inference {
namespace tensorrt {

int TensorRTEngine::runtime_batch_ = 1;

void TensorRTEngine::Build(const DescType &paddle_model) {
  PADDLE_ENFORCE(false, "not implemented");
}

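// Runs inference: binds the engine's device, enqueues the prepared execution
// context on the given CUDA stream with the caller-provided device buffers,
// waits for the stream to finish, and records the runtime batch size.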
void TensorRTEngine::Execute(int batch_size, std::vector<void *> *buffers,
                             cudaStream_t stream) {
  freshDeviceId();
  batch_size_ = batch_size;
  infer_context_->enqueue(batch_size, buffers->data(), stream, nullptr);
  cudaStreamSynchronize(stream);
  SetRuntimeBatch(batch_size);
}

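// Builds the ICudaEngine and an execution context from the populated network
// definition, applying the max-batch/workspace limits and, when INT8 mode is
// enabled, the calibrator. After this call the engine is ready for Execute().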
void TensorRTEngine::FreezeNetwork() {
  freshDeviceId();
  VLOG(3) << "Freezing the TensorRT network";
  PADDLE_ENFORCE(infer_builder_ != nullptr,
                 "Call InitNetwork first to initialize network.");
  PADDLE_ENFORCE(infer_network_ != nullptr,
                 "Call InitNetwork first to initialize network.");
  // build engine.
  infer_builder_->setMaxBatchSize(max_batch_);
  infer_builder_->setMaxWorkspaceSize(max_workspace_);
  if (enable_int8_) {
    infer_builder_->setInt8Mode(true);
    PADDLE_ENFORCE(
        calibrator_ != nullptr,
        "The precision mode is 'INT8', the calibrator should not be nullptr");
    infer_builder_->setInt8Calibrator(calibrator_);
  }

  infer_engine_.reset(infer_builder_->buildCudaEngine(*infer_network_));
  PADDLE_ENFORCE(infer_engine_ != nullptr, "build cuda engine failed!");

  infer_context_.reset(infer_engine_->createExecutionContext());
}

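// Registers a named network input with the given data type and dimensions,
// records the maximum buffer size it may need, and caches the resulting
// ITensor so it can later be looked up by name.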
nvinfer1::ITensor *TensorRTEngine::DeclareInput(const std::string &name,
                                                nvinfer1::DataType dtype,
                                                const nvinfer1::Dims &dims) {
  PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate input name %s",
                    name);

  PADDLE_ENFORCE(infer_network_ != nullptr, "should call InitNetwork first");
  auto *input = infer_network_->addInput(name.c_str(), dtype, dims);
  PADDLE_ENFORCE(input, "infer network add input %s failed", name);
  buffer_sizes_[name] = kDataTypeSize[static_cast<int>(dtype)] *
                        analysis::AccuDims(dims.d, dims.nbDims) * max_batch_;
  PADDLE_ENFORCE(input->isNetworkInput());
  TensorRTEngine::SetITensor(name, input);
  return input;
}

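// Marks the offset-th output of `layer` as a network output under `name`.
// Its buffer size is unknown at this point, so it is recorded as zero.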
void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer *layer, int offset,
                                   const std::string &name) {
  PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate output name %s",
                    name);

  auto *output = layer->getOutput(offset);
  SetITensor(name, output);
  PADDLE_ENFORCE(output != nullptr);
  output->setName(name.c_str());
  PADDLE_ENFORCE(!output->isNetworkInput());
  infer_network_->markOutput(*output);
  PADDLE_ENFORCE(output->isNetworkOutput());
  // The output buffer's size can only be decided later; set it to zero here
  // as a marker and reset it later.
  buffer_sizes_[name] = 0;
}

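// Whether an input or output buffer has already been declared under `name`.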
bool TensorRTEngine::HasDeclared(const std::string &name) {
  return buffer_sizes_.count(name) > 0;
}

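// Marks an already-registered ITensor, looked up by name, as a network output.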
void TensorRTEngine::DeclareOutput(const std::string &name) {
  PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate output name %s",
                    name);

  auto *output = TensorRTEngine::GetITensor(name);
  PADDLE_ENFORCE(output != nullptr);
  output->setName(name.c_str());
  PADDLE_ENFORCE(!output->isNetworkInput());
  infer_network_->markOutput(*output);
  // The output buffer's size can only be decided later; set it to zero here
  // as a marker and reset it later.
  buffer_sizes_[name] = 0;
}

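// Associates `name` with `tensor`; each name may be registered only once.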
void TensorRTEngine::SetITensor(const std::string &name,
                                nvinfer1::ITensor *tensor) {
  PADDLE_ENFORCE(tensor != nullptr);
  PADDLE_ENFORCE_EQ(0, itensor_map_.count(name), "duplicate ITensor name %s",
                    name);
  itensor_map_[name] = tensor;
}

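// Looks up a previously registered ITensor by name; fails if it is missing.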
nvinfer1::ITensor *TensorRTEngine::GetITensor(const std::string &name) {
  PADDLE_ENFORCE(itensor_map_.count(name), "no ITensor %s", name);
  return itensor_map_[name];
}

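// The runtime batch size is kept in a static member shared by all engine
// instances; Execute() updates it with the batch size actually enqueued.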
void TensorRTEngine::SetRuntimeBatch(size_t batch_size) {
  runtime_batch_ = batch_size;
}

int TensorRTEngine::GetRuntimeBatch() { return runtime_batch_; }

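// Adds a custom plugin layer to the network and takes ownership of the plugin
// object so it stays alive for the lifetime of the engine.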
nvinfer1::IPluginLayer *TensorRTEngine::AddPlugin(
    nvinfer1::ITensor *const *inputs, int num_inputs,
    plugin::PluginTensorRT *plugin) {
  owned_plugin_.emplace_back(plugin);
  return infer_network_.get()->addPluginExt(inputs, num_inputs, *plugin);
}

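// Binds the calling thread to this engine's CUDA device before any
// CUDA/TensorRT call is issued.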
void TensorRTEngine::freshDeviceId() {
  int count;
  cudaGetDeviceCount(&count);
  PADDLE_ENFORCE_LT(device_id_, count);
  cudaSetDevice(device_id_);
}

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle