Commit ec213730 authored by nhzlx

fix trt stream bug.

BUG: after repeatedly feeding different input data, the outputs can no longer be aligned with the reference results.
test=develop

Parent e2ba9668
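Reviewer note on the bug class this commit fixes: the engine previously created and owned its own CUDA stream, while input staging could happen on a different stream, so the H2D copies and the TensorRT enqueue were not ordered relative to each other. A standalone sketch (not part of this patch; names and sizes are made up) of that hazard and of the single-stream fix:

// hazard_sketch.cu -- illustrative only.
#include <cuda_runtime.h>

__global__ void double_all(float *data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= 2.f;
}

int main() {
  const int n = 1 << 20;
  float *host, *dev;
  cudaMallocHost(&host, n * sizeof(float));  // pinned, so the H2D copy is truly async
  cudaMalloc(&dev, n * sizeof(float));
  for (int i = 0; i < n; ++i) host[i] = 1.f;

  cudaStream_t copy_s, exec_s;
  cudaStreamCreate(&copy_s);
  cudaStreamCreate(&exec_s);

  // Pre-patch hazard: copy and compute on DIFFERENT streams. Nothing orders
  // them, so the kernel may read the previous batch's data -> misaligned output.
  cudaMemcpyAsync(dev, host, n * sizeof(float), cudaMemcpyHostToDevice, copy_s);
  double_all<<<(n + 255) / 256, 256, 0, exec_s>>>(dev, n);
  cudaDeviceSynchronize();

  // Post-patch pattern: one stream serializes copy -> compute -> sync.
  cudaMemcpyAsync(dev, host, n * sizeof(float), cudaMemcpyHostToDevice, exec_s);
  double_all<<<(n + 255) / 256, 256, 0, exec_s>>>(dev, n);
  cudaStreamSynchronize(exec_s);

  cudaStreamDestroy(copy_s);
  cudaStreamDestroy(exec_s);
  cudaFree(dev);
  cudaFreeHost(host);
  return 0;
}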
@@ -29,9 +29,9 @@ TEST(OpConverter, ConvertBlock) {
   // init trt engine
   cudaStream_t stream_;
   std::unique_ptr<TensorRTEngine> engine_;
-  engine_.reset(new TensorRTEngine(5, 1 << 15, &stream_));
-  engine_->InitNetwork();
   PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream_), 0);
+  engine_.reset(new TensorRTEngine(5, 1 << 15, stream_));
+  engine_->InitNetwork();
   engine_->DeclareInput("conv2d-X", nvinfer1::DataType::kFLOAT,
                         nvinfer1::Dims3(2, 5, 5));
......
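Note the reordering above: `cudaStreamCreate` now runs before the engine is constructed, because the constructor no longer creates a stream itself and simply stores the handle it is given. A minimal sketch of the resulting contract, assuming the post-patch signature:

cudaStream_t stream;
PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream), 0);  // 1. make a valid handle first
TensorRTEngine engine(5, 1 << 15, stream);        // 2. engine borrows it
engine.InitNetwork();                             // 3. proceed as before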
@@ -78,11 +78,9 @@ class TRTConvertValidation {
         scope_(scope),
         if_add_batch_(if_add_batch),
         max_batch_size_(max_batch_size) {
-    // create engine.
-    engine_.reset(new TensorRTEngine(max_batch_size, workspace_size, &stream_));
-    engine_->InitNetwork();
     PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream_), 0);
+    engine_.reset(new TensorRTEngine(max_batch_size, workspace_size, stream_));
+    engine_->InitNetwork();
   }

   // Declare a Variable as input with random initialization.
@@ -175,7 +173,7 @@ class TRTConvertValidation {
     op_->Run(scope_, place);
     // Execute TRT.
     engine_->Execute(batch_size);
-    cudaStreamSynchronize(*engine_->stream());
+    cudaStreamSynchronize(engine_->stream());
     ASSERT_FALSE(op_desc_->OutputArgumentNames().empty());
     const size_t output_space_size = 3000;
@@ -184,7 +182,7 @@ class TRTConvertValidation {
       std::vector<float> fluid_out;
       std::vector<float> trt_out(output_space_size);
       engine_->GetOutputInCPU(output, &trt_out[0], output_space_size);
-      cudaStreamSynchronize(*engine_->stream());
+      cudaStreamSynchronize(engine_->stream());
       auto* var = scope_.FindVar(output);
       auto tensor = var->GetMutable<framework::LoDTensor>();
......
@@ -42,14 +42,13 @@ void TensorRTEngine::Execute(int batch_size) {
     PADDLE_ENFORCE(buf.device == DeviceType::GPU);
     buffers.push_back(buf.buffer);
   }
-  PADDLE_ENFORCE_NOT_NULL(stream_);
-  infer_context_->enqueue(batch_size, buffers.data(), *stream_, nullptr);
-  cudaStreamSynchronize(*stream_);
+  infer_context_->enqueue(batch_size, buffers.data(), stream_, nullptr);
+  cudaStreamSynchronize(stream_);
   SetRuntimeBatch(batch_size);
 }

 TensorRTEngine::~TensorRTEngine() {
-  cudaStreamSynchronize(*stream_);
+  cudaStreamSynchronize(stream_);
   // clean buffer
   for (auto &buf : buffers_) {
     if (buf.device == DeviceType::GPU && buf.buffer != nullptr) {
@@ -173,7 +172,7 @@ void TensorRTEngine::GetOutputInGPU(const std::string &name, void *dst,
   auto &buf = buffer(name);
   PADDLE_ENFORCE_NOT_NULL(buf.buffer, "buffer should be allocated before");
   PADDLE_ENFORCE_EQ(cudaMemcpyAsync(dst, buf.buffer, dst_size,
-                                    cudaMemcpyDeviceToDevice, *stream_),
+                                    cudaMemcpyDeviceToDevice, stream_),
                     0);
 }
@@ -194,7 +193,7 @@ void TensorRTEngine::GetOutputInCPU(const std::string &name, void *dst,
   auto &buf = buffer(name);
   PADDLE_ENFORCE_NOT_NULL(buf.buffer, "buffer should be allocated before");
   PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(dst, buf.buffer, dst_size,
-                                       cudaMemcpyDeviceToHost, *stream_));
+                                       cudaMemcpyDeviceToHost, stream_));
 }

 Buffer &TensorRTEngine::buffer(const std::string &name) {
@@ -211,12 +210,11 @@ void TensorRTEngine::SetInputFromCPU(const std::string &name, const void *data,
   auto &buf = buffer(name);
   PADDLE_ENFORCE_NOT_NULL(buf.buffer);
   PADDLE_ENFORCE_NOT_NULL(data);
-  PADDLE_ENFORCE_NOT_NULL(stream_);
   PADDLE_ENFORCE_LE(size, buf.max_size, "buffer is too small");
   PADDLE_ENFORCE(buf.device == DeviceType::GPU);
   buf.size = size;
   PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(buf.buffer, data, size,
-                                       cudaMemcpyHostToDevice, *stream_));
+                                       cudaMemcpyHostToDevice, stream_));
 }

 void TensorRTEngine::SetInputFromGPU(const std::string &name, const void *data,
@@ -227,7 +225,7 @@ void TensorRTEngine::SetInputFromGPU(const std::string &name, const void *data,
   PADDLE_ENFORCE_LE(size, buf.max_size, "buffer is too small");
   PADDLE_ENFORCE(buf.device == DeviceType::GPU);
   PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(buf.buffer, data, size,
-                                       cudaMemcpyDeviceToDevice, *stream_));
+                                       cudaMemcpyDeviceToDevice, stream_));
 }

 void TensorRTEngine::SetITensor(const std::string &name,
......
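With every `cudaMemcpyAsync` in engine.cc and the `enqueue` now issued on the same `stream_`, a caller that follows the set-input / execute / get-output sequence gets correctly ordered results. A hypothetical caller-side sequence under the new contract (buffer names and sizes are illustrative, not from this patch):

cudaStream_t stream;
PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream), 0);
TensorRTEngine engine(/*max_batch=*/1, /*max_workspace=*/1 << 20, stream);
// ... declare inputs/outputs, add layers, FreezeNetwork() ...
engine.SetInputFromCPU("x", x_data, x_bytes);    // async H2D on `stream`
engine.Execute(1);                               // enqueue + sync on `stream`
engine.GetOutputInCPU("y", y_data, y_bytes);     // async D2H on `stream`
cudaStreamSynchronize(stream);                   // D2H done; y_data is readable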
@@ -54,17 +54,14 @@ class TensorRTEngine : public EngineBase {
     nvinfer1::Weights w_;
   };

-  TensorRTEngine(int max_batch, int max_workspace,
-                 cudaStream_t* stream = nullptr, int device = 0,
+  TensorRTEngine(int max_batch, int max_workspace, cudaStream_t stream,
+                 int device = 0,
                  nvinfer1::ILogger& logger = NaiveLogger::Global())
       : max_batch_(max_batch),
         max_workspace_(max_workspace),
-        stream_(stream ? stream : &default_stream_),
+        stream_(stream),
         logger_(logger),
-        device_(device) {
-    freshDeviceId();
-    cudaStreamCreate(stream_);
-  }
+        device_(device) {}

   virtual ~TensorRTEngine();
@@ -102,7 +99,7 @@ class TensorRTEngine : public EngineBase {
   // NOTE this should be used after calling `FreezeNetwork`.
   Buffer& buffer(const std::string& name) override;

-  cudaStream_t* stream() { return stream_; }
+  cudaStream_t stream() { return stream_; }

   // Fill an input from CPU memory with name and size.
   void SetInputFromCPU(const std::string& name, const void* data, size_t size);
@@ -158,9 +155,8 @@ class TensorRTEngine : public EngineBase {
   // batch size of the current data, will be updated each Executation.
   int batch_size_{-1};

-  cudaStream_t* stream_;
-  // If stream_ is not set from outside, hold its own stream.
-  cudaStream_t default_stream_;
+  cudaStream_t stream_;

   nvinfer1::ILogger& logger_;
   std::vector<Buffer> buffers_;
@@ -208,38 +204,6 @@ class TensorRTEngine : public EngineBase {
 #define TRT_ENGINE_ADD_LAYER(engine__, layer__, ARGS...) \
   engine__->network()->add##layer__(ARGS);

-/*
- * Helper to control the TensorRT engine's creation and deletion.
- */
-class TRT_EngineManager {
- public:
-  bool HasEngine(const std::string& name) const {
-    return engines_.count(name) != 0;
-  }
-
-  // Get an engine called `name`.
-  TensorRTEngine* Get(const std::string& name) const {
-    return engines_.at(name).get();
-  }
-
-  // Create or get an engine called `name`
-  TensorRTEngine* Create(int max_batch, int max_workspace, cudaStream_t* stream,
-                         const std::string& name, int gpu_device = 0) {
-    auto* p = new TensorRTEngine(max_batch, max_workspace, stream, gpu_device);
-    engines_[name].reset(p);
-    return p;
-  }
-
-  void DeleteALl() {
-    for (auto& item : engines_) {
-      item.second.reset(nullptr);
-    }
-  }
-
- private:
-  std::unordered_map<std::string, std::unique_ptr<TensorRTEngine>> engines_;
-};
-
 }  // namespace tensorrt
 }  // namespace inference
 }  // namespace paddle
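Two things make the header change safe: `cudaStream_t` is itself an opaque pointer-sized handle, so passing it by value costs nothing and removes a level of indirection; and with `default_stream_` gone, the caller alone owns the stream's lifetime, which must exceed the engine's (the destructor still synchronizes on it). An illustrative RAII wrapper, not part of this patch, that makes that ownership explicit:

#include <cuda_runtime.h>

// Hypothetical helper: creates the stream before any engine can borrow it and
// destroys it only when the owning scope ends.
struct ScopedStream {
  cudaStream_t handle{nullptr};
  ScopedStream() { cudaStreamCreate(&handle); }
  ~ScopedStream() { cudaStreamDestroy(handle); }
  ScopedStream(const ScopedStream &) = delete;
  ScopedStream &operator=(const ScopedStream &) = delete;
};

// Usage: the stream outlives the engine, as the new constructor requires.
// ScopedStream s;
// TensorRTEngine engine(5, 1 << 15, s.handle);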
@@ -27,8 +27,8 @@ namespace tensorrt {
 class TensorRTEngineTest : public ::testing::Test {
  protected:
   void SetUp() override {
-    // ASSERT_EQ(0, cudaStreamCreate(&stream_));
-    engine_ = new TensorRTEngine(10, 1 << 10, &stream_);
+    ASSERT_EQ(0, cudaStreamCreate(&stream_));
+    engine_ = new TensorRTEngine(10, 1 << 10, stream_);
     engine_->InitNetwork();
   }
......
@@ -96,9 +96,13 @@ class TensorRTEngineOp : public framework::OperatorBase {
   void RunTrt(const framework::Scope &scope,
               const platform::Place &dev_place) const {
     int runtime_batch = 1;
+    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
+    auto &dev_ctx = *pool.Get(dev_place);
+    auto stream =
+        reinterpret_cast<const platform::CUDADeviceContext &>(dev_ctx).stream();
     if (trt_engine_.get() == nullptr) {
       trt_engine_.reset(new TensorRTEngine(
-          max_batch_size_, workspace_size_, nullptr,
+          max_batch_size_, workspace_size_, stream,
           boost::get<platform::CUDAPlace>(dev_place).device));
       Prepare(scope, dev_place, trt_engine_.get());
     }
@@ -126,6 +130,7 @@ class TensorRTEngineOp : public framework::OperatorBase {
       }
     }

+    cudaStreamSynchronize(stream);
     PADDLE_ENFORCE_LE(runtime_batch, max_batch_size_);
     // Execute the engine.
     engine->Execute(runtime_batch);
@@ -163,7 +168,7 @@ class TensorRTEngineOp : public framework::OperatorBase {
       output_index += 1;
     }

-    cudaStreamSynchronize(*engine->stream());
+    cudaStreamSynchronize(stream);
   }

   void Prepare(const framework::Scope &scope, const platform::Place &dev_place,
......
@@ -99,7 +99,7 @@ TEST(TensorRTEngineOp, manual) {
   SetAttr<std::string>(engine_op_desc.Proto(), "subgraph",
                        block_->SerializeAsString());
   SetAttr<int>(engine_op_desc.Proto(), "max_batch_size", 2);
-  SetAttr<int>(engine_op_desc.Proto(), "workspace_size", 2 << 10);
+  SetAttr<int>(engine_op_desc.Proto(), "workspace_size", 2 << 20);
   SetAttr<std::string>(engine_op_desc.Proto(), "engine_uniq_key", "a_engine");
   SetAttr<std::vector<std::string>>(engine_op_desc.Proto(), "parameters",
                                     std::vector<std::string>({}));
@@ -193,7 +193,7 @@ void Execute(int batch_size, int input_dim, int output_dim, int nlayers = 1) {
   SetAttr<std::string>(engine_op_desc.Proto(), "subgraph",
                        block_->SerializeAsString());
   SetAttr<int>(engine_op_desc.Proto(), "max_batch_size", batch_size);
-  SetAttr<int>(engine_op_desc.Proto(), "workspace_size", 2 << 10);
+  SetAttr<int>(engine_op_desc.Proto(), "workspace_size", 2 << 20);
   SetAttr<std::vector<std::string>>(
       engine_op_desc.Proto(), "parameters",
       std::vector<std::string>({"y0", "y1", "y2", "y3"}));
......
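The last two hunks also grow the tests' TensorRT workspace from `2 << 10` bytes (2 KiB) to `2 << 20` bytes (2 MiB): 2 KiB leaves TensorRT almost no scratch space for kernel selection, while 2 MiB is still tiny for a GPU yet ample for these small test networks. Checking the shift arithmetic:

static_assert((2 << 10) == 2 * 1024, "old workspace_size: 2 KiB");
static_assert((2 << 20) == 2 * 1024 * 1024, "new workspace_size: 2 MiB");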