From 020198043bebc759166318d302970c3687f9fa08 Mon Sep 17 00:00:00 2001
From: Yuanle Liu
Date: Wed, 28 Dec 2022 10:46:22 +0800
Subject: [PATCH] update some trt log (#49330)

---
 .../inference/analysis/ir_passes/tensorrt_subgraph_pass.cc | 4 ++--
 paddle/fluid/inference/api/analysis_predictor.h            | 7 +++++++
 paddle/fluid/inference/tensorrt/engine.cc                  | 1 -
 paddle/fluid/inference/tensorrt/engine.h                   | 3 ++-
 4 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
index 84eac39a31..7f5134ae48 100644
--- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
+++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
@@ -272,7 +272,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
     if (x->Var()->GetDataType() == framework::proto::VarType::INT64) {
       std::string tmp_name = x->Name() + "_cast_to_INT32";
       LOG(WARNING)
-          << "tensorrt_subgraph's input named " << tmp_name
+          << "tensorrt_subgraph's input named " << x->Name()
           << " having int64 dtype in pdmodel description, we will cast them to "
             "int32 dtype to feed them into paddle-trt.";
       /*
@@ -395,7 +395,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
             map_origin_outputs_dtype[name]) ==
         framework::proto::VarType::INT64) {
       std::string tmp_name = name + "_cast_to_INT64";
-      LOG(WARNING) << "tensorrt_subgraph's output named " << tmp_name
+      LOG(WARNING) << "tensorrt_subgraph's output named " << name
                    << " having int64 dtype in pdmodel description, but in fact "
                       "it is int32 "
                       "dtype after executing this tensorrt_subgraph, so we "
diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h
index 8decb80288..f853738160 100644
--- a/paddle/fluid/inference/api/analysis_predictor.h
+++ b/paddle/fluid/inference/api/analysis_predictor.h
@@ -105,7 +105,14 @@ class AnalysisPredictor : public PaddlePredictor {
     }
     auto trt_identifier = config_.trt_engine_memory_sharing_identifier_;
     if (trt_identifier > 0) {
+      // NOTE(liuyuanle): For convenience, we set the id of the predictor to
+      // negative sharing_identifier directly. In the future, this may affect
+      // the meaning of negative predictor id.
       predictor_id_ = -trt_identifier;
+      LOG(WARNING)
+          << "Since the engine context memory of multiple predictors "
+             "is enabled in Paddle-TRT, we set the id of current predictor to "
+             "negative sharing_identifier you specified.";
     } else {
       predictor_id_ = inference::GetUniqueId();
     }
diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index f480f791f9..8ff93e57f8 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -75,7 +75,6 @@ void TensorRTEngine::InitNetwork() {
   }
 
   infer_builder_config_.reset(infer_builder_->createBuilderConfig());
-  // optim_profile_ = infer_builder_->createOptimizationProfile();
   optim_profiles_.resize(max_profile_num_);
   for (int i = 0; i < max_profile_num_; i++)
     optim_profiles_[i] = infer_builder_->createOptimizationProfile();
diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h
index e80f2e6ce5..908e3729d5 100644
--- a/paddle/fluid/inference/tensorrt/engine.h
+++ b/paddle/fluid/inference/tensorrt/engine.h
@@ -802,6 +802,8 @@ class TRTEngineManager {
   }
 
   void updateContextMemorySize(size_t mem_size, PredictorID predictor_id) {
+    VLOG(3) << "TensorRT engine context memory size is " << mem_size
+            << " in predictor id " << predictor_id;
     bool size_updated{false};
 
     {
@@ -825,7 +827,6 @@
     if (context_memorys_.count(predictor_id) == 0) {
       auto context_memory =
           memory::Alloc(place, max_ctx_mem_size_ + alignment, stream);
-      // context_memory_[predictor_id].reset(context_memory.release());
       context_memorys_[predictor_id] = std::move(context_memory);
     }
     return getAlignedMemory(context_memorys_[predictor_id]->ptr(), alignment);
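
A minimal standalone sketch of the id scheme that the NOTE and the new warning in analysis_predictor.h describe: a positive trt_engine_memory_sharing_identifier is mapped to a negative predictor id so that all predictors sharing one TensorRT engine context memory pool use the same key, while other predictors get an ordinary unique positive id. The ResolvePredictorId helper and the local counter standing in for inference::GetUniqueId() are assumptions for illustration, not Paddle's actual API.

    #include <iostream>

    namespace {
    // Stand-in for inference::GetUniqueId(); assumed to hand out positive ids.
    int GetUniqueId() {
      static int counter = 0;
      return ++counter;
    }
    }  // namespace

    // Hypothetical helper mirroring the branch touched by the patch: a positive
    // sharing identifier becomes a negative predictor id, so every predictor
    // that shares engine context memory resolves to the same key.
    int ResolvePredictorId(int trt_identifier) {
      if (trt_identifier > 0) {
        return -trt_identifier;
      }
      return GetUniqueId();
    }

    int main() {
      std::cout << ResolvePredictorId(3) << std::endl;  // -3: shared-context predictor
      std::cout << ResolvePredictorId(3) << std::endl;  // -3: same pool, same key
      std::cout << ResolvePredictorId(0) << std::endl;  //  1: ordinary unique id
      return 0;
    }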
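
The getContextMemory hunk allocates max_ctx_mem_size_ + alignment bytes and then returns an aligned pointer through getAlignedMemory. Below is a minimal sketch of one common way to round a pointer up to an alignment boundary; the AlignPointerUp helper is an assumption for illustration only, not Paddle's implementation of getAlignedMemory.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper: rounds the address up to the next multiple of
    // `alignment`. Requires `alignment` to be a power of two.
    void* AlignPointerUp(void* ptr, std::uintptr_t alignment) {
      auto addr = reinterpret_cast<std::uintptr_t>(ptr);
      return reinterpret_cast<void*>((addr + alignment - 1) & ~(alignment - 1));
    }

    int main() {
      // Over-allocate by `alignment` bytes so an aligned region of the requested
      // size always fits, mirroring the max_ctx_mem_size_ + alignment allocation.
      constexpr std::uintptr_t kAlignment = 128;
      unsigned char buffer[512 + kAlignment];
      void* aligned = AlignPointerUp(buffer, kAlignment);
      std::printf("raw %p -> aligned %p\n", static_cast<void*>(buffer), aligned);
      return 0;
    }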