Unverified commit 02019804, authored by Yuanle Liu, committed by GitHub

update some trt log (#49330)

Parent e2b2f7d0
......@@ -272,7 +272,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
     if (x->Var()->GetDataType() == framework::proto::VarType::INT64) {
       std::string tmp_name = x->Name() + "_cast_to_INT32";
       LOG(WARNING)
-          << "tensorrt_subgraph's input named " << tmp_name
+          << "tensorrt_subgraph's input named " << x->Name()
           << " having int64 dtype in pdmodel description, we will cast them to "
              "int32 dtype to feed them into paddle-trt.";
       /*
......@@ -395,7 +395,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
             map_origin_outputs_dtype[name]) ==
         framework::proto::VarType::INT64) {
       std::string tmp_name = name + "_cast_to_INT64";
-      LOG(WARNING) << "tensorrt_subgraph's output named " << tmp_name
+      LOG(WARNING) << "tensorrt_subgraph's output named " << name
                    << " having int64 dtype in pdmodel description, but in fact "
                       "it is int32 "
                       "dtype after executing this tensorrt_subgraph, so we "
......
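Context for the two warnings above: the pass feeds an int32 copy of any int64 input into the TensorRT subgraph and treats the corresponding "_cast_to_INT32" / "_cast_to_INT64" variables as the casted views, since the engine itself works on int32. Below is a minimal, self-contained sketch of that narrowing cast; the helper name and the clamping behavior are illustrative only, not Paddle's actual implementation.

// Sketch: narrow an int64 buffer to int32 before handing it to the engine.
// Hypothetical helper; values outside the int32 range are clamped here.
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<int32_t> CastInt64ToInt32(const std::vector<int64_t>& src) {
  std::vector<int32_t> dst(src.size());
  for (size_t i = 0; i < src.size(); ++i) {
    int64_t v = std::clamp<int64_t>(src[i], INT32_MIN, INT32_MAX);
    dst[i] = static_cast<int32_t>(v);
  }
  return dst;
}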
......@@ -105,7 +105,14 @@ class AnalysisPredictor : public PaddlePredictor {
     }
     auto trt_identifier = config_.trt_engine_memory_sharing_identifier_;
     if (trt_identifier > 0) {
+      // NOTE(liuyuanle): For convenience, we set the id of the predictor to
+      // negative sharing_identifier directly. In the future, this may affect
+      // the meaning of negative predictor id.
       predictor_id_ = -trt_identifier;
+      LOG(WARNING)
+          << "Since the engine context memory of multiple predictors "
+             "is enabled in Paddle-TRT, we set the id of current predictor to "
+             "negative sharing_identifier you specified.";
     } else {
       predictor_id_ = inference::GetUniqueId();
     }
......
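The NOTE and warning added in the hunk above describe the sharing mechanism: every predictor created with the same positive sharing_identifier is given the same negative predictor id, so the engine manager keys its context memory by that shared id and all of those predictors reuse one allocation. A minimal sketch of that idea follows; the pool class, its members, and the allocation strategy are hypothetical stand-ins, not Paddle's classes.

// Sketch: predictors sharing an identifier map to one memory slot.
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <vector>

struct ContextMemoryPool {
  // Keyed by predictor id; predictors that share an identifier use the same
  // (negative) id and therefore the same slot.
  std::unordered_map<int64_t, std::unique_ptr<std::vector<char>>> slots;

  char* GetOrCreate(int64_t predictor_id, size_t bytes) {
    auto& slot = slots[predictor_id];
    if (!slot || slot->size() < bytes) {
      slot = std::make_unique<std::vector<char>>(bytes);  // grow to the largest request
    }
    return slot->data();
  }
};

int64_t MakePredictorId(int sharing_identifier, int64_t unique_id) {
  // Mirrors the diff: a positive sharing identifier becomes a negative
  // predictor id, otherwise each predictor keeps its own unique id.
  return sharing_identifier > 0 ? -static_cast<int64_t>(sharing_identifier) : unique_id;
}

Two predictors built with sharing_identifier = 1 would both call GetOrCreate(-1, ...) in this sketch and so receive pointers into the same buffer.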
......@@ -75,7 +75,6 @@ void TensorRTEngine::InitNetwork() {
   }
   infer_builder_config_.reset(infer_builder_->createBuilderConfig());
-  // optim_profile_ = infer_builder_->createOptimizationProfile();
   optim_profiles_.resize(max_profile_num_);
   for (int i = 0; i < max_profile_num_; i++)
     optim_profiles_[i] = infer_builder_->createOptimizationProfile();
......
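The hunk above drops the leftover single-profile comment and keeps the loop that creates max_profile_num_ optimization profiles. With the public TensorRT API, each profile is created from the builder, given min/opt/max shapes per dynamic input, and registered on the builder config, roughly as sketched below; the input name and the dimensions are made-up placeholders.

// Sketch: registering several optimization profiles on a TensorRT builder config.
#include <NvInfer.h>

void AddProfiles(nvinfer1::IBuilder* builder,
                 nvinfer1::IBuilderConfig* config,
                 int max_profile_num) {
  for (int i = 0; i < max_profile_num; ++i) {
    nvinfer1::IOptimizationProfile* profile = builder->createOptimizationProfile();
    // "input" and the dims below are placeholders; real code sets one entry
    // per dynamic-shape input of the network.
    profile->setDimensions("input", nvinfer1::OptProfileSelector::kMIN,
                           nvinfer1::Dims4{1, 3, 224, 224});
    profile->setDimensions("input", nvinfer1::OptProfileSelector::kOPT,
                           nvinfer1::Dims4{8, 3, 224, 224});
    profile->setDimensions("input", nvinfer1::OptProfileSelector::kMAX,
                           nvinfer1::Dims4{16, 3, 224, 224});
    config->addOptimizationProfile(profile);
  }
}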
......@@ -802,6 +802,8 @@ class TRTEngineManager {
   }
   void updateContextMemorySize(size_t mem_size, PredictorID predictor_id) {
+    VLOG(3) << "TensorRT engine context memory size is " << mem_size
+            << " in predictor id " << predictor_id;
     bool size_updated{false};
     {
......@@ -825,7 +827,6 @@ class TRTEngineManager {
     if (context_memorys_.count(predictor_id) == 0) {
       auto context_memory =
           memory::Alloc(place, max_ctx_mem_size_ + alignment, stream);
-      // context_memory_[predictor_id].reset(context_memory.release());
       context_memorys_[predictor_id] = std::move(context_memory);
     }
     return getAlignedMemory(context_memorys_[predictor_id]->ptr(), alignment);
......
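The allocation above reserves max_ctx_mem_size_ + alignment bytes so that an aligned pointer inside the block still leaves the full usable size. One common way a helper like getAlignedMemory rounds a raw pointer up to the next alignment boundary is sketched below, assuming the alignment is a power of two; this is illustrative, not necessarily Paddle's exact implementation.

// Sketch: round a pointer up to the next multiple of a power-of-two alignment.
#include <cstdint>

void* GetAlignedMemory(void* ptr, uintptr_t alignment) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t aligned = (addr + alignment - 1) & ~(alignment - 1);
  return reinterpret_cast<void*>(aligned);
}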