From b3888614aaf4455ed705c5dcc5607540a96c66e6 Mon Sep 17 00:00:00 2001
From: gaoziyuan <88373061+gzy19990617@users.noreply.github.com>
Date: Wed, 8 Feb 2023 12:48:45 +0800
Subject: [PATCH] [Paddle-TRT] remove engine info from RunImpl process (#50181)

* remove_engine_info

* remove_engine_info

* remove_engine_info

* change trtlayerinformation line to json

---------

Co-authored-by: gaoziyuan
---
 paddle/fluid/inference/tensorrt/engine.cc            | 12 +++++++-----
 paddle/fluid/operators/tensorrt/tensorrt_engine_op.h |  5 -----
 2 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index 0c25b321c1d..812e024a1a5 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -344,8 +344,9 @@ void TensorRTEngine::FreezeNetwork() {
         .updateContextMemorySize(infer_engine_->getDeviceMemorySize(),
                                  predictor_id_per_thread);
   }
-
-  GetEngineInfo();
+  if (use_inspector_) {
+    GetEngineInfo();
+  }
 }
 
 nvinfer1::ITensor *TensorRTEngine::DeclareInput(const std::string &name,
@@ -552,8 +553,9 @@ void TensorRTEngine::Deserialize(const std::string &engine_serialized_data) {
         .updateContextMemorySize(infer_engine_->getDeviceMemorySize(),
                                  predictor_id_per_thread);
   }
-
-  GetEngineInfo();
+  if (use_inspector_) {
+    GetEngineInfo();
+  }
 }
 
 void TensorRTEngine::SetRuntimeBatch(size_t batch_size) {
@@ -828,7 +830,7 @@ void TensorRTEngine::GetEngineInfo() {
   auto *infer_context = context();
   infer_inspector->setExecutionContext(infer_context);
   LOG(INFO) << infer_inspector->getEngineInformation(
-      nvinfer1::LayerInformationFormat::kONELINE);
+      nvinfer1::LayerInformationFormat::kJSON);
   LOG(INFO) << "====== engine info end ======";
 #else
   LOG(INFO) << "Inspector needs TensorRT version 8.2 and after.";
diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
index 2f5da3c44b9..87323534bba 100644
--- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
+++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
@@ -182,7 +182,6 @@ class TensorRTEngineOp : public framework::OperatorBase {
   bool enable_int8_;
   bool enable_fp16_;
   bool use_calib_mode_;
-  bool use_inspector_;
   std::string calibration_data_;
   std::string engine_key_;
   std::string calibration_engine_key_;
@@ -219,7 +218,6 @@ class TensorRTEngineOp : public framework::OperatorBase {
     shape_range_info_path_ = Attr<std::string>("shape_range_info_path");
     allow_build_at_runtime_ = Attr<bool>("allow_build_at_runtime");
     use_static_engine_ = Attr<bool>("use_static_engine");
-    use_inspector_ = HasAttr("use_inspector") && Attr<bool>("use_inspector");
     if (use_static_engine_) {
       model_opt_cache_dir_ = Attr<std::string>("model_opt_cache_dir");
    }
@@ -331,9 +329,6 @@ class TensorRTEngineOp : public framework::OperatorBase {
       return;
     }
     auto *trt_engine = GetEngine(scope, dev_place);
-    if (use_inspector_) {
-      trt_engine->GetEngineInfo();
-    }
     if (trt_engine->with_dynamic_shape()) {
       // get runtime input shapes.
       std::map<std::string, std::vector<int32_t>> runtime_input_shape;
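
Editor's note: the sketch below shows, in isolation, the TensorRT engine-inspector pattern that this patch moves inside TensorRTEngine and gates behind use_inspector_. It is an approximation, not Paddle code: DumpEngineInfo is a hypothetical free function, the version guard uses the NV_TENSORRT_MAJOR/NV_TENSORRT_MINOR macros from NvInferVersion.h in place of Paddle's IS_TRT_VERSION_GE macro, and output goes to std::cout rather than LOG(INFO).

// Minimal sketch of TensorRT's engine inspector (TRT >= 8.2), approximating
// what TensorRTEngine::GetEngineInfo() does after this patch: emit per-layer
// engine information as JSON, but only when inspection was requested.
#include <iostream>

#include <NvInfer.h>

// Hypothetical helper; Paddle's real code lives in TensorRTEngine.
void DumpEngineInfo(nvinfer1::ICudaEngine* engine,
                    nvinfer1::IExecutionContext* context,
                    bool use_inspector) {
  // Mirrors the new guard: skip the dump unless explicitly requested.
  if (!use_inspector) return;
#if NV_TENSORRT_MAJOR > 8 || (NV_TENSORRT_MAJOR == 8 && NV_TENSORRT_MINOR >= 2)
  nvinfer1::IEngineInspector* inspector = engine->createEngineInspector();
  inspector->setExecutionContext(context);  // include runtime shape choices
  // kJSON (rather than kONELINE) makes the output machine-parseable.
  std::cout << inspector->getEngineInformation(
                   nvinfer1::LayerInformationFormat::kJSON)
            << std::endl;
  delete inspector;
#else
  std::cout << "Inspector needs TensorRT version 8.2 and after." << std::endl;
#endif
}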
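
For context (an assumption worth verifying against the Paddle Inference docs): the use_inspector flag that now solely controls GetEngineInfo() is typically set from the analysis config, e.g. AnalysisConfig::EnableTensorRtInspector(), so predictors that never request inspection no longer pay for an engine-information dump on every engine build or deserialize.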