未验证 提交 b3888614 编写于 作者: G gaoziyuan 提交者: GitHub

[Paddle-TRT] remove engine info from RumImpl process (#50181)

* remove_engine_info

* remove_engine_info

* remove_engine_info

* change trtlayerinformation line to json

---------
Co-authored-by: gaoziyuan <gaoziyuan@baidu.com>
上级 bb148c54
...@@ -344,8 +344,9 @@ void TensorRTEngine::FreezeNetwork() { ...@@ -344,8 +344,9 @@ void TensorRTEngine::FreezeNetwork() {
.updateContextMemorySize(infer_engine_->getDeviceMemorySize(), .updateContextMemorySize(infer_engine_->getDeviceMemorySize(),
predictor_id_per_thread); predictor_id_per_thread);
} }
if (use_inspector_) {
GetEngineInfo(); GetEngineInfo();
}
} }
nvinfer1::ITensor *TensorRTEngine::DeclareInput(const std::string &name, nvinfer1::ITensor *TensorRTEngine::DeclareInput(const std::string &name,
...@@ -552,8 +553,9 @@ void TensorRTEngine::Deserialize(const std::string &engine_serialized_data) { ...@@ -552,8 +553,9 @@ void TensorRTEngine::Deserialize(const std::string &engine_serialized_data) {
.updateContextMemorySize(infer_engine_->getDeviceMemorySize(), .updateContextMemorySize(infer_engine_->getDeviceMemorySize(),
predictor_id_per_thread); predictor_id_per_thread);
} }
if (use_inspector_) {
GetEngineInfo(); GetEngineInfo();
}
} }
void TensorRTEngine::SetRuntimeBatch(size_t batch_size) { void TensorRTEngine::SetRuntimeBatch(size_t batch_size) {
...@@ -828,7 +830,7 @@ void TensorRTEngine::GetEngineInfo() { ...@@ -828,7 +830,7 @@ void TensorRTEngine::GetEngineInfo() {
auto *infer_context = context(); auto *infer_context = context();
infer_inspector->setExecutionContext(infer_context); infer_inspector->setExecutionContext(infer_context);
LOG(INFO) << infer_inspector->getEngineInformation( LOG(INFO) << infer_inspector->getEngineInformation(
nvinfer1::LayerInformationFormat::kONELINE); nvinfer1::LayerInformationFormat::kJSON);
LOG(INFO) << "====== engine info end ======"; LOG(INFO) << "====== engine info end ======";
#else #else
LOG(INFO) << "Inspector needs TensorRT version 8.2 and after."; LOG(INFO) << "Inspector needs TensorRT version 8.2 and after.";
......
...@@ -182,7 +182,6 @@ class TensorRTEngineOp : public framework::OperatorBase { ...@@ -182,7 +182,6 @@ class TensorRTEngineOp : public framework::OperatorBase {
bool enable_int8_; bool enable_int8_;
bool enable_fp16_; bool enable_fp16_;
bool use_calib_mode_; bool use_calib_mode_;
bool use_inspector_;
std::string calibration_data_; std::string calibration_data_;
std::string engine_key_; std::string engine_key_;
std::string calibration_engine_key_; std::string calibration_engine_key_;
...@@ -219,7 +218,6 @@ class TensorRTEngineOp : public framework::OperatorBase { ...@@ -219,7 +218,6 @@ class TensorRTEngineOp : public framework::OperatorBase {
shape_range_info_path_ = Attr<std::string>("shape_range_info_path"); shape_range_info_path_ = Attr<std::string>("shape_range_info_path");
allow_build_at_runtime_ = Attr<bool>("allow_build_at_runtime"); allow_build_at_runtime_ = Attr<bool>("allow_build_at_runtime");
use_static_engine_ = Attr<bool>("use_static_engine"); use_static_engine_ = Attr<bool>("use_static_engine");
use_inspector_ = HasAttr("use_inspector") && Attr<bool>("use_inspector");
if (use_static_engine_) { if (use_static_engine_) {
model_opt_cache_dir_ = Attr<std::string>("model_opt_cache_dir"); model_opt_cache_dir_ = Attr<std::string>("model_opt_cache_dir");
} }
...@@ -331,9 +329,6 @@ class TensorRTEngineOp : public framework::OperatorBase { ...@@ -331,9 +329,6 @@ class TensorRTEngineOp : public framework::OperatorBase {
return; return;
} }
auto *trt_engine = GetEngine(scope, dev_place); auto *trt_engine = GetEngine(scope, dev_place);
if (use_inspector_) {
trt_engine->GetEngineInfo();
}
if (trt_engine->with_dynamic_shape()) { if (trt_engine->with_dynamic_shape()) {
// get runtime input shapes. // get runtime input shapes.
std::map<std::string, std::vector<int32_t>> runtime_input_shape; std::map<std::string, std::vector<int32_t>> runtime_input_shape;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册