From 3816d221ffdcf97babf0ad95f2ae03e70be698ea Mon Sep 17 00:00:00 2001
From: Zhaolong Xing
Date: Fri, 2 Aug 2019 15:16:02 +0800
Subject: [PATCH] Fix the CE error caused by the paddle-trt version (#18941)

* Fix Mask rcnn predictor
  1. refine the memory optim algorithm to support models with the block op.
  2. output diff: modify the affine channel fuse
  3. add condition_block_infer op
  add an interface for setting the trt calib table dir
  test=develop

* add the missing files.
  test=develop

* add trt fp16 support
  test=develop

* fix trt fp16 ce error
  test=develop

* add a vlog if the user uses trt4 and specifies fp16.
  test=develop
---
 paddle/fluid/inference/tensorrt/engine.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index cc9382419d5..d4d872976c4 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -51,6 +51,7 @@ void TensorRTEngine::FreezeNetwork() {
   // build engine.
   infer_builder_->setMaxBatchSize(max_batch_);
   infer_builder_->setMaxWorkspaceSize(max_workspace_);
+#if IS_TRT_VERSION_GE(5000)
   bool enable_fp16 = (precision_ == AnalysisConfig::Precision::kHalf);
   if (enable_fp16) {
     bool support_fp16 = infer_builder_->platformHasFastFp16();
@@ -60,6 +61,11 @@
                    "FP16 speed up, use FP32 instead.";
     }
   }
+#else
+  LOG(INFO) << "Using FP16 in Paddle-trt requires a TRT version of "
+               "at least 5. "
+               "So, use FP32 to run.";
+#endif
 
   bool enable_int8 = (precision_ == AnalysisConfig::Precision::kInt8);
   if (enable_int8) {
--
GitLab
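
The IS_TRT_VERSION_GE(5000) guard added above is defined elsewhere in Paddle
(tensorrt/engine.h). A minimal sketch of how such a compile-time guard can be
built from the NV_TENSORRT_* macros exported by <NvInfer.h> follows; the exact
weighting and the TRT_VERSION_NUMBER helper name are assumptions for
illustration, not Paddle's authoritative definition.

    // Sketch of a compile-time TRT version guard. Assumes the
    // NV_TENSORRT_MAJOR/MINOR/PATCH/BUILD macros from <NvInfer.h>.
    // TRT_VERSION_NUMBER is a hypothetical helper name.
    #include <NvInfer.h>

    #define TRT_VERSION_NUMBER                                \
      (NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 +   \
       NV_TENSORRT_PATCH * 10 + NV_TENSORRT_BUILD)

    #define IS_TRT_VERSION_GE(version) (TRT_VERSION_NUMBER >= (version))

With this weighting, TRT 5.0.0.0 maps to 5000, so IS_TRT_VERSION_GE(5000)
holds from TRT 5 onward and the FP16 branch is compiled only there.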
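The second hunk shows only the tail of the FP16 branch; the lines between the
two hunks, where the builder is actually switched to FP16, are elided by the
diff. A hedged reconstruction of the whole guarded block is sketched below:
platformHasFastFp16() and setFp16Mode() are real nvinfer1::IBuilder methods in
TRT 5, but the setFp16Mode() call and the first half of the log string are
assumptions, since neither is visible in the hunk.

    #if IS_TRT_VERSION_GE(5000)
      bool enable_fp16 = (precision_ == AnalysisConfig::Precision::kHalf);
      if (enable_fp16) {
        // Turn on FP16 kernels only when the GPU has fast FP16 units.
        bool support_fp16 = infer_builder_->platformHasFastFp16();
        infer_builder_->setFp16Mode(support_fp16);  // assumed; elided between hunks
        if (!support_fp16) {
          // The elided warning ends with the context line visible in the hunk.
          LOG(INFO) << "... FP16 speed up, use FP32 instead.";
        }
      }
    #else
      LOG(INFO) << "Using FP16 in Paddle-trt requires a TRT version of "
                   "at least 5. "
                   "So, use FP32 to run.";
    #endif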
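On the user side, the guard is exercised by requesting Precision::kHalf
through the analysis config. A hypothetical usage sketch follows; the
EnableTensorRtEngine argument list has varied across Paddle releases, so
treat the signature below as illustrative, and the model path as a
placeholder.

    #include "paddle/fluid/inference/api/paddle_inference_api.h"

    int main() {
      paddle::AnalysisConfig config;
      config.SetModel("./model_dir");  // placeholder model directory
      config.EnableUseGpu(100 /* MB memory pool */, 0 /* device id */);
      // Requesting kHalf: with TRT >= 5 this enables FP16; with TRT 4 the
      // patched FreezeNetwork() logs a notice and falls back to FP32.
      config.EnableTensorRtEngine(1 << 20 /* workspace bytes */,
                                  1 /* max batch */, 3 /* min subgraph size */,
                                  paddle::AnalysisConfig::Precision::kHalf);
      auto predictor = paddle::CreatePaddlePredictor(config);
      return predictor != nullptr ? 0 : 1;
    }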