diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index e1e1be683123966235c7e3b00fe894ff2c841c94..03f5a751511adba7b508db9944c30d17866bad2d 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -124,6 +124,7 @@ void TensorRTEngine::FreezeNetwork() {
                 << ", this might be ok when trt does not need this range";
       }
     }
+#if IS_TRT_VERSION_GE(5122)
     auto is_layer_int8 = [&](nvinfer1::ILayer *layer) -> bool {
       for (int j = 0; j < layer->getNbInputs(); j++) {
         auto *temp_in = layer->getInput(j);
@@ -161,6 +162,11 @@ void TensorRTEngine::FreezeNetwork() {
         layer->setPrecision(nvinfer1::DataType::kFLOAT);
       }
     }
+#else
+    LOG(WARNING) << "If your TensorRT version is lower than 5.1.2.2, you "
+                    "must provide quantization scales for all tensors using "
+                    "TRT to run.";
+#endif
 #endif
   }
 }
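
Note on the version check: the IS_TRT_VERSION_GE(5122) guard compares against an encoded TensorRT version, where 5122 corresponds to release 5.1.2.2. Below is a minimal sketch of how such a macro can be assembled from the NV_TENSORRT_* version macros exported by NvInfer.h; the exact definition Paddle uses lives in its TensorRT helper header, so treat the details here as illustrative rather than as the project's actual code.

```cpp
// Illustrative only: a version-check macro in the spirit of IS_TRT_VERSION_GE,
// assuming the standard NV_TENSORRT_MAJOR/MINOR/PATCH/BUILD macros from
// NvInfer.h. Paddle's real macro may differ in detail.
#include <NvInfer.h>

#define TRT_VERSION_ENCODED                              \
  (NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 +  \
   NV_TENSORRT_PATCH * 10 + NV_TENSORRT_BUILD)

#define IS_TRT_VERSION_GE(version) (TRT_VERSION_ENCODED >= (version))

// Example: TensorRT 5.1.2.2 encodes as 5*1000 + 1*100 + 2*10 + 2 = 5122,
// so IS_TRT_VERSION_GE(5122) holds for 5.1.2.2 and later releases, which is
// where the diff relies on per-layer precision control (ILayer::setPrecision)
// and falls back to a warning on older builds.
```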