diff --git a/paddle/fluid/inference/tensorrt/convert/hard_sigmoid_op.cc b/paddle/fluid/inference/tensorrt/convert/hard_sigmoid_op.cc
index 301b4140ac20fe5cd6d08e0d71136774ecaf91b1..7e94bd0c487bd5048d97cd559f212c883ee843ea 100644
--- a/paddle/fluid/inference/tensorrt/convert/hard_sigmoid_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/hard_sigmoid_op.cc
@@ -25,7 +25,7 @@ class HardSigmoidOpConverter : public OpConverter {
  public:
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope, bool test_mode) override {
-#if IS_TRT_VERSION_GE(5000)
+#if IS_TRT_VERSION_GE(5130)
     VLOG(3) << "convert a fluid HardSigmoid op to tensorrt IActivationLayer "
                "layer without bias";
     framework::OpDesc op_desc(op, nullptr);
diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index 4c0806ba307e95c7c2a792dd1c1a57200d74d0b6..9a72de15f3974aa85a7b82a94cdc3936a06330c5 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -124,6 +124,7 @@ void TensorRTEngine::FreezeNetwork() {
                 << ", this might be ok when trt does not need this range";
       }
     }
+#if IS_TRT_VERSION_GE(5122)
     auto is_layer_int8 = [&](nvinfer1::ILayer *layer) -> bool {
       for (int j = 0; j < layer->getNbInputs(); j++) {
         auto *temp_in = layer->getInput(j);
@@ -161,6 +162,11 @@ void TensorRTEngine::FreezeNetwork() {
           layer->setPrecision(nvinfer1::DataType::kFLOAT);
         }
       }
+#else
+    LOG(WARNING) << "If your TensorRT version is lower than 5.1.2.2, you "
+                    "must provide quantization scales for all tensors for "
+                    "TRT to run.";
+#endif
 #endif
   }
 }
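
Both hunks hinge on the `IS_TRT_VERSION_GE` guard. As a minimal sketch, such a guard is typically assembled from the version macros TensorRT itself ships; the exact encoding below (major * 1000 + minor * 100 + patch * 10 + build) is an assumption inferred from the thresholds in this patch, where 5130 reads as 5.1.3.0 and 5122 as 5.1.2.2, matching the "5.1.2.2" quoted in the new warning:

```cpp
// Sketch only: a compile-time TensorRT version guard assembled from the
// NV_TENSORRT_* macros provided by the TensorRT headers. The encoding is an
// assumption inferred from the 5130 / 5122 thresholds used in this patch.
#include <NvInfer.h>  // defines NV_TENSORRT_MAJOR / MINOR / PATCH / BUILD

#define TRT_VERSION                                     \
  (NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 + \
   NV_TENSORRT_PATCH * 10 + NV_TENSORRT_BUILD)
#define IS_TRT_VERSION_GE(version) (TRT_VERSION >= (version))
```

Under this encoding, `#if IS_TRT_VERSION_GE(5130)` compiles the guarded block only when building against TensorRT 5.1.3.0 or newer, which is exactly how the two hunks above use it.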
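
The hard_sigmoid change raises the floor from 5000 to 5130 because the converter maps the op onto TensorRT's built-in hard-sigmoid activation, which TensorRT 5.0 does not provide. A self-contained sketch of that mapping follows; the helper name `AddHardSigmoid` is illustrative, not the Paddle converter itself:

```cpp
// Sketch: map hard_sigmoid onto TensorRT's built-in activation. The
// ActivationType::kHARD_SIGMOID kind computes max(0, min(1, alpha*x + beta))
// and is only available in TensorRT 5.1+, hence the raised version guard.
#include <NvInfer.h>

nvinfer1::IActivationLayer* AddHardSigmoid(nvinfer1::INetworkDefinition* net,
                                           nvinfer1::ITensor* input,
                                           float slope, float offset) {
  auto* layer =
      net->addActivation(*input, nvinfer1::ActivationType::kHARD_SIGMOID);
  layer->setAlpha(slope);   // scale applied to the input
  layer->setBeta(offset);   // offset added before clipping to [0, 1]
  return layer;
}
```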
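
The engine.cc change wraps the int8 mixed-precision fallback in the same kind of guard: querying calibration state with `ITensor::dynamicRangeIsSet` and pinning a layer back to FP32 with `ILayer::setPrecision` both require TensorRT 5.1, so older builds get the warning instead. A hedged sketch of the pattern, with the illustrative helper name `ForceFp32OnUncalibratedLayers`:

```cpp
// Sketch of the fallback the guarded block implements: when a layer has an
// input with no calibration dynamic range, request FP32 for that layer so an
// int8 engine can still be built. Not the Paddle implementation itself.
#include <NvInfer.h>

void ForceFp32OnUncalibratedLayers(nvinfer1::INetworkDefinition* network) {
  for (int i = 0; i < network->getNbLayers(); ++i) {
    nvinfer1::ILayer* layer = network->getLayer(i);
    bool all_inputs_calibrated = true;
    for (int j = 0; j < layer->getNbInputs(); ++j) {
      nvinfer1::ITensor* in = layer->getInput(j);
      // dynamicRangeIsSet() is a TensorRT 5.1 API, which is why the patch
      // guards this logic with IS_TRT_VERSION_GE(5122).
      if (in != nullptr && !in->dynamicRangeIsSet()) {
        all_inputs_calibrated = false;
        break;
      }
    }
    if (!all_inputs_calibrated) {
      layer->setPrecision(nvinfer1::DataType::kFLOAT);
    }
  }
}
```

Note that in the TensorRT 5.x API, per-layer precision requests like this only take effect when strict type constraints are enabled on the builder via `IBuilder::setStrictTypeConstraints(true)`.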