diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index 03f5a751511adba7b508db9944c30d17866bad2d..22be877493272cd393538fd4f04184e77d38e2db 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -186,6 +186,14 @@ void TensorRTEngine::FreezeNetwork() {
         Vec2TRT_Dims(optim_input_shape_[input.first], input.first, true));
   }
   infer_builder_config_->addOptimizationProfile(optim_profile_);
+  infer_builder_config_->setMaxWorkspaceSize(max_workspace_);
+  if (enable_int8) {
+    // Due to a bug in TRT, the precision BuilderFlag must be set to kFP16
+    // before kINT8 here to perform INT8 inference.
+    infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
+    infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kINT8);
+    infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kSTRICT_TYPES);
+  }
   if (WithFp16()) {
     infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
     if (disable_trt_plugin_fp16()) {
diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc
index f5d22b982de2b41474d97565a44dedc67c8a85d7..e8cbb9431cb3c79ed6c6269f96c256fd50afb121 100644
--- a/paddle/fluid/inference/tensorrt/op_teller.cc
+++ b/paddle/fluid/inference/tensorrt/op_teller.cc
@@ -51,6 +51,7 @@ struct SimpleOpTypeSetTeller : public Teller {
       "relu",
       "depthwise_conv2d",
       "softmax",
+      "sigmoid",
       "batch_norm",
       "elementwise_add",
       "leaky_relu",
diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index 2bd30bc05179e2881c4ecb321d76d5506233cc0e..07af5c152b1cd42d1034ed9c5a1d8d8bc3782827 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -431,9 +431,9 @@ if(WITH_GPU AND TENSORRT_FOUND)
           EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
           ARGS --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)
 
-  set(TRT_MODEL_QUANT_RESNET_DIR "${INFERENCE_DEMO_INSTALL_DIR}/quant_small_model")
+  set(TRT_MODEL_QUANT_RESNET_DIR "${INFERENCE_DEMO_INSTALL_DIR}/small_quant_model")
   if (NOT EXISTS ${TRT_MODEL_QUANT_RESNET_DIR})
-    inference_download_and_uncompress(${INFERENCE_DEMO_INSTALL_DIR} ${INFERENCE_URL}/tensorrt_test "quant_small_model.tar.gz")
+    inference_download_and_uncompress(${INFERENCE_DEMO_INSTALL_DIR} ${INFERENCE_URL}/tensorrt_test "small_quant_model.tgz")
   endif()
   inference_analysis_test(trt_quant_int8_test SRCS trt_quant_int8_test.cc
           EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
diff --git a/paddle/fluid/inference/tests/api/trt_quant_int8_test.cc b/paddle/fluid/inference/tests/api/trt_quant_int8_test.cc
index ca5cdbbcb26c81d028d87a7ec186c73d6e2b7fde..6adf3cf743b0e3c303b28c1e4cdc3b4be376bf32 100644
--- a/paddle/fluid/inference/tests/api/trt_quant_int8_test.cc
+++ b/paddle/fluid/inference/tests/api/trt_quant_int8_test.cc
@@ -25,12 +25,20 @@ namespace inference {
 
 TEST(quant_int8, resnet50) {
   std::string model_dir = FLAGS_infer_model;
   AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
+  config.EnableUseGpu(1000, 0);
   config.SetModel(model_dir);
   config.SwitchUseFeedFetchOps(false);
   config.EnableTensorRtEngine(1 << 30, 1, 1, AnalysisConfig::Precision::kInt8,
                               false, false);
+  std::map<std::string, std::vector<int>> min_input_shape = {
+      {"image", {1, 1, 3, 3}}};
+  std::map<std::string, std::vector<int>> max_input_shape = {
+      {"image", {1, 1, 10, 10}}};
+  std::map<std::string, std::vector<int>> opt_input_shape = {
+      {"image", {1, 1, 3, 3}}};
+  config.SetTRTDynamicShapeInfo(min_input_shape, max_input_shape,
+                                opt_input_shape);
   auto predictor = CreatePaddlePredictor(config);
   auto input_names = predictor->GetInputNames();
   int channels = 1;
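
Note: for anyone reproducing this outside the test harness, here is a minimal standalone sketch of the INT8 + dynamic-shape configuration that trt_quant_int8_test.cc exercises. The header path and model directory below are assumptions; the AnalysisConfig calls mirror the test itself.

// Minimal sketch (not part of the patch): Paddle TensorRT engine with INT8
// precision and dynamic input shapes, mirroring trt_quant_int8_test.cc.
#include <map>
#include <string>
#include <vector>

#include "paddle_inference_api.h"  // assumed header name/location

int main() {
  paddle::AnalysisConfig config;
  config.EnableUseGpu(1000 /* initial GPU memory pool, MB */, 0 /* GPU id */);
  config.SetModel("./small_quant_model");  // assumed local model directory
  config.SwitchUseFeedFetchOps(false);
  // 1 GB TRT workspace, max batch 1, min subgraph size 1, INT8 precision;
  // use_static=false, use_calib_mode=false (the model is already quantized).
  config.EnableTensorRtEngine(1 << 30, 1, 1,
                              paddle::AnalysisConfig::Precision::kInt8,
                              false, false);
  // Min/opt/max shape ranges for the "image" input enable TRT dynamic shapes.
  std::map<std::string, std::vector<int>> min_input_shape = {
      {"image", {1, 1, 3, 3}}};
  std::map<std::string, std::vector<int>> max_input_shape = {
      {"image", {1, 1, 10, 10}}};
  std::map<std::string, std::vector<int>> opt_input_shape = {
      {"image", {1, 1, 3, 3}}};
  config.SetTRTDynamicShapeInfo(min_input_shape, max_input_shape,
                                opt_input_shape);
  auto predictor = paddle::CreatePaddlePredictor(config);
  return predictor != nullptr ? 0 : 1;
}

On the engine.cc side, kFP16 is set before kINT8 to work around the TRT bug noted in the code comment, and kSTRICT_TYPES asks the builder to enforce the requested type constraints rather than silently falling back to other precisions.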