From 5801311cb2e3f30a8d1a4ef2dee07dcb8e31d183 Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Wed, 4 Aug 2021 02:54:23 +0000
Subject: [PATCH] not infer for int8 + normal trained model

---
 tests/test.sh | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/test.sh b/tests/test.sh
index c6c0c9b3..7bc7c0fa 100644
--- a/tests/test.sh
+++ b/tests/test.sh
@@ -185,6 +185,9 @@ function func_inference(){
         elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
             for use_trt in ${use_trt_list[*]}; do
                 for precision in ${precision_list[*]}; do
+                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
+                        continue
+                    fi
                     if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                         continue
                     fi
@@ -241,7 +244,6 @@ if [ ${MODE} = "infer" ]; then
         fi
         #run inference
         is_quant=${infer_quant_flag[Count]}
-        echo "is_quant: ${is_quant}"
         func_inference "${python}" "${inference_py}" "${infer_model}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
         Count=$(($Count + 1))
     done
-- 
GitLab