diff --git a/deploy/slim/quantization/export_model.py b/deploy/slim/quantization/export_model.py
index 87d2f333c1923b088d32e5fb420ae7a5021d6b08..d94e53034a2bf67b364e6d91f83acfb9e5445b8a 100755
--- a/deploy/slim/quantization/export_model.py
+++ b/deploy/slim/quantization/export_model.py
@@ -101,7 +101,7 @@ def main():
     quanter = QAT(config=quant_config)
     quanter.quantize(model)
 
-    init_model(config, model, logger)
+    init_model(config, model)
     model.eval()
 
     # build metric
diff --git a/test/ocr_det_params.txt b/test/ocr_det_params.txt
index da7e034bdd1ef1f593020d2dd5e809b472651b54..bdfd4d4f47431bca97437963e1dc56d1b57838bb 100644
--- a/test/ocr_det_params.txt
+++ b/test/ocr_det_params.txt
@@ -17,7 +17,7 @@ distill_train:null
 eval:tools/eval.py -c configs/det/det_mv3_db.yml -o
 
 Global.save_inference_dir:./output/
-Global.checkpoints:
+Global.pretrained_model:
 norm_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o
 quant_export:deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o
 fpgm_export:deploy/slim/prune/export_prune_model.py
diff --git a/test/test.sh b/test/test.sh
index a75aed426988755e395f27520fc76de10b4676d1..1cbb36012b5be45ad9ecf718df4df4d6f165c76f 100644
--- a/test/test.sh
+++ b/test/test.sh
@@ -101,7 +101,7 @@ function func_inference(){
             for use_mkldnn in ${use_mkldnn_list[*]}; do
                 for threads in ${cpu_threads_list[*]}; do
                     for batch_size in ${batch_size_list[*]}; do
-                        _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}"
+                        _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
                         command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True"
                         eval $command
                         status_check $? "${command}" "${status_log}"
@@ -115,7 +115,7 @@ function func_inference(){
                         continue
                     fi
                     for batch_size in ${batch_size_list[*]}; do
-                        _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}"
+                        _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                         command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True"
                         eval $command
                         status_check $? "${command}" "${status_log}"
@@ -136,6 +136,7 @@ for gpu in ${gpu_list[*]}; do
         env=""
     elif [ ${#gpu} -le 1 ];then
         env="export CUDA_VISIBLE_DEVICES=${gpu}"
+        eval ${env}
     elif [ ${#gpu} -le 15 ];then
         IFS=","
         array=(${gpu})
@@ -215,7 +216,7 @@ for gpu in ${gpu_list[*]}; do
             status_check $? "${export_cmd}" "${status_log}"
 
             #run inference
-            echo $env
+            eval $env
             save_infer_path="${save_log}"
             func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}"
         done