Unverified commit cd53b915, authored by Double_V, committed by GitHub

Merge pull request #3272 from LDOUBLEV/test_ci_v6

set env
@@ -101,7 +101,7 @@ def main():
     quanter = QAT(config=quant_config)
     quanter.quantize(model)
-    init_model(config, model, logger)
+    init_model(config, model)
     model.eval()
     # build metric
...
@@ -17,7 +17,7 @@ distill_train:null
 eval:tools/eval.py -c configs/det/det_mv3_db.yml -o
 Global.save_inference_dir:./output/
-Global.checkpoints:
+Global.pretrained_model:
 norm_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o
 quant_export:deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o
 fpgm_export:deploy/slim/prune/export_prune_model.py
...
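For context on the renamed key: each line in this params file is a colon-separated key/value pair that the test driver splits and forwards as a `-o` override. A minimal parsing sketch, with the helper names and the sample value assumed rather than taken from this diff:

#!/bin/bash
# Hypothetical sketch: split one "key:value" line from the params file.
function func_parser_key(){
    local IFS=":"
    local array=($1)
    echo ${array[0]}            # e.g. "Global.pretrained_model"
}
function func_parser_value(){
    local IFS=":"
    local array=($1)
    echo ${array[1]}            # e.g. "./pretrain_models/db_mv3" (made-up value)
}
line="Global.pretrained_model:./pretrain_models/db_mv3"
echo "$(func_parser_key ${line})=$(func_parser_value ${line})"
# prints: Global.pretrained_model=./pretrain_models/db_mv3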
@@ -101,7 +101,7 @@ function func_inference(){
         for use_mkldnn in ${use_mkldnn_list[*]}; do
             for threads in ${cpu_threads_list[*]}; do
                 for batch_size in ${batch_size_list[*]}; do
-                    _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}"
+                    _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
                     command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True"
                     eval $command
                     status_check $? "${command}" "${status_log}"
@@ -115,7 +115,7 @@ function func_inference(){
                 continue
             fi
             for batch_size in ${batch_size_list[*]}; do
-                _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}"
+                _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                 command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True"
                 eval $command
                 status_check $? "${command}" "${status_log}"
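The only change in the two hunks above is the added `.log` suffix, which gives each benchmark run a self-describing log file per parameter combination. A quick illustration with made-up values:

#!/bin/bash
# Illustrative values only; the real ones come from the *_list variables.
_log_path="./output"
use_mkldnn="True"; threads=6; batch_size=1
_save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
echo "${_save_log_path}"   # ./output/infer_cpu_usemkldnn_True_threads_6_batchsize_1.log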
@@ -136,6 +136,7 @@ for gpu in ${gpu_list[*]}; do
         env=""
     elif [ ${#gpu} -le 1 ];then
         env="export CUDA_VISIBLE_DEVICES=${gpu}"
+        eval ${env}
     elif [ ${#gpu} -le 15 ];then
         IFS=","
         array=(${gpu})
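The added `eval ${env}` is the heart of the commit message ("set env"): this branch only builds the `export CUDA_VISIBLE_DEVICES=...` string, and a command stored in a string does nothing until it is evaluated in the current shell. A minimal sketch of the distinction:

#!/bin/bash
# A command stored in a string is inert until eval'd in the current shell.
env="export CUDA_VISIBLE_DEVICES=0"

echo ${env}                                     # only prints the command text
echo "before eval: '${CUDA_VISIBLE_DEVICES}'"   # still empty

eval ${env}                                     # actually exports the variable
echo "after eval:  '${CUDA_VISIBLE_DEVICES}'"   # now '0'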
@@ -215,7 +216,7 @@ for gpu in ${gpu_list[*]}; do
         status_check $? "${export_cmd}" "${status_log}"
         #run inference
-        echo $env
+        eval $env
         save_infer_path="${save_log}"
         func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}"
     done
...
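Both hunks in the main loop use the same run-then-record pattern: `eval` executes the assembled command, then `status_check` records its exit code in a shared status log. A sketch of what such a helper typically looks like; its exact body is not part of this diff, so treat it as an assumption:

#!/bin/bash
# Assumed shape of the status_check helper used throughout test.sh.
function status_check(){
    local last_status=$1    # exit code of the command that just ran
    local run_command=$2
    local run_log=$3
    if [ ${last_status} -eq 0 ]; then
        echo "Run successfully with command - ${run_command}!" | tee -a ${run_log}
    else
        echo "Run failed with command - ${run_command}!" | tee -a ${run_log}
    fi
}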