#!/bin/bash
# infer.sh <params_file>
#
# Driver for PaddleOCR det/rec benchmark runs: reads a line-oriented
# parameter file, then (per model / compression strategy) evaluates,
# exports, and benchmarks inference on CPU and GPU.

FILENAME=$1
dataline=$(cat "${FILENAME}")
# parser params: split the file into one array element per line
IFS=$'\n'
lines=(${dataline})
# func_parser "key: value"
# Prints the second field of the argument after splitting on ':' and
# space characters — i.e. the value part of a "key: value" config line.
# BUG FIX: the original left IFS set to ": " after returning, leaking the
# colon/space split into the caller's word splitting; save and restore it.
function func_parser(){
    strs=$1
    local _saved_ifs=$IFS
    IFS=": "
    array=(${strs})
    tmp=${array[1]}
    IFS=$_saved_ifs
    echo ${tmp}
}
# Map the parsed file lines to named parameters.
# The indices below follow the fixed layout of the params file — NOTE(review):
# lines 1, 2 and 5 are intentionally skipped here; confirm against the file
# format before reordering.
IFS=$'\n'
# The training params
train_model_list=$(func_parser "${lines[0]}")
slim_trainer_list=$(func_parser "${lines[3]}")
python=$(func_parser "${lines[4]}")
# inference params
# inference=$(func_parser "${lines[5]}")
devices=$(func_parser "${lines[6]}")
use_mkldnn_list=$(func_parser "${lines[7]}")
cpu_threads_list=$(func_parser "${lines[8]}")
rec_batch_size_list=$(func_parser "${lines[9]}")
gpu_trt_list=$(func_parser "${lines[10]}")
gpu_precision_list=$(func_parser "${lines[11]}")
infer_gpu_id=$(func_parser "${lines[12]}")
log_path=$(func_parser "${lines[13]}")
# status_check <exit_code> <model> <command> <log_file>
# Appends (and echoes) a colored success/failure line for the previous
# command's exit code.
function status_check(){
    last_status=$1   # the exit code
    run_model=$2
    run_command=$3
    save_log=$4
    if [ $last_status -eq 0 ]; then
        echo -e "\033[33m $run_model successfully with command - ${run_command}!  \033[0m" | tee -a ${save_log}
    else
        # BUG FIX: original referenced undefined $case here, so failure
        # lines never named the model.
        echo -e "\033[33m $run_model failed with command - ${run_command}!  \033[0m" | tee -a ${save_log}
    fi
}
# '|' separates the entries of every *_list parameter parsed above.
IFS='|'
for train_model in ${train_model_list[*]}; do
    # Pick the config file and benchmark dataset for the model family.
    if [ ${train_model} = "ocr_det" ];then
        model_name="det"
        yml_file="configs/det/det_mv3_db.yml"
        # wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar 
        tar xf ./inference/ch_det_data_50.tar 
        img_dir="./inference/ch_det_data_50/"
    elif [ ${train_model} = "ocr_rec" ];then
        model_name="rec"
        yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml"
        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_rec_data_200.tar && tar xf ./inference/ch_rec_data_200.tar 
        img_dir="./inference/ch_rec_data_200/"
    fi

    # For every compression strategy: fetch the matching pretrained model,
    # run eval, export an inference model, then benchmark inference.
    for slim_trainer in ${slim_trainer_list[*]}; do 
        if [ ${slim_trainer} = "norm" ]; then
            if [ ${model_name} = "det" ]; then
                eval_model_name="ch_ppocr_mobile_v2.0_det_infer"
                wget -nc  -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
            else 
                eval_model_name="ch_ppocr_mobile_v2.0_rec_infer"
                wget -nc  -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar
            fi 
        elif [ ${slim_trainer} = "quant" ]; then
            if [ ${model_name} = "det" ]; then
                eval_model_name="ch_ppocr_mobile_v2.0_det_quant_infer"
                wget -nc  -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar
            else
                eval_model_name="ch_ppocr_mobile_v2.0_rec_quant_infer"
                wget -nc  -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_quant_train.tar
            fi
        elif [ ${slim_trainer} = "distill" ]; then
            if [ ${model_name} = "det" ]; then
                eval_model_name="ch_ppocr_mobile_v2.0_det_distill_infer"
                wget -nc  -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_distill_train.tar
            else
                eval_model_name="ch_ppocr_mobile_v2.0_rec_distill_infer"
                wget -nc  -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_distill_train.tar
            fi 
        elif [ ${slim_trainer} = "prune" ]; then
            if [ ${model_name} = "det" ]; then
                eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train"
                wget -nc  -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar
            else
                eval_model_name="ch_ppocr_mobile_v2.0_rec_prune_train"
                wget -nc  -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_prune_train.tar
            fi
        fi

        # eval the downloaded model
        save_log_path="${log_path}/${eval_model_name}"
        command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${save_log_path}"
        ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${save_log_path}
        # BUG FIX: original passed undefined ${trainer}; use ${slim_trainer}.
        status_check $? "${slim_trainer}" "${command}" "${save_log_path}/train.log"

        # export an inference model from the trained weights
        command="${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path}"
        ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir="${log_path}/${eval_model_name}_infer" Global.save_model_dir=${save_log_path}
        # BUG FIX: capture the export exit code — the original tested $? AFTER
        # status_check ran, so it always saw status_check's (tee's) status.
        export_status=$?
        status_check ${export_status} "${slim_trainer}" "${command}" "${save_log_path}/train.log"

        if [ ${export_status} -eq 0 ]; then
            # BUG FIX: original wrote to undefined ${save_log}; use ${save_log_path}.
            echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${save_log_path}/train.log
        else
            cat ${save_log_path}/train.log
            echo -e "\033[33m training of $model_name failed!\033[0m" | tee -a ${save_log_path}/train.log
        fi

        # Choose the predictor script and model dirs for benchmarking.
        if [ "${model_name}" = "det" ]; then 
            # NOTE(review): this clobbers the parsed rec_batch_size_list for any
            # later "ocr_rec" iteration — confirm forcing batch size 1 is intended.
            export rec_batch_size_list=( "1" )
            inference="tools/infer/predict_det.py"
            det_model_dir=${log_path}/${eval_model_name}_infer
            rec_model_dir=""
        elif [ "${model_name}" = "rec" ]; then
            inference="tools/infer/predict_rec.py"
            rec_model_dir=${log_path}/${eval_model_name}_infer
            det_model_dir=""
        fi
        # inference benchmarks over the device / mkldnn / threads / batch grids
        for device in ${devices[*]}; do 
            if [ ${device} = "cpu" ]; then
                for use_mkldnn in ${use_mkldnn_list[*]}; do
                    for threads in ${cpu_threads_list[*]}; do
                        for rec_batch_size in ${rec_batch_size_list[*]}; do    
                            save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log"
                            command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir}  --image_dir=${img_dir}  --save_log_path=${save_log_path}"
                            ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir}  --image_dir=${img_dir}  --save_log_path=${save_log_path}
                            # BUG FIX: ${trainer} was undefined; use ${slim_trainer}.
                            status_check $? "${slim_trainer}" "${command}" "${save_log_path}"
                        done
                    done
                done
            else 
                env="CUDA_VISIBLE_DEVICES=${infer_gpu_id}"
                for use_trt in ${gpu_trt_list[*]}; do
                    for precision in ${gpu_precision_list[*]}; do
                        # TensorRT-off runs only make sense at fp32.
                        if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then
                            continue
                        fi
                        for rec_batch_size in ${rec_batch_size_list[*]}; do
                            save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log"
                            command="${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt}  --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}"
                            # BUG FIX: "${env} cmd" tries to execute the assignment
                            # string as a program name; run through env(1) so the
                            # CUDA_VISIBLE_DEVICES setting is actually applied.
                            env ${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt}  --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}
                            # BUG FIX: ${trainer} was undefined; use ${slim_trainer}.
                            status_check $? "${slim_trainer}" "${command}" "${save_log_path}"
                        done
                    done
                done
            fi
        done
    done
done