#!/bin/bash
# TIPC C++ inference test driver for PaddleClas.
# Usage: bash test_tipc/test_inference_cpp.sh <config_file> [gpu_id]
source test_tipc/common_func.sh

# $1: TIPC config file; one key:value pair per line.
FILENAME=$1

# $2: GPU id to run inference on; defaults to 0 when not given.
GPUID=$2
if [[ ! $GPUID ]]; then
    GPUID=0
fi

# Read the config lines consumed below. NOTE: the upper bound must cover every
# ${lines[N]} index used in this script; lines[17]/lines[18] are read further
# down, so we need the first 19 lines (previously NR==16, which left
# generate_yaml_cmd / transform_index_cmd empty).
dataline=$(awk 'NR==1, NR==19{print}' "$FILENAME")

# parser params: split the config into an array, one element per line.
IFS=$'\n'
lines=(${dataline})

# parser cpp inference model
model_name=$(func_parser_value "${lines[1]}")
cpp_infer_type=$(func_parser_value "${lines[2]}")
cpp_infer_model_dir=$(func_parser_value "${lines[3]}")
cpp_det_infer_model_dir=$(func_parser_value "${lines[4]}")
cpp_infer_is_quant=$(func_parser_value "${lines[7]}")

# parser cpp inference
inference_cmd=$(func_parser_value "${lines[8]}")
cpp_use_gpu_list=$(func_parser_value "${lines[9]}")
cpp_use_mkldnn_list=$(func_parser_value "${lines[10]}")
cpp_cpu_threads_list=$(func_parser_value "${lines[11]}")
cpp_batch_size_list=$(func_parser_value "${lines[12]}")
cpp_use_trt_list=$(func_parser_value "${lines[13]}")
cpp_precision_list=$(func_parser_value "${lines[14]}")
cpp_image_dir_value=$(func_parser_value "${lines[15]}")
cpp_benchmark_value=$(func_parser_value "${lines[16]}")
generate_yaml_cmd=$(func_parser_value "${lines[17]}")
transform_index_cmd=$(func_parser_value "${lines[18]}")

LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_cpp.log"
# generate_yaml_cmd="python3 test_tipc/generate_cpp_yaml.py"
# Run the ShiTu (det + rec) C++ inference binary over the full matrix of
# device/backend options parsed from the TIPC config.
# Arguments:
#   $1 - inference command (binary + args) to execute
#   $2 - inference model directory (unused here; kept for interface parity)
#   $3 - directory where per-run log files are written
#   $4 - image directory passed to the generated yaml
#   $5 - "True"/"False": whether the model under test is quantized
# Globals read: cpp_* option lists, generate_yaml_cmd, transform_index_cmd,
#               cpp_infer_model_dir, cpp_det_infer_model_dir, GPUID, status_log
function func_shitu_cpp_inference(){
    IFS='|'
    _script=$1
    _model_dir=$2
    _log_path=$3
    _img_dir=$4
    _flag_quant=$5
    # inference
    for use_gpu in ${cpp_use_gpu_list[*]}; do
        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
            for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
                # quantized models are not exercised without mkldnn
                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                    continue
                fi
                for threads in ${cpp_cpu_threads_list[*]}; do
                    for batch_size in ${cpp_batch_size_list[*]}; do
                        precision="fp32"
                        # NOTE(review): unreachable given the 'continue' above
                        # (same condition); fixed the original 'precison' typo
                        # so the assignment targets the right variable.
                        if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                            precision="int8"
                        fi
                        _save_log_path="${_log_path}/shitu_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                        # regenerate the inference yaml for this option combo
                        command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}"
                        eval $command
                        eval $transform_index_cmd
                        command="${_script} 2>&1|tee ${_save_log_path}"
                        eval $command
                        # PIPESTATUS[0]: exit code of the inference binary, not tee
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${command}" "${status_log}"
                    done
                done
            done
        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
            for use_trt in ${cpp_use_trt_list[*]}; do
                for precision in ${cpp_precision_list[*]}; do
                    # int8 runs require a quantized model
                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                        continue
                    fi
                    # fp16/int8 only make sense with TensorRT enabled
                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                        continue
                    fi
                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
                        continue
                    fi
                    for batch_size in ${cpp_batch_size_list[*]}; do
                        _save_log_path="${_log_path}/shitu_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                        command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}"
                        eval $command
                        eval $transform_index_cmd
                        command="${_script} 2>&1|tee ${_save_log_path}"
                        eval $command
                        last_status=${PIPESTATUS[0]}
                        # log the full piped command (was ${_script}) for
                        # consistency with the CPU branch and the cls function
                        status_check $last_status "${command}" "${status_log}"
                    done
                done
            done
        else
            echo "Does not support hardware other than CPU and GPU Currently!"
        fi
    done
}

# Run the classification C++ inference binary over the full matrix of
# device/backend options parsed from the TIPC config.
# Arguments:
#   $1 - inference command (binary + args) to execute
#   $2 - inference model directory (unused here; kept for interface parity)
#   $3 - directory where per-run log files are written
#   $4 - image directory passed to the generated yaml
#   $5 - "True"/"False": whether the model under test is quantized
# Globals read: cpp_* option lists, generate_yaml_cmd, cpp_infer_model_dir,
#               GPUID, status_log
function func_cls_cpp_inference(){
    IFS='|'
    _script=$1
    _model_dir=$2
    _log_path=$3
    _img_dir=$4
    _flag_quant=$5
    # inference
    for use_gpu in ${cpp_use_gpu_list[*]}; do
        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
            for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
                # quantized models are not exercised without mkldnn
                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                    continue
                fi
                for threads in ${cpp_cpu_threads_list[*]}; do
                    for batch_size in ${cpp_batch_size_list[*]}; do
                        precision="fp32"
                        # NOTE(review): unreachable given the 'continue' above
                        # (same condition); fixed the original 'precison' typo
                        # so the assignment targets the right variable.
                        if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                            precision="int8"
                        fi
                        _save_log_path="${_log_path}/cls_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                        # regenerate the inference yaml for this option combo
                        command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
                        eval $command
                        command1="${_script} 2>&1|tee ${_save_log_path}"
                        eval ${command1}
                        # PIPESTATUS[0]: exit code of the inference binary, not tee
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${command1}" "${status_log}"
                    done
                done
            done
        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
            for use_trt in ${cpp_use_trt_list[*]}; do
                for precision in ${cpp_precision_list[*]}; do
                    # int8 runs require a quantized model
                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                        continue
                    fi
                    # fp16/int8 only make sense with TensorRT enabled
                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                        continue
                    fi
                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
                        continue
                    fi
                    for batch_size in ${cpp_batch_size_list[*]}; do
                        _save_log_path="${_log_path}/cls_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                        command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
                        eval $command
                        command="${_script} 2>&1|tee ${_save_log_path}"
                        eval $command
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${command}" "${status_log}"
                    done
                done
            done
        else
            echo "Does not support hardware other than CPU and GPU Currently!"
        fi
    done
}

# Enter the C++ deploy directory that matches the configured inference type.
# NOTE(review): exits with 0 on an unsupported type — callers treating a zero
# exit as success will not notice the misconfiguration; kept as-is for
# backward compatibility.
if [[ $cpp_infer_type == "cls" ]]; then
    cd deploy/cpp
elif [[ $cpp_infer_type == "shitu" ]]; then
    cd deploy/cpp_shitu
else
    echo "Only support cls and shitu"
    exit 0
fi
dongshuilong 已提交
173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194
if [[ $cpp_infer_type == "shitu" ]]; then
    echo "################### update cmake ###################"
    wget -nc  https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0.tar.gz
    tar xf cmake-3.22.0.tar.gz
    cd ./cmake-3.22.0
    export root_path=$PWD
    export install_path=${root_path}/cmake
    eval "./bootstrap --prefix=${install_path}"
    make -j
    make install
    export PATH=${install_path}/bin:$PATH
    cd ..
    echo "################### update cmake done ###################"

    echo "################### build faiss ###################"
    apt-get install -y libopenblas-dev
    git clone https://github.com/facebookresearch/faiss.git
    cd faiss
    export faiss_install_path=$PWD/faiss_install
    eval "cmake -B build . -DFAISS_ENABLE_PYTHON=OFF  -DCMAKE_INSTALL_PREFIX=${faiss_install_path}"
    make -C build -j faiss
    make -C build install
195
    cd ..
D
dongshuilong 已提交
196 197 198 199 200 201 202 203 204
fi

# Build OpenCV 3.4.7 from source, unless a cached build whose source tarball
# matches the expected md5 is already present.
# NOTE: $(md5sum …) is quoted so the test does not collapse to a syntax error
# when the tarball is missing (md5sum then prints nothing).
if [ -d "opencv-3.4.7/opencv3/" ] && [ "$(md5sum opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}')" = "faa2b5950f8bee3f03118e600c74746a" ];then
    echo "################### build opencv skipped ###################"
else
    echo "################### build opencv ###################"
    rm -rf opencv-3.4.7.tar.gz opencv-3.4.7/
    wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
    tar -xf opencv-3.4.7.tar.gz

    cd opencv-3.4.7/
    install_path=$(pwd)/opencv3

    rm -rf build
    mkdir build
    cd build

    # static, self-contained build: bundled zlib/jpeg/png/tiff, no IPP/LAPACK/Eigen
    cmake .. \
            -DCMAKE_INSTALL_PREFIX=${install_path} \
            -DCMAKE_BUILD_TYPE=Release \
            -DBUILD_SHARED_LIBS=OFF \
            -DWITH_IPP=OFF \
            -DBUILD_IPP_IW=OFF \
            -DWITH_LAPACK=OFF \
            -DWITH_EIGEN=OFF \
            -DCMAKE_INSTALL_LIBDIR=lib64 \
            -DWITH_ZLIB=ON \
            -DBUILD_ZLIB=ON \
            -DWITH_JPEG=ON \
            -DBUILD_JPEG=ON \
            -DWITH_PNG=ON \
            -DBUILD_PNG=ON \
            -DWITH_TIFF=ON \
            -DBUILD_TIFF=ON

    make -j
    make install
    cd ../../
    echo "################### build opencv finished ###################"
fi
# Build the PaddleClas C++ demo against Paddle Inference, OpenCV and CUDA.
echo "################### build PaddleClas demo ####################"
OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
# LIB_DIR=/work/project/project/test/paddle_inference/
LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`)
CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`)

BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
if [[ $cpp_infer_type == cls ]]; then
    cmake .. \
        -DPADDLE_LIB=${LIB_DIR} \
        -DWITH_MKL=ON \
        -DWITH_GPU=ON \
        -DWITH_STATIC_LIB=OFF \
        -DWITH_TENSORRT=OFF \
        -DOPENCV_DIR=${OPENCV_DIR} \
        -DCUDNN_LIB=${CUDNN_LIB_DIR} \
        -DCUDA_LIB=${CUDA_LIB_DIR} \
        -DTENSORRT_DIR=${TENSORRT_DIR}
else
    # shitu additionally links faiss (built above)
    cmake .. \
        -DPADDLE_LIB=${LIB_DIR} \
        -DWITH_MKL=ON \
        -DWITH_GPU=ON \
        -DWITH_STATIC_LIB=OFF \
        -DWITH_TENSORRT=OFF \
        -DOPENCV_DIR=${OPENCV_DIR} \
        -DCUDNN_LIB=${CUDNN_LIB_DIR} \
        -DCUDA_LIB=${CUDA_LIB_DIR} \
        -DTENSORRT_DIR=${TENSORRT_DIR} \
        -DFAISS_DIR=${faiss_install_path} \
        -DFAISS_WITH_MKL=OFF
fi
make -j
cd ../../../
# cd ../../
echo "################### build PaddleClas demo finished ###################"
# set cuda device
# GPUID=$2
# if [ ${#GPUID} -le 0 ];then
#     env="export CUDA_VISIBLE_DEVICES=0"
# else
#     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
# fi
# set CUDA_VISIBLE_DEVICES
# eval $env


# Run the inference matrix once per configured model directory, pairing each
# model with its quantization flag (parallel '|'-separated lists).
echo "################### run test ###################"
export Count=0
IFS="|"
infer_quant_flag=(${cpp_infer_is_quant})
for infer_model in ${cpp_infer_model_dir[*]}; do
    #run inference
    is_quant=${infer_quant_flag[Count]}
    if [[ $cpp_infer_type == "cls" ]]; then
        func_cls_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
    else
        func_shitu_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
    fi
    Count=$(($Count + 1))
done