#!/bin/bash
source test_tipc/common_func.sh

# Usage: bash test_tipc/test_inference_cpp.sh <config_file> [gpu_id]
# Reads the first 16 lines of the TIPC config file; each line is one
# "key:value" entry parsed with func_parser_value from common_func.sh.
FILENAME=$1
dataline=$(awk 'NR==1, NR==16{print}' "$FILENAME")

# Split the config into one array element per line.
IFS=$'\n'
lines=(${dataline})

# parser cpp inference model
model_name=$(func_parser_value "${lines[1]}")
cpp_infer_type=$(func_parser_value "${lines[2]}")
cpp_infer_model_dir=$(func_parser_value "${lines[3]}")
cpp_det_infer_model_dir=$(func_parser_value "${lines[4]}")
# NOTE(review): lines[5] and lines[6] are intentionally skipped here —
# presumably reserved keys in the config template; confirm against the
# config files consumed by this script.
cpp_infer_is_quant=$(func_parser_value "${lines[7]}")

# parser cpp inference
inference_cmd=$(func_parser_value "${lines[8]}")
cpp_use_gpu_list=$(func_parser_value "${lines[9]}")
cpp_use_mkldnn_list=$(func_parser_value "${lines[10]}")
cpp_cpu_threads_list=$(func_parser_value "${lines[11]}")
cpp_batch_size_list=$(func_parser_value "${lines[12]}")
cpp_use_trt_list=$(func_parser_value "${lines[13]}")
cpp_precision_list=$(func_parser_value "${lines[14]}")
cpp_image_dir_value=$(func_parser_value "${lines[15]}")
cpp_benchmark_value=$(func_parser_value "${lines[16]}")

# All per-configuration logs and the aggregated status log go here.
LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_cpp.log"

# Helper that writes the yaml config consumed by the C++ inference binary.
generate_yaml_cmd="python3 test_tipc/generate_cpp_yaml.py"
function func_shitu_cpp_inference(){
D
dongshuilong 已提交
34 35 36 37 38 39 40
    IFS='|'
    _script=$1
    _model_dir=$2
    _log_path=$3
    _img_dir=$4
    _flag_quant=$5
    # inference 
D
dongshuilong 已提交
41

D
dongshuilong 已提交
42 43 44 45 46 47 48 49 50 51 52 53
    for use_gpu in ${cpp_use_gpu_list[*]}; do
        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
            for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                    continue
                fi
                for threads in ${cpp_cpu_threads_list[*]}; do
                    for batch_size in ${cpp_batch_size_list[*]}; do
                        precision="fp32"
                        if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                            precison="int8"
                        fi
D
dongshuilong 已提交
54 55 56 57 58 59
                        _save_log_path="${_log_path}/shitu_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"

			command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir}"
			eval $command
			command="${_script} 2>&1|tee ${_save_log_path}"
			eval $command
D
dongshuilong 已提交
60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${command}" "${status_log}"
                    done
                done
            done
        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
            for use_trt in ${cpp_use_trt_list[*]}; do
                for precision in ${cpp_precision_list[*]}; do
                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                        continue
                    fi 
                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                        continue
                    fi
                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
                        continue
                    fi
                    for batch_size in ${cpp_batch_size_list[*]}; do
D
dongshuilong 已提交
78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140
                        _save_log_path="${_log_path}/shitu_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
			command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir}"
                        eval $command
			command="${_script} 2>&1|tee ${_save_log_path}"
			eval $command
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${_script}" "${status_log}"
                    done
                done
            done
        else
            echo "Does not support hardware other than CPU and GPU Currently!"
        fi
    done
}

function func_cls_cpp_inference(){
    IFS='|'
    _script=$1
    _model_dir=$2
    _log_path=$3
    _img_dir=$4
    _flag_quant=$5
    # inference 

    for use_gpu in ${cpp_use_gpu_list[*]}; do
        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
            for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                    continue
                fi
                for threads in ${cpp_cpu_threads_list[*]}; do
                    for batch_size in ${cpp_batch_size_list[*]}; do
                        precision="fp32"
                        if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                            precison="int8"
                        fi
                        _save_log_path="${_log_path}/cls_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"

			command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir}"
			eval $command
			command1="${_script} 2>&1|tee ${_save_log_path}"
			eval ${command1}
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${command1}" "${status_log}"
                    done
                done
            done
        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
            for use_trt in ${cpp_use_trt_list[*]}; do
                for precision in ${cpp_precision_list[*]}; do
                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                        continue
                    fi 
                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                        continue
                    fi
                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
                        continue
                    fi
                    for batch_size in ${cpp_batch_size_list[*]}; do
                        _save_log_path="${_log_path}/cls_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
			command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir}"
D
dongshuilong 已提交
141
                        eval $command
D
dongshuilong 已提交
142 143
			command="${_script} 2>&1|tee ${_save_log_path}"
			eval $command
D
dongshuilong 已提交
144 145 146 147 148 149 150 151 152 153 154
                        last_status=${PIPESTATUS[0]}
                        status_check $last_status "${command}" "${status_log}"
                    done
                done
            done
        else
            echo "Does not support hardware other than CPU and GPU Currently!"
        fi
    done
}

# Enter the deploy directory matching the requested inference type.
if [[ $cpp_infer_type == "cls" ]]; then
   cd deploy/cpp
elif [[ $cpp_infer_type == "shitu" ]]; then
   cd deploy/cpp_shitu
else
   echo "Only support cls and shitu"
   # NOTE(review): exits 0 (success) on an unsupported type — presumably so
   # the TIPC driver does not treat it as a test failure; confirm intent.
   exit 0
fi

# PP-ShiTu additionally needs a recent cmake and the faiss library.
if [[ $cpp_infer_type == "shitu" ]]; then
    echo "################### update cmake ###################"
    # -nc: skip the download if the tarball is already present.
    wget -nc  https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0.tar.gz
    tar xf cmake-3.22.0.tar.gz
    cd ./cmake-3.22.0
    export root_path=$PWD
    export install_path=${root_path}/cmake
    eval "./bootstrap --prefix=${install_path}"
    make -j
    make install
    # Put the freshly built cmake ahead of any system cmake.
    export PATH=${install_path}/bin:$PATH
    cd ..
    echo "################### update cmake done ###################"

    echo "################### build faiss ###################"
    apt-get install -y libopenblas-dev
    git clone https://github.com/facebookresearch/faiss.git
    cd faiss
    export faiss_install_path=$PWD/faiss_install
    eval "cmake -B build . -DFAISS_ENABLE_PYTHON=OFF  -DCMAKE_INSTALL_PREFIX=${faiss_install_path}"
    make -C build -j faiss
    make -C build install
fi

# Build opencv 3.4.7 from source unless a previous build exists AND the
# cached tarball's md5 matches (guards against a truncated download).
if [ -d "opencv-3.4.7/opencv3/" ] && [ $(md5sum opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}') = "faa2b5950f8bee3f03118e600c74746a" ];then
    echo "################### build opencv skipped ###################"
else
    echo "################### build opencv ###################"
    rm -rf opencv-3.4.7.tar.gz opencv-3.4.7/
    wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
    tar -xf opencv-3.4.7.tar.gz

    cd opencv-3.4.7/
    install_path=$(pwd)/opencv3

    rm -rf build
    mkdir build
    cd build

    # Static build with bundled image codecs; IPP/LAPACK/EIGEN disabled
    # to keep the build small and dependency-free.
    cmake .. \
            -DCMAKE_INSTALL_PREFIX=${install_path} \
            -DCMAKE_BUILD_TYPE=Release \
            -DBUILD_SHARED_LIBS=OFF \
            -DWITH_IPP=OFF \
            -DBUILD_IPP_IW=OFF \
            -DWITH_LAPACK=OFF \
            -DWITH_EIGEN=OFF \
            -DCMAKE_INSTALL_LIBDIR=lib64 \
            -DWITH_ZLIB=ON \
            -DBUILD_ZLIB=ON \
            -DWITH_JPEG=ON \
            -DBUILD_JPEG=ON \
            -DWITH_PNG=ON \
            -DBUILD_PNG=ON \
            -DWITH_TIFF=ON \
            -DBUILD_TIFF=ON

    make -j
    make install
    cd ../
    echo "################### build opencv finished ###################"
fi


echo "################### build PaddleClas demo ####################"
OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
# NOTE(review): hard-coded machine-specific Paddle Inference path — only
# valid on the CI image this script targets; confirm before reuse.
LIB_DIR=/work/project/project/test/paddle_inference/
# LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`)
CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`)

# Start from a clean build directory every run.
BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
# Configure the demo: cls needs only Paddle/OpenCV/CUDA; shitu additionally
# links faiss (FAISS_DIR is expected in the environment).
if [[ $cpp_infer_type == cls ]]; then
    cmake .. \
	-DPADDLE_LIB=${LIB_DIR} \
	-DWITH_MKL=ON \
	-DWITH_GPU=ON \
	-DWITH_STATIC_LIB=OFF \
	-DWITH_TENSORRT=OFF \
	-DOPENCV_DIR=${OPENCV_DIR} \
	-DCUDNN_LIB=${CUDNN_LIB_DIR} \
	-DCUDA_LIB=${CUDA_LIB_DIR} \
	-DTENSORRT_DIR=${TENSORRT_DIR}
	echo "---------------------------"
else
    cmake ..\
	-DPADDLE_LIB=${LIB_DIR} \
	-DWITH_MKL=ON \
	-DWITH_GPU=ON \
	-DWITH_STATIC_LIB=OFF \
	-DWITH_TENSORRT=OFF \
	-DOPENCV_DIR=${OPENCV_DIR} \
	-DCUDNN_LIB=${CUDNN_LIB_DIR} \
	-DCUDA_LIB=${CUDA_LIB_DIR} \
	-DTENSORRT_DIR=${TENSORRT_DIR} \
	-DFAISS_DIR=${FAISS_DIR} \
	-DFAISS_WITH_MKL=OFF
fi
make -j
# Return from deploy/<cpp|cpp_shitu>/build to the repository root.
cd ../../../
echo "################### build PaddleClas demo finished ###################"
# set cuda device
GPUID=$2
if [ ${#GPUID} -le 0 ];then
    env=" "
else
    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
# NOTE(review): 'set CUDA_VISIBLE_DEVICES' overwrites the positional
# parameters with that literal word — it does NOT unset/export the env var.
# Kept as-is since $1/$2 were already captured above; confirm original intent.
set CUDA_VISIBLE_DEVICES
eval $env


echo "################### run test ###################"
export Count=0
IFS="|"
# Quant flags line up index-for-index with the model dirs.
infer_quant_flag=(${cpp_infer_is_quant})
for infer_model in ${cpp_infer_model_dir[*]}; do
    # Run inference for this model with its matching quant flag.
    is_quant=${infer_quant_flag[Count]}
    if [[ $cpp_infer_type == "cls" ]]; then
    	func_cls_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
    else
    	func_shitu_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
    fi
    Count=$(($Count + 1))
done