diff --git a/test_tipc/docs/test_inference_cpp.md b/test_tipc/docs/test_inference_cpp.md
index a87141a79a3fa21b054857b70df96d49c0caabea..01847af3b694ecc4dfd99d3c686cb31542448adc 100644
--- a/test_tipc/docs/test_inference_cpp.md
+++ b/test_tipc/docs/test_inference_cpp.md
@@ -17,7 +17,10 @@ The main program of the C++ inference functional test is `test_inference_cpp.sh`, which can test
 To set up the runtime environment, please follow the [documentation](./install.md) to configure the TIPC environment.
 ```
 # Please set the paddle_inference environment variable, e.g.:
-export PADDLE_DIR=/path/paddle_inference
+export PADDLE_INFER_DIR=/path/to/paddle_inference
+# If the paddle_inference environment variable is not set, you can instead pass a URL argument so the script downloads paddle_inference.tgz automatically, e.g.:
+bash test_tipc/test_inference_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt 'https://paddle-inference-lib.bj.bcebos.com/2.3.0/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz'
+
 # If you are not using the docker image paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7-gcc82-dev,
 # please set the TensorRT environment variable, e.g.:
 export TENSORRT_ROOT=/usr/local/TensorRT6-cuda10.1-cudnn7
@@ -30,8 +33,10 @@ export TENSORRT_ROOT=/usr/local/TensorRT6-cuda10.1-cudnn7
 bash test_tipc/prepare.sh ./test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt "cpp_infer"
 # Usage 1:
 bash test_tipc/test_inference_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
-# Usage 2: run inference on a specified GPU card; the third argument is the GPU card id
-bash test_tipc/test_inference_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt '1'
+# Usage 2: specify the paddle_inference.tgz download link; the second argument is the download URL
+bash test_tipc/test_inference_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt 'https://paddle-inference-lib.bj.bcebos.com/2.3.0/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz'
+# Usage 3: specify both the paddle_inference.tgz download link and the GPU card; the third argument is the GPU card id
+bash test_tipc/test_inference_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt 'https://paddle-inference-lib.bj.bcebos.com/2.3.0/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz' '1'
 ```

 After the inference command finishes, run logs are saved automatically under the `test_tipc/output` folder, including the following files:
diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index 12e44715b5d17ba9cdd463e11412cbb5b8b832de..4a335d0582e3f4c068707e339f17d145678fe417 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -129,11 +129,21 @@ else
 fi

 # build program
-# TODO: set PADDLE_DIR and TENSORRT_ROOT
-if [ -z $PADDLE_DIR ]; then
-    wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda11.1_cudnn8.1.1_trt7.2.3.4/paddle_inference.tgz --no-check-certificate
-    tar zxf paddle_inference.tgz
-    PADDLE_DIR=$(pwd)/paddle_inference
+# TODO: set PADDLE_INFER_DIR and TENSORRT_ROOT
+if [ -z "$PADDLE_INFER_DIR" ]; then
+    Paddle_Infer_Link=$2
+    if [ -z "$Paddle_Infer_Link" ]; then
+        wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz --no-check-certificate
+        tar zxf paddle_inference.tgz
+        PADDLE_INFER_DIR=$(pwd)/paddle_inference
+    else
+        wget -nc "$Paddle_Infer_Link" --no-check-certificate
+        tar zxf paddle_inference.tgz
+        PADDLE_INFER_DIR=$(pwd)/paddle_inference
+        if [ ! -d "paddle_inference" ]; then
+            PADDLE_INFER_DIR=$(pwd)/paddle_inference_install_dir
+        fi
+    fi
 fi
 if [ -z $TENSORRT_ROOT ]; then
     TENSORRT_ROOT=/usr/local/TensorRT6-cuda10.1-cudnn7
@@ -148,10 +158,10 @@ mkdir -p build
 cd ./build
 cmake .. \
   -DWITH_GPU=ON \
-  -DWITH_MKL=OFF \
+  -DWITH_MKL=ON \
   -DWITH_TENSORRT=OFF \
   -DPADDLE_LIB_NAME=libpaddle_inference \
-  -DPADDLE_DIR=${PADDLE_DIR} \
+  -DPADDLE_DIR=${PADDLE_INFER_DIR} \
   -DCUDA_LIB=${CUDA_LIB} \
   -DCUDNN_LIB=${CUDNN_LIB} \
   -DTENSORRT_LIB_DIR=${TENSORRT_LIB_DIR} \
@@ -160,13 +170,13 @@ cmake .. \
   -DWITH_KEYPOINT=ON \
   -DWITH_MOT=ON

-make -j4
+make -j8

 cd ../../../
 echo "################### build finished! ###################"

 # set cuda device
-GPUID=$2
+GPUID=$3
 if [ ${#GPUID} -le 0 ];then
     env=" "
 else
@@ -178,7 +188,6 @@ Count=0
 IFS="|"
 infer_quant_flag=(${cpp_infer_is_quant_list})
 for infer_mode in ${cpp_infer_mode_list[*]}; do
-    # run export
     case ${infer_mode} in
         norm) run_export=${norm_export} ;;