未验证 提交 6d1b8c52 编写于 作者: S Sing_chan 提交者: GitHub

fix bug when building inference lib without TensorRT (#38156)

上级 eaa2363e
......@@ -19,8 +19,9 @@ PADDLE_ROOT=$1
TURN_ON_MKL=$2 # use MKL or Openblas
TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
DATA_DIR=$4 # dataset
TENSORRT_ROOT_DIR=$5 # TensorRT root dir, default to /usr
MSVC_STATIC_CRT=$6
USE_TENSORRT=$5
TENSORRT_ROOT_DIR=$6 # TensorRT root dir, default to /usr
MSVC_STATIC_CRT=$7
inference_install_dir=${PADDLE_ROOT}/build/paddle_inference_install_dir
WIN_DETECT=$(echo `uname` | grep "Win") # detect current platform
......@@ -37,11 +38,6 @@ else
use_gpu_list='false'
fi
USE_TENSORRT=OFF
if [ -d "$TENSORRT_ROOT_DIR" ]; then
USE_TENSORRT=ON
fi
PREFIX=inference-vis-demos%2F
URL_ROOT=http://paddlemodels.bj.bcebos.com/${PREFIX}
......
......@@ -742,7 +742,7 @@ for /F %%i in ("%libsize%") do (
)
cd /d %work_dir%\paddle\fluid\inference\api\demo_ci
%cache_dir%\tools\busybox64.exe bash run.sh %work_dir:\=/% %WITH_MKL% %WITH_GPU% %cache_dir:\=/%/inference_demo %TENSORRT_ROOT% %MSVC_STATIC_CRT%
%cache_dir%\tools\busybox64.exe bash run.sh %work_dir:\=/% %WITH_MKL% %WITH_GPU% %cache_dir:\=/%/inference_demo %WITH_TENSORRT% %TENSORRT_ROOT% %MSVC_STATIC_CRT%
goto:eof
:test_inference_error
......
......@@ -2244,7 +2244,7 @@ EOF
demo_ci_startTime_s=`date +%s`
cd ${PADDLE_ROOT}/paddle/fluid/inference/api/demo_ci
./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR} \
${TENSORRT_ROOT_DIR:-/usr}
${WITH_TENSORRT:-ON} ${TENSORRT_ROOT_DIR:-/usr}
DEMO_EXIT_CODE=$?
./clean.sh
demo_ci_endTime_s=`date +%s`
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册