From 83d6a5ad2c79c6551ab4c5c0512a5d72a1e1d163 Mon Sep 17 00:00:00 2001
From: Sing_chan <51314274+betterpig@users.noreply.github.com>
Date: Mon, 11 Apr 2022 11:38:59 +0800
Subject: [PATCH] =?UTF-8?q?=E3=80=90cherry-pick=202.3=E3=80=91modify=20inf?=
 =?UTF-8?q?erence=20model=20test=20build=20method=20to=20support=20multi?=
 =?UTF-8?q?=20version=20(#41503)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* change inference demo_test build method to ninja to choose visual studio version automatically

* notest;test=windows_ci_inference

* set cuda of demo_ci by arg,fix bug of ninja compile,test=document_fix;test=windows_ci;test=windows_ci_inference

* fix bug;test=document_fix;test=windows_ci;test=windows_ci_inference

* fix bug;test=document_fix;test=windows_ci_inference

* set lib_path according to generator
---
 .../inference/api/demo_ci/CMakeLists.txt      | 30 ++++++++++-------
 paddle/fluid/inference/api/demo_ci/run.sh     | 33 +++++++++++--------
 .../inference/tests/infer_ut/CMakeLists.txt   | 33 ++++++++++++-------
 .../infer_ut/external-cmake/gtest-cpp.cmake   |  6 ++--
 paddle/fluid/inference/tests/infer_ut/run.sh  | 14 ++++----
 paddle/scripts/paddle_build.bat               |  7 ++--
 6 files changed, 73 insertions(+), 50 deletions(-)

diff --git a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
index df98a7b05cf..c02fcd07813 100644
--- a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
+++ b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
@@ -84,13 +84,15 @@ if(WITH_GPU)
   if(NOT WIN32)
     set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
   else()
-    if(NOT DEFINED CUDA_LIB)
+    set(CUDA_LIB "" CACHE STRING "CUDA_LIB")
+    if("${TENSORRT_ROOT}" STREQUAL "")
       if(DEFINED ENV{CUDA_PATH})
         set(CUDA_LIB "$ENV{CUDA_PATH}\\lib\\x64")
       else()
         set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\lib\\x64")
       endif()
     endif()
+    message(STATUS "Current CUDA lib path: ${CUDA_LIB}")
   endif(NOT WIN32)
 endif()
 
@@ -208,41 +210,47 @@ endif()
 add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
 target_link_libraries(${DEMO_NAME} ${DEPS})
 if(WIN32)
+  if("${CMAKE_GENERATOR}" MATCHES "Ninja")
+    set(LIB_PATH ${CMAKE_BINARY_DIR})
+  else()
+    set(LIB_PATH ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
+  endif()
+
   if(USE_TENSORRT)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
         COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+          ${LIB_PATH}
         COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+          ${LIB_PATH}
     )
     if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
       add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
           COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_SHARED_LIBRARY_SUFFIX}
-            ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
+            ${LIB_PATH})
     endif()
   endif()
   if(WITH_MKL)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
-        COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
-        COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release
+        COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${LIB_PATH}
+        COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${LIB_PATH}
+        COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${LIB_PATH}
     )
   else()
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
+        COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${LIB_PATH}
     )
   endif()
   if(WITH_ONNXRUNTIME)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
         COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib/onnxruntime.dll
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+          ${LIB_PATH}
         COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib/paddle2onnx.dll
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+          ${LIB_PATH}
     )
   endif()
   if(NOT WITH_STATIC_LIB)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+        COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${LIB_PATH}
     )
   endif()
 endif()
diff --git a/paddle/fluid/inference/api/demo_ci/run.sh b/paddle/fluid/inference/api/demo_ci/run.sh
index 2c0945cd5b3..290c547c986 100755
--- a/paddle/fluid/inference/api/demo_ci/run.sh
+++ b/paddle/fluid/inference/api/demo_ci/run.sh
@@ -23,6 +23,7 @@ USE_TENSORRT=$5
 TENSORRT_ROOT_DIR=$6 # TensorRT root dir, default to /usr
 WITH_ONNXRUNTIME=$7
 MSVC_STATIC_CRT=$8
+CUDA_LIB=$9/lib/x64
 inference_install_dir=${PADDLE_ROOT}/build/paddle_inference_install_dir
 
 WIN_DETECT=$(echo `uname` | grep "Win") # detect current platform
@@ -112,16 +113,18 @@ for WITH_STATIC_LIB in ON OFF; do
       continue
     fi
     # -----simple_on_word2vec on windows-----
-    cmake .. -G "Visual Studio 15 2017" -A x64 -T host=x64 -DPADDLE_LIB=${inference_install_dir} \
+    cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
       -DWITH_MKL=$TURN_ON_MKL \
       -DDEMO_NAME=simple_on_word2vec \
       -DWITH_GPU=$TEST_GPU_CPU \
       -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
       -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT \
-      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
-    msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
+      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
+      -DCMAKE_BUILD_TYPE=Release \
+      -DCUDA_LIB="$CUDA_LIB"
+    ninja
     for use_gpu in $use_gpu_list; do
-      Release/simple_on_word2vec.exe \
+      ./simple_on_word2vec.exe \
         --dirname=$DATA_DIR/word2vec/word2vec.inference.model \
         --use_gpu=$use_gpu
       if [ $? -ne 0 ]; then
@@ -132,17 +135,19 @@ for WITH_STATIC_LIB in ON OFF; do
 
     # -----vis_demo on windows-----
     rm -rf *
-    cmake .. -G "Visual Studio 15 2017" -A x64 -T host=x64 -DPADDLE_LIB=${inference_install_dir} \
+    cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
       -DWITH_MKL=$TURN_ON_MKL \
       -DDEMO_NAME=vis_demo \
      -DWITH_GPU=$TEST_GPU_CPU \
       -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
       -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT \
-      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
-    msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
+      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
+      -DCMAKE_BUILD_TYPE=Release \
+      -DCUDA_LIB="$CUDA_LIB"
+    ninja
     for use_gpu in $use_gpu_list; do
       for vis_demo_name in $vis_demo_list; do
-        Release/vis_demo.exe \
+        ./vis_demo.exe \
           --modeldir=$DATA_DIR/$vis_demo_name/model \
           --data=$DATA_DIR/$vis_demo_name/data.txt \
           --refer=$DATA_DIR/$vis_demo_name/result.txt \
@@ -153,11 +158,11 @@ for WITH_STATIC_LIB in ON OFF; do
         fi
       done
     done
-    
+
     # --------tensorrt mobilenet on windows------
     if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
       rm -rf *
-      cmake .. -G "Visual Studio 15 2017" -A x64 -T host=x64 -DPADDLE_LIB=${inference_install_dir} \
+      cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
         -DWITH_MKL=$TURN_ON_MKL \
         -DDEMO_NAME=trt_mobilenet_demo \
         -DWITH_GPU=$TEST_GPU_CPU \
@@ -165,9 +170,11 @@ for WITH_STATIC_LIB in ON OFF; do
         -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT \
         -DUSE_TENSORRT=$USE_TENSORRT \
         -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
-        -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
-      msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
-      Release/trt_mobilenet_demo.exe \
+        -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DCUDA_LIB="$CUDA_LIB"
+      ninja
+      ./trt_mobilenet_demo.exe \
         --modeldir=$DATA_DIR/mobilenet/model \
         --data=$DATA_DIR/mobilenet/data.txt \
         --refer=$DATA_DIR/mobilenet/result.txt
diff --git a/paddle/fluid/inference/tests/infer_ut/CMakeLists.txt b/paddle/fluid/inference/tests/infer_ut/CMakeLists.txt
index f376cbd4fb3..5c17e2d62d3 100644
--- a/paddle/fluid/inference/tests/infer_ut/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/infer_ut/CMakeLists.txt
@@ -83,13 +83,15 @@ if(WITH_GPU)
   if(NOT WIN32)
     set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
   else()
-    if(NOT DEFINED CUDA_LIB)
+    set(CUDA_LIB "" CACHE STRING "CUDA_LIB")
+    if("${TENSORRT_ROOT}" STREQUAL "")
       if(DEFINED ENV{CUDA_PATH})
         set(CUDA_LIB "$ENV{CUDA_PATH}\\lib\\x64")
       else()
         set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\lib\\x64")
       endif()
     endif()
+    message(STATUS "Current CUDA lib path: ${CUDA_LIB}")
   endif(NOT WIN32)
 endif()
 
@@ -236,47 +238,54 @@ if(WITH_GTEST)
   include_directories(${GTEST_INSTALL_DIR}/include)
   add_dependencies(${DEMO_NAME} thirdparty_gtest)
   IF(WIN32)
-    target_link_libraries(${DEMO_NAME} ${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest${CMAKE_STATIC_LIBRARY_SUFFIX})
+    target_link_libraries(${DEMO_NAME} ${GTEST_LIBRARIES})
   ELSE()
     target_link_libraries(${DEMO_NAME} ${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest${CMAKE_STATIC_LIBRARY_SUFFIX})
   ENDIF(WIN32)
 endif()
 if(WIN32)
+  if("${CMAKE_GENERATOR}" MATCHES "Ninja")
+    set(LIB_PATH ${CMAKE_BINARY_DIR})
+  else()
+    set(LIB_PATH ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
+  endif()
+
   if(USE_TENSORRT)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
         COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+          ${LIB_PATH}
         COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+          ${LIB_PATH}
     )
     if(${TENSORRT_MAJOR_VERSION} EQUAL 7)
       add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
           COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_SHARED_LIBRARY_SUFFIX}
-            ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
+            ${LIB_PATH})
     endif()
   endif()
   if(WITH_MKL)
+    message("LIB_PATH IS ${LIB_PATH}")
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
-        COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
-        COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release
+        COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${LIB_PATH}
+        COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${LIB_PATH}
+        COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${LIB_PATH}
     )
   else()
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
+        COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${LIB_PATH}
     )
   endif()
   if(WITH_ONNXRUNTIME)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
         COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib/onnxruntime.dll
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+          ${LIB_PATH}
         COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib/paddle2onnx.dll
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+          ${LIB_PATH}
     )
   endif()
   if(NOT WITH_STATIC_LIB)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+        COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${LIB_PATH}
     )
   endif()
 endif()
diff --git a/paddle/fluid/inference/tests/infer_ut/external-cmake/gtest-cpp.cmake b/paddle/fluid/inference/tests/infer_ut/external-cmake/gtest-cpp.cmake
index 3e83a21e386..b38984314ec 100644
--- a/paddle/fluid/inference/tests/infer_ut/external-cmake/gtest-cpp.cmake
+++ b/paddle/fluid/inference/tests/infer_ut/external-cmake/gtest-cpp.cmake
@@ -8,10 +8,12 @@ set(GTEST_REPOSITORY https://github.com/google/googletest.git)
 set(GTEST_TAG release-1.8.1)
 INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR})
 IF(WIN32)
+    # if use CMAKE_INSTALL_LIBDIR, the path of lib actually is install/gtest/lib/gtest.lib but GTEST_LIBRARIES
+    # is install/gtest/gtest.lib
     set(GTEST_LIBRARIES
-        "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest.lib" CACHE FILEPATH "gtest libraries." FORCE)
+        "${GTEST_INSTALL_DIR}/lib/gtest.lib" CACHE FILEPATH "gtest libraries." FORCE)
     set(GTEST_MAIN_LIBRARIES
-        "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest_main.lib" CACHE FILEPATH "gtest main libraries." FORCE)
+        "${GTEST_INSTALL_DIR}/lib/gtest_main.lib" CACHE FILEPATH "gtest main libraries." FORCE)
 ELSE()
     set(GTEST_LIBRARIES
         "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest.a" CACHE FILEPATH "gtest libraries." FORCE)
diff --git a/paddle/fluid/inference/tests/infer_ut/run.sh b/paddle/fluid/inference/tests/infer_ut/run.sh
index 8123d378500..331608a2cbc 100755
--- a/paddle/fluid/inference/tests/infer_ut/run.sh
+++ b/paddle/fluid/inference/tests/infer_ut/run.sh
@@ -22,6 +22,7 @@ DATA_DIR=$4 # dataset
 TENSORRT_ROOT_DIR=$5 # TensorRT ROOT dir, default to /usr/local/TensorRT
 WITH_ONNXRUNTIME=$6
 MSVC_STATIC_CRT=$7
+CUDA_LIB=$8/lib/x64
 inference_install_dir=${PADDLE_ROOT}/build/paddle_inference_install_dir
 EXIT_CODE=0 # init default exit code
 WIN_DETECT=$(echo `uname` | grep "Win") # detect current platform
@@ -135,7 +136,7 @@ function compile_test() {
     cd ${build_dir}
     TEST_NAME=$1
     if [ $WIN_DETECT != "" ]; then
-        cmake .. -G "Visual Studio 15 2017" -A x64 -T host=x64 -DPADDLE_LIB=${inference_install_dir} \
+        cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
             -DWITH_MKL=$TURN_ON_MKL \
             -DDEMO_NAME=${TEST_NAME} \
             -DWITH_GPU=$TEST_GPU_CPU \
@@ -146,8 +147,9 @@ function compile_test() {
             -DWITH_GTEST=ON \
             -DCMAKE_CXX_FLAGS='/std:c++17' \
             -DCMAKE_BUILD_TYPE=Release \
-            -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
-        msbuild /maxcpucount /property:Configuration=Release ALL_BUILD.vcxproj
+            -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
+            -DCUDA_LIB="$CUDA_LIB"
+        ninja
     else
         cmake .. -DPADDLE_LIB=${inference_install_dir} \
             -DWITH_MKL=$TURN_ON_MKL \
@@ -171,11 +173,7 @@ mkdir -p ${log_dir}
 cd ${build_dir}
 rm -rf *
 
-if [ $WIN_DETECT != "" ]; then
-    exe_dir=${build_dir}/Release
-else
-    exe_dir=${build_dir}
-fi;
+exe_dir=${build_dir}
 
 printf "${YELLOW} start test_resnet50 ${NC} \n";
 compile_test "test_resnet50"
diff --git a/paddle/scripts/paddle_build.bat b/paddle/scripts/paddle_build.bat
index f9ab3f606bf..cc55ea82df6 100644
--- a/paddle/scripts/paddle_build.bat
+++ b/paddle/scripts/paddle_build.bat
@@ -255,7 +255,6 @@ set MSVC_STATIC_CRT=ON
 set ON_INFER=ON
 set WITH_TENSORRT=ON
 set WITH_INFERENCE_API_TEST=ON
-set vcvars64_dir="D:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars64.bat"
 
 call :cmake || goto cmake_error
 call :build || goto build_error
@@ -711,7 +710,7 @@ echo cmake .. -G %GENERATOR% -DCMAKE_BUILD_TYPE=Release -DWITH_AVX=%WITH_AVX% -D
 -DWITH_INFERENCE_API_TEST=%WITH_INFERENCE_API_TEST% -DTHIRD_PARTY_PATH=%THIRD_PARTY_PATH% ^
 -DINFERENCE_DEMO_INSTALL_DIR=%INFERENCE_DEMO_INSTALL_DIR% -DWITH_STATIC_LIB=%WITH_STATIC_LIB% ^
 -DWITH_TENSORRT=%WITH_TENSORRT% -DTENSORRT_ROOT="%TENSORRT_ROOT%" -DMSVC_STATIC_CRT=%MSVC_STATIC_CRT% ^
--DWITH_UNITY_BUILD=%WITH_UNITY_BUILD% -DCUDA_ARCH_NAME=%CUDA_ARCH_NAME% ^
+-DWITH_UNITY_BUILD=%WITH_UNITY_BUILD% -DCUDA_ARCH_NAME=%CUDA_ARCH_NAME% -DCUB_PATH=%THIRD_PARTY_HOME%/cub ^
 -DCUDA_TOOLKIT_ROOT_DIR="%CUDA_TOOLKIT_ROOT_DIR%" -DNEW_RELEASE_ALL=%NEW_RELEASE_ALL% -DNEW_RELEASE_PYPI=%NEW_RELEASE_PYPI% ^
 -DNEW_RELEASE_JIT=%NEW_RELEASE_JIT% >> %work_dir%\win_cmake.sh
 
@@ -753,7 +752,7 @@ for /F %%i in ("%libsize%") do (
 )
 
 cd /d %work_dir%\paddle\fluid\inference\api\demo_ci
-%cache_dir%\tools\busybox64.exe bash run.sh %work_dir:\=/% %WITH_MKL% %WITH_GPU% %cache_dir:\=/%/inference_demo %WITH_TENSORRT% %TENSORRT_ROOT% %WITH_ONNXRUNTIME% %MSVC_STATIC_CRT%
+%cache_dir%\tools\busybox64.exe bash run.sh %work_dir:\=/% %WITH_MKL% %WITH_GPU% %cache_dir:\=/%/inference_demo %WITH_TENSORRT% %TENSORRT_ROOT% %WITH_ONNXRUNTIME% %MSVC_STATIC_CRT% "%CUDA_TOOLKIT_ROOT_DIR%"
 goto:eof
 
 :test_inference_error
@@ -784,7 +783,7 @@ echo Step 7. Testing fluid library with infer_ut for inference ...
 echo ========================================
 
 cd /d %work_dir%\paddle\fluid\inference\tests\infer_ut
-%cache_dir%\tools\busybox64.exe bash run.sh %work_dir:\=/% %WITH_MKL% %WITH_GPU% %cache_dir:\=/%/inference_demo %TENSORRT_ROOT% %WITH_ONNXRUNTIME% %MSVC_STATIC_CRT%
+%cache_dir%\tools\busybox64.exe bash run.sh %work_dir:\=/% %WITH_MKL% %WITH_GPU% %cache_dir:\=/%/inference_demo %TENSORRT_ROOT% %WITH_ONNXRUNTIME% %MSVC_STATIC_CRT% "%CUDA_TOOLKIT_ROOT_DIR%"
 goto:eof
 
 :test_inference_ut_error
-- 
GitLab