Unverified commit 83d6a5ad, authored by Sing_chan, committed by GitHub

【cherry-pick 2.3】modify inference model test build method to support multiple versions (#41503)

* change inference demo_test build method to Ninja to choose the Visual Studio version automatically

* notest;test=windows_ci_inference

* set CUDA of demo_ci by arg, fix bug of ninja compile;test=document_fix;test=windows_ci;test=windows_ci_inference

* fix bug;test=document_fix;test=windows_ci;test=windows_ci_inference

* fix bug;test=document_fix;test=windows_ci_inference

* set LIB_PATH according to generator (see the sketch after this header)

Parent 8525bc63
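The core of the change: the demos were previously configured with a hard-coded -G "Visual Studio 15 2017" generator, so the build broke whenever a different Visual Studio was installed. Ninja instead drives whichever MSVC toolchain is active in the environment. Ninja is a single-config generator, though, so outputs land in the build root rather than in a Release/ subdirectory; the hunks below account for that with a LIB_PATH variable keyed on CMAKE_GENERATOR. A minimal sketch of an equivalent, generator-agnostic check follows; this is not what the commit uses, and it assumes CMake >= 3.9 for the GENERATOR_IS_MULTI_CONFIG property:

    # Sketch: distinguish single- from multi-config generators directly,
    # instead of matching on the generator name.
    get_property(_is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
    if(_is_multi_config)
      # Visual Studio / Xcode: binaries go to a per-configuration subdirectory.
      set(LIB_PATH ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
    else()
      # Ninja / Makefiles: binaries go to the build root.
      set(LIB_PATH ${CMAKE_BINARY_DIR})
    endif()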
@@ -84,13 +84,15 @@ if(WITH_GPU)
   if(NOT WIN32)
     set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
   else()
-    if(NOT DEFINED CUDA_LIB)
+    set(CUDA_LIB "" CACHE STRING "CUDA_LIB")
+    if("${TENSORRT_ROOT}" STREQUAL "")
       if(DEFINED ENV{CUDA_PATH})
         set(CUDA_LIB "$ENV{CUDA_PATH}\\lib\\x64")
       else()
         set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\lib\\x64")
       endif()
     endif()
+    message(STATUS "Current CUDA lib path: ${CUDA_LIB}")
   endif(NOT WIN32)
 endif()
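With CUDA_LIB now a cache variable, the CUDA library directory can be supplied explicitly at configure time instead of being inferred from the CUDA_PATH environment variable. A hedged example of a manual configure for one demo; all paths and values here are illustrative, not taken from the commit:

    # Hypothetical manual configure of a demo on Windows (paths are examples only).
    cmake .. -GNinja \
      -DPADDLE_LIB=/c/paddle_inference_install_dir \
      -DDEMO_NAME=simple_on_word2vec \
      -DWITH_GPU=ON \
      -DCMAKE_BUILD_TYPE=Release \
      -DCUDA_LIB="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2/lib/x64"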
@@ -208,41 +210,47 @@ endif()
 add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
 target_link_libraries(${DEMO_NAME} ${DEPS})
 if(WIN32)
+  if("${CMAKE_GENERATOR}" MATCHES "Ninja")
+    set(LIB_PATH ${CMAKE_BINARY_DIR})
+  else()
+    set(LIB_PATH ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
+  endif()
   if(USE_TENSORRT)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
       COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}
-        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+        ${LIB_PATH}
       COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
-        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+        ${LIB_PATH}
     )
     if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
       add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
        COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_SHARED_LIBRARY_SUFFIX}
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
+          ${LIB_PATH})
     endif()
   endif()
   if(WITH_MKL)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
-      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
-      COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release
+      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${LIB_PATH}
+      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${LIB_PATH}
+      COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${LIB_PATH}
     )
   else()
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
+      COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${LIB_PATH}
     )
   endif()
   if(WITH_ONNXRUNTIME)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
       COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib/onnxruntime.dll
-        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+        ${LIB_PATH}
       COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib/paddle2onnx.dll
-        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+        ${LIB_PATH}
     )
   endif()
   if(NOT WITH_STATIC_LIB)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+      COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${LIB_PATH}
     )
   endif()
 endif()
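An aside: the same result could be had without a LIB_PATH variable at all by copying next to the freshly built target, since the $<TARGET_FILE_DIR:...> generator expression resolves correctly under both single- and multi-config generators. This is only an alternative sketch, not what the commit does:

    # Sketch: copy the runtime DLL next to the demo executable, wherever the
    # active generator happens to place it.
    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
      COMMAND ${CMAKE_COMMAND} -E copy
        "${PADDLE_LIB}/paddle/lib/paddle_inference.dll"
        $<TARGET_FILE_DIR:${DEMO_NAME}>)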
@@ -23,6 +23,7 @@ USE_TENSORRT=$5
 TENSORRT_ROOT_DIR=$6 # TensorRT root dir, default to /usr
 WITH_ONNXRUNTIME=$7
 MSVC_STATIC_CRT=$8
+CUDA_LIB=$9/lib/x64
 inference_install_dir=${PADDLE_ROOT}/build/paddle_inference_install_dir
 WIN_DETECT=$(echo `uname` | grep "Win") # detect current platform
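run.sh now takes the CUDA toolkit root as a ninth positional argument and derives the library directory from it. A hedged invocation example; the argument values are illustrative, and the quoting on the last argument matters because the CUDA root usually contains spaces:

    # Hypothetical direct call; in CI the script is launched from paddle_build.bat.
    bash run.sh /c/paddle ON ON /c/cache/inference_demo ON /c/TensorRT ON ON \
        "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2"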
@@ -112,16 +113,18 @@ for WITH_STATIC_LIB in ON OFF; do
       continue
     fi
     # -----simple_on_word2vec on windows-----
-    cmake .. -G "Visual Studio 15 2017" -A x64 -T host=x64 -DPADDLE_LIB=${inference_install_dir} \
+    cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
       -DWITH_MKL=$TURN_ON_MKL \
      -DDEMO_NAME=simple_on_word2vec \
      -DWITH_GPU=$TEST_GPU_CPU \
      -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
      -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT \
-      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
-    msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
+      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
+      -DCMAKE_BUILD_TYPE=Release \
+      -DCUDA_LIB="$CUDA_LIB"
+    ninja
     for use_gpu in $use_gpu_list; do
-      Release/simple_on_word2vec.exe \
+      ./simple_on_word2vec.exe \
         --dirname=$DATA_DIR/word2vec/word2vec.inference.model \
         --use_gpu=$use_gpu
       if [ $? -ne 0 ]; then
@@ -132,17 +135,19 @@ for WITH_STATIC_LIB in ON OFF; do
     # -----vis_demo on windows-----
     rm -rf *
-    cmake .. -G "Visual Studio 15 2017" -A x64 -T host=x64 -DPADDLE_LIB=${inference_install_dir} \
+    cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
      -DWITH_MKL=$TURN_ON_MKL \
      -DDEMO_NAME=vis_demo \
      -DWITH_GPU=$TEST_GPU_CPU \
      -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
      -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT \
-      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
-    msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
+      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
+      -DCMAKE_BUILD_TYPE=Release \
+      -DCUDA_LIB="$CUDA_LIB"
+    ninja
     for use_gpu in $use_gpu_list; do
       for vis_demo_name in $vis_demo_list; do
-        Release/vis_demo.exe \
+        ./vis_demo.exe \
           --modeldir=$DATA_DIR/$vis_demo_name/model \
           --data=$DATA_DIR/$vis_demo_name/data.txt \
           --refer=$DATA_DIR/$vis_demo_name/result.txt \
@@ -153,11 +158,11 @@ for WITH_STATIC_LIB in ON OFF; do
         fi
       done
     done
     # --------tensorrt mobilenet on windows------
     if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
       rm -rf *
-      cmake .. -G "Visual Studio 15 2017" -A x64 -T host=x64 -DPADDLE_LIB=${inference_install_dir} \
+      cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
        -DWITH_MKL=$TURN_ON_MKL \
        -DDEMO_NAME=trt_mobilenet_demo \
        -DWITH_GPU=$TEST_GPU_CPU \
@@ -165,9 +170,11 @@ for WITH_STATIC_LIB in ON OFF; do
        -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT \
        -DUSE_TENSORRT=$USE_TENSORRT \
        -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
-        -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
-      msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
-      Release/trt_mobilenet_demo.exe \
+        -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DCUDA_LIB="$CUDA_LIB"
+      ninja
+      ./trt_mobilenet_demo.exe \
         --modeldir=$DATA_DIR/mobilenet/model \
         --data=$DATA_DIR/mobilenet/data.txt \
         --refer=$DATA_DIR/mobilenet/result.txt
...
@@ -83,13 +83,15 @@ if(WITH_GPU)
   if(NOT WIN32)
     set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
   else()
-    if(NOT DEFINED CUDA_LIB)
+    set(CUDA_LIB "" CACHE STRING "CUDA_LIB")
+    if("${TENSORRT_ROOT}" STREQUAL "")
       if(DEFINED ENV{CUDA_PATH})
         set(CUDA_LIB "$ENV{CUDA_PATH}\\lib\\x64")
       else()
         set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\lib\\x64")
       endif()
     endif()
+    message(STATUS "Current CUDA lib path: ${CUDA_LIB}")
   endif(NOT WIN32)
 endif()
@@ -236,47 +238,54 @@ if(WITH_GTEST)
   include_directories(${GTEST_INSTALL_DIR}/include)
   add_dependencies(${DEMO_NAME} thirdparty_gtest)
   IF(WIN32)
-    target_link_libraries(${DEMO_NAME} ${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest${CMAKE_STATIC_LIBRARY_SUFFIX})
+    target_link_libraries(${DEMO_NAME} ${GTEST_LIBRARIES})
   ELSE()
     target_link_libraries(${DEMO_NAME} ${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest${CMAKE_STATIC_LIBRARY_SUFFIX})
   ENDIF(WIN32)
 endif()
 if(WIN32)
+  if("${CMAKE_GENERATOR}" MATCHES "Ninja")
+    set(LIB_PATH ${CMAKE_BINARY_DIR})
+  else()
+    set(LIB_PATH ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
+  endif()
   if(USE_TENSORRT)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
       COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}
-        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+        ${LIB_PATH}
       COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
-        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+        ${LIB_PATH}
     )
     if(${TENSORRT_MAJOR_VERSION} EQUAL 7)
       add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
         COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_SHARED_LIBRARY_SUFFIX}
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
+          ${LIB_PATH})
     endif()
   endif()
   if(WITH_MKL)
+    message("LIB_PATH IS ${LIB_PATH}")
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
-      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
-      COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release
+      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${LIB_PATH}
+      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${LIB_PATH}
+      COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${LIB_PATH}
     )
   else()
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
+      COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${LIB_PATH}
    )
   endif()
   if(WITH_ONNXRUNTIME)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
       COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib/onnxruntime.dll
-        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+        ${LIB_PATH}
       COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib/paddle2onnx.dll
-        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+        ${LIB_PATH}
     )
   endif()
   if(NOT WITH_STATIC_LIB)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+      COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${LIB_PATH}
     )
   endif()
 endif()
@@ -8,10 +8,12 @@ set(GTEST_REPOSITORY https://github.com/google/googletest.git)
 set(GTEST_TAG release-1.8.1)
 INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR})
 IF(WIN32)
+  # if CMAKE_INSTALL_LIBDIR is used, the library actually lands in install/gtest/lib/gtest.lib
+  # but GTEST_LIBRARIES would point to install/gtest/gtest.lib, so hardcode lib/
   set(GTEST_LIBRARIES
-    "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest.lib" CACHE FILEPATH "gtest libraries." FORCE)
+    "${GTEST_INSTALL_DIR}/lib/gtest.lib" CACHE FILEPATH "gtest libraries." FORCE)
   set(GTEST_MAIN_LIBRARIES
-    "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest_main.lib" CACHE FILEPATH "gtest main libraries." FORCE)
+    "${GTEST_INSTALL_DIR}/lib/gtest_main.lib" CACHE FILEPATH "gtest main libraries." FORCE)
 ELSE()
   set(GTEST_LIBRARIES
     "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest.a" CACHE FILEPATH "gtest libraries." FORCE)
...
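The comment in this hunk is the whole story: if CMAKE_INSTALL_LIBDIR expands to an empty string in this scope (an assumption, but consistent with the paths named in the comment, since the variable is normally populated by include(GNUInstallDirs)), the doubled slash collapses and GTEST_LIBRARIES points one directory too high. A minimal repro sketch:

    # Sketch, assuming CMAKE_INSTALL_LIBDIR is empty in this scope.
    set(GTEST_INSTALL_DIR "C:/build/third_party/install/gtest")  # hypothetical path
    message(STATUS "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest.lib")
    # Prints .../install/gtest//gtest.lib, i.e. gtest.lib at the install root,
    # while the file is actually at .../install/gtest/lib/gtest.lib.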
@@ -22,6 +22,7 @@ DATA_DIR=$4 # dataset
 TENSORRT_ROOT_DIR=$5 # TensorRT ROOT dir, default to /usr/local/TensorRT
 WITH_ONNXRUNTIME=$6
 MSVC_STATIC_CRT=$7
+CUDA_LIB=$8/lib/x64
 inference_install_dir=${PADDLE_ROOT}/build/paddle_inference_install_dir
 EXIT_CODE=0 # init default exit code
 WIN_DETECT=$(echo `uname` | grep "Win") # detect current platform
@@ -135,7 +136,7 @@ function compile_test() {
     cd ${build_dir}
     TEST_NAME=$1
     if [ $WIN_DETECT != "" ]; then
-        cmake .. -G "Visual Studio 15 2017" -A x64 -T host=x64 -DPADDLE_LIB=${inference_install_dir} \
+        cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
             -DWITH_MKL=$TURN_ON_MKL \
             -DDEMO_NAME=${TEST_NAME} \
             -DWITH_GPU=$TEST_GPU_CPU \
@@ -146,8 +147,9 @@ function compile_test() {
             -DWITH_GTEST=ON \
             -DCMAKE_CXX_FLAGS='/std:c++17' \
             -DCMAKE_BUILD_TYPE=Release \
-            -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
-        msbuild /maxcpucount /property:Configuration=Release ALL_BUILD.vcxproj
+            -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
+            -DCUDA_LIB="$CUDA_LIB"
+        ninja
     else
         cmake .. -DPADDLE_LIB=${inference_install_dir} \
             -DWITH_MKL=$TURN_ON_MKL \
@@ -171,11 +173,7 @@ mkdir -p ${log_dir}
 cd ${build_dir}
 rm -rf *
-if [ $WIN_DETECT != "" ]; then
-    exe_dir=${build_dir}/Release
-else
-    exe_dir=${build_dir}
-fi;
+exe_dir=${build_dir}
 printf "${YELLOW} start test_resnet50 ${NC} \n";
 compile_test "test_resnet50"
...
@@ -255,7 +255,6 @@ set MSVC_STATIC_CRT=ON
 set ON_INFER=ON
 set WITH_TENSORRT=ON
 set WITH_INFERENCE_API_TEST=ON
-set vcvars64_dir="D:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars64.bat"
 call :cmake || goto cmake_error
 call :build || goto build_error
@@ -711,7 +710,7 @@ echo cmake .. -G %GENERATOR% -DCMAKE_BUILD_TYPE=Release -DWITH_AVX=%WITH_AVX% -D
 -DWITH_INFERENCE_API_TEST=%WITH_INFERENCE_API_TEST% -DTHIRD_PARTY_PATH=%THIRD_PARTY_PATH% ^
 -DINFERENCE_DEMO_INSTALL_DIR=%INFERENCE_DEMO_INSTALL_DIR% -DWITH_STATIC_LIB=%WITH_STATIC_LIB% ^
 -DWITH_TENSORRT=%WITH_TENSORRT% -DTENSORRT_ROOT="%TENSORRT_ROOT%" -DMSVC_STATIC_CRT=%MSVC_STATIC_CRT% ^
--DWITH_UNITY_BUILD=%WITH_UNITY_BUILD% -DCUDA_ARCH_NAME=%CUDA_ARCH_NAME% ^
+-DWITH_UNITY_BUILD=%WITH_UNITY_BUILD% -DCUDA_ARCH_NAME=%CUDA_ARCH_NAME% -DCUB_PATH=%THIRD_PARTY_HOME%/cub ^
 -DCUDA_TOOLKIT_ROOT_DIR="%CUDA_TOOLKIT_ROOT_DIR%" -DNEW_RELEASE_ALL=%NEW_RELEASE_ALL% -DNEW_RELEASE_PYPI=%NEW_RELEASE_PYPI% ^
 -DNEW_RELEASE_JIT=%NEW_RELEASE_JIT% >> %work_dir%\win_cmake.sh
@@ -753,7 +752,7 @@ for /F %%i in ("%libsize%") do (
 )
 cd /d %work_dir%\paddle\fluid\inference\api\demo_ci
-%cache_dir%\tools\busybox64.exe bash run.sh %work_dir:\=/% %WITH_MKL% %WITH_GPU% %cache_dir:\=/%/inference_demo %WITH_TENSORRT% %TENSORRT_ROOT% %WITH_ONNXRUNTIME% %MSVC_STATIC_CRT%
+%cache_dir%\tools\busybox64.exe bash run.sh %work_dir:\=/% %WITH_MKL% %WITH_GPU% %cache_dir:\=/%/inference_demo %WITH_TENSORRT% %TENSORRT_ROOT% %WITH_ONNXRUNTIME% %MSVC_STATIC_CRT% "%CUDA_TOOLKIT_ROOT_DIR%"
 goto:eof
 :test_inference_error
@@ -784,7 +783,7 @@ echo Step 7. Testing fluid library with infer_ut for inference ...
 echo ========================================
 cd /d %work_dir%\paddle\fluid\inference\tests\infer_ut
-%cache_dir%\tools\busybox64.exe bash run.sh %work_dir:\=/% %WITH_MKL% %WITH_GPU% %cache_dir:\=/%/inference_demo %TENSORRT_ROOT% %WITH_ONNXRUNTIME% %MSVC_STATIC_CRT%
+%cache_dir%\tools\busybox64.exe bash run.sh %work_dir:\=/% %WITH_MKL% %WITH_GPU% %cache_dir:\=/%/inference_demo %TENSORRT_ROOT% %WITH_ONNXRUNTIME% %MSVC_STATIC_CRT% "%CUDA_TOOLKIT_ROOT_DIR%"
 goto:eof
 :test_inference_ut_error
...
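Three things happen in this file: the now-unused vcvars64_dir pin to VS 2019 is dropped (Ninja picks up whichever toolchain is active), the echoed cmake line additionally records -DCUB_PATH, and both run.sh calls gain the quoted CUDA root as their final argument. The quoting is load-bearing because the default CUDA install path contains spaces; a sketch with an illustrative value:

    rem Sketch only; the actual value comes from the CI environment.
    set "CUDA_TOOLKIT_ROOT_DIR=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2"
    rem run.sh receives this as a single argument and appends /lib/x64 itself.
    echo "%CUDA_TOOLKIT_ROOT_DIR%"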