Unverified commit 3241cea2, authored by heliqi, committed by GitHub

Fix ORT test case compilation errors on Windows (#42186)

* fix windows compile test case error

* test windows ci

* cmake add onnxruntime

* cmake add onnxruntime

* test windows ci

* auto_code_generator add ort lib copy

* fallback modify windows ci bat

* ci notest;test=document_fix;test=windows_ci_inference;test=windows_ci;test=windows_op
Parent 8df81f83
@@ -53,6 +53,7 @@ set(PADDLE2ONNX_OPTIONAL_ARGS
     -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
     -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
     -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+    -DCMAKE_CXX_STANDARD=14
     -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
     -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
     -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
@@ -60,6 +61,7 @@ set(PADDLE2ONNX_OPTIONAL_ARGS
     -DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE}
     -DONNX_CUSTOM_PROTOC_PATH=${PROTOC_BIN_PATH}
     -DWITH_STATIC=OFF
+    -DMSVC_STATIC_CRT=${MSVC_STATIC_CRT}
     -DCMAKE_INSTALL_PREFIX=${PADDLE2ONNX_INSTALL_DIR}
     -DCMAKE_INSTALL_LIBDIR=${PADDLE2ONNX_INSTALL_DIR}/${LIBDIR}
     -DCMAKE_POSITION_INDEPENDENT_CODE=ON
......
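The two new arguments pin the external build's C++ standard to 14 and forward Paddle's CRT choice, so the paddle2onnx sub-build links the same MSVC runtime as the main build. As a hedged illustration of how a project conventionally consumes such an MSVC_STATIC_CRT option (this is the common CMake idiom, not paddle2onnx's actual code):

```cmake
# Common idiom for honoring an MSVC_STATIC_CRT option: rewrite the
# per-configuration compiler flags from the DLL runtime (/MD) to the
# static runtime (/MT) so all objects link one CRT.
if(MSVC AND MSVC_STATIC_CRT)
  foreach(flag_var
      CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
      CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE)
    if(${flag_var} MATCHES "/MD")
      string(REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
    endif()
  endforeach()
endif()
```

Mixing /MD and /MT objects between the main build and an external project is a frequent source of Windows link errors, which is what forwarding the flag avoids.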
@@ -57,6 +57,18 @@ if(WIN32)
     list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/mkldnn.dll)
   endif()
+  if(WITH_ONNXRUNTIME)
+    message("Copied onnxruntime for Eager AutoCodeGen")
+    ADD_CUSTOM_COMMAND(OUTPUT ${eager_generator_path}/onnxruntime.dll
+      COMMAND ${CMAKE_COMMAND} -E copy ${ONNXRUNTIME_SHARED_LIB} ${eager_generator_path}
+      DEPENDS onnxruntime)
+    list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/onnxruntime.dll)
+    ADD_CUSTOM_COMMAND(OUTPUT ${eager_generator_path}/paddle2onnx.dll
+      COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE2ONNX_SHARED_LIB} ${eager_generator_path}
+      DEPENDS paddle2onnx)
+    list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/paddle2onnx.dll)
+  endif()
   add_custom_target(eager_codegen
     COMMAND "${eager_generator_path}/eager_generator.exe" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated"
     COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_dygraph_forward_h_path} ${dygraph_forward_h_path}
......
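This block is what lets the Windows codegen step run: each ADD_CUSTOM_COMMAND names the copied DLL as its OUTPUT, and appending that path to EAGER_CODEGEN_DEPS makes the codegen target wait for the copy, so onnxruntime.dll and paddle2onnx.dll sit next to eager_generator.exe before it executes. A minimal generic sketch of this copy-then-depend pattern (target and variable names are illustrative, not Paddle's):

```cmake
# Copy a runtime DLL next to a code-generator executable and make the
# generator target wait for the copy (illustrative names throughout).
set(gen_dir ${CMAKE_BINARY_DIR}/codegen)
add_custom_command(
  OUTPUT ${gen_dir}/some_dep.dll
  COMMAND ${CMAKE_COMMAND} -E copy ${SOME_DEP_SHARED_LIB} ${gen_dir}
  DEPENDS some_dep)                  # re-copy whenever the library rebuilds
add_custom_target(run_codegen
  COMMAND ${gen_dir}/generator.exe
  DEPENDS ${gen_dir}/some_dep.dll)   # the DLL must exist before the exe runs
```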
@@ -46,10 +46,6 @@ set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor
     zero_copy_tensor reset_tensor_array
     analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg})
-if(WITH_ONNXRUNTIME)
-  set(STATIC_INFERENCE_API ${STATIC_INFERENCE_API} onnxruntime_predictor)
-endif()
 #windows GPU static library over the limit, so not create_static_lib, and cc_library is dummy
 if(WIN32 AND WITH_GPU)
   cc_library(paddle_inference DEPS ${fluid_modules} phi ${STATIC_INFERENCE_API} ${utils_modules})
@@ -98,7 +94,6 @@ if (WITH_ONNXRUNTIME)
   set(SHARED_INFERENCE_SRCS ${SHARED_INFERENCE_SRCS}
     ${CMAKE_CURRENT_SOURCE_DIR}/api/onnxruntime_predictor.cc
   )
-  set(SHARED_INFERENCE_DEPS ${SHARED_INFERENCE_DEPS} onnxruntime_predictor)
 endif (WITH_ONNXRUNTIME)

 # Create shared inference library
......
@@ -50,9 +50,8 @@ if(WITH_GPU AND TENSORRT_FOUND)
 endif()

 if (WITH_ONNXRUNTIME)
-  cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps}
+  cc_library(analysis_predictor SRCS analysis_predictor.cc onnxruntime_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps}
     zero_copy_tensor ir_pass_manager op_compatible_info infer_io_utils onnxruntime paddle2onnx)
-  cc_library(onnxruntime_predictor SRCS onnxruntime_predictor.cc DEPS analysis_predictor)
 else (WITH_ONNXRUNTIME)
   cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps}
     zero_copy_tensor ir_pass_manager op_compatible_info infer_io_utils)
@@ -82,16 +81,6 @@ elseif (WIN32)
     ARGS --dirname=${WORD2VEC_MODEL_DIR})
 endif()

-if (WITH_ONNXRUNTIME)
-  if (NOT APPLE AND NOT WIN32)
-    cc_test(test_onnxruntime_predictor SRCS onnxruntime_predictor_tester.cc DEPS paddle_inference_shared
-      ARGS --dirname=${MOBILENETV2_MODEL_DIR})
-  elseif (WIN32)
-    cc_test(test_onnxruntime_predictor SRCS onnxruntime_predictor_tester.cc DEPS onnxruntime_predictor benchmark ${inference_deps}
-      ARGS --dirname=${MOBILENETV2_MODEL_DIR})
-  endif()
-endif()
-
 if(WITH_TESTING AND WITH_MKLDNN)
   if (NOT APPLE AND NOT WIN32)
     cc_test(test_mkldnn_quantizer SRCS mkldnn_quantizer_tester.cc DEPS paddle_inference_shared ARGS --dirname=${WORD2VEC_MODEL_DIR})
......
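This is the heart of the fix: onnxruntime_predictor.cc is now compiled directly into analysis_predictor instead of living in its own cc_library, so the Windows test binaries no longer have to link (and fail to resolve) a separate onnxruntime_predictor target; the earlier removals from STATIC_INFERENCE_API and SHARED_INFERENCE_DEPS, and the deleted test_onnxruntime_predictor rules, all follow from that. A simplified sketch of the equivalent conditional-source pattern (DEPS abbreviated; this restates the hunk, it is not the literal diff):

```cmake
# Fold an optional backend's sources into the main library instead of
# defining a second library target (simplified; DEPS abbreviated).
set(predictor_srcs analysis_predictor.cc)
set(predictor_deps ${inference_deps} zero_copy_tensor ir_pass_manager)
if(WITH_ONNXRUNTIME)
  list(APPEND predictor_srcs onnxruntime_predictor.cc)
  list(APPEND predictor_deps onnxruntime paddle2onnx)
endif()
cc_library(analysis_predictor SRCS ${predictor_srcs} DEPS ${predictor_deps})
```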
@@ -16,9 +16,10 @@
 cc_library(reset_tensor_array SRCS reset_tensor_array.cc DEPS lod_tensor scope)
 if (WITH_ONNXRUNTIME)
   cc_library(zero_copy_tensor SRCS zero_copy_tensor.cc DEPS scope lod_tensor enforce onnxruntime)
+  cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc DEPS onnxruntime)
 else (WITH_ONNXRUNTIME)
   cc_library(zero_copy_tensor SRCS zero_copy_tensor.cc DEPS scope lod_tensor enforce)
+  cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc)
 endif (WITH_ONNXRUNTIME)
-cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc)

 cc_test(zero_copy_tensor_test SRCS zero_copy_tensor_test.cc DEPS paddle_inference_api)
@@ -86,10 +86,6 @@ set(PYBIND_SRCS
     communication.cc
     cuda_streams_py.cc)

-if (WITH_ONNXRUNTIME)
-  set(PYBIND_DEPS ${PYBIND_DEPS} onnxruntime_predictor)
-endif()
-
 if(NOT ON_INFER)
   set (PYBIND_DEPS ${PYBIND_DEPS} processgroup eager_reducer)
   if (WITH_NCCL)
@@ -172,10 +168,6 @@ if(WITH_PYTHON)
     list(APPEND OP_FUNCTION_GENERETOR_DEPS hccl_context)
   endif(WITH_ASCEND_CL)

-  if (WITH_ONNXRUNTIME)
-    list(APPEND OP_FUNCTION_GENERETOR_DEPS onnxruntime_predictor)
-  endif()
-
   if(WITH_CNCL)
     list(APPEND OP_FUNCTION_GENERETOR_DEPS cncl_context)
   endif(WITH_CNCL)
......
@@ -86,6 +86,10 @@ if not defined NEW_RELEASE_JIT set NEW_RELEASE_JIT=OFF
 set task_name=%1
 set UPLOAD_TP_FILE=OFF

+rem ------initialize set git config------
+git config --global core.longpaths true
+
 rem ------initialize the python environment------
 set PYTHON_EXECUTABLE=%PYTHON_ROOT%\python.exe
 set PATH=%PYTHON_ROOT%\Scripts;%PYTHON_ROOT%;%PATH%
@@ -255,6 +259,7 @@ set MSVC_STATIC_CRT=ON
 set ON_INFER=ON
 set WITH_TENSORRT=ON
 set WITH_INFERENCE_API_TEST=ON
+set WITH_ONNXRUNTIME=ON
 call :cmake || goto cmake_error
 call :build || goto build_error
......