From 5eba384704ebe292bae6deb97b5b461c0a07f5a9 Mon Sep 17 00:00:00 2001
From: heliqi <1101791222@qq.com>
Date: Tue, 26 Apr 2022 14:09:17 +0800
Subject: [PATCH] [Cherry-Pick]Fix compiling ort test cases error on
 Windows(#42186) (#42247)

* fix windows compile test case error
---
 cmake/external/paddle2onnx.cmake                   |  2 ++
 .../fluid/eager/auto_code_generator/CMakeLists.txt | 12 ++++++++++++
 paddle/fluid/inference/CMakeLists.txt              |  5 -----
 paddle/fluid/inference/api/CMakeLists.txt          | 13 +------------
 paddle/fluid/inference/api/details/CMakeLists.txt  |  3 ++-
 paddle/fluid/pybind/CMakeLists.txt                 |  8 --------
 paddle/scripts/paddle_build.bat                    |  5 +++++
 7 files changed, 22 insertions(+), 26 deletions(-)

diff --git a/cmake/external/paddle2onnx.cmake b/cmake/external/paddle2onnx.cmake
index ba6f0396008..6db71a804ac 100644
--- a/cmake/external/paddle2onnx.cmake
+++ b/cmake/external/paddle2onnx.cmake
@@ -53,6 +53,7 @@ set(PADDLE2ONNX_OPTIONAL_ARGS
     -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
     -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
     -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+    -DCMAKE_CXX_STANDARD=11
     -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
     -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
     -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
@@ -60,6 +61,7 @@ set(PADDLE2ONNX_OPTIONAL_ARGS
     -DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE}
     -DONNX_CUSTOM_PROTOC_PATH=${PROTOC_BIN_PATH}
     -DWITH_STATIC=OFF
+    -DMSVC_STATIC_CRT=${MSVC_STATIC_CRT}
     -DCMAKE_INSTALL_PREFIX=${PADDLE2ONNX_INSTALL_DIR}
     -DCMAKE_INSTALL_LIBDIR=${PADDLE2ONNX_INSTALL_DIR}/${LIBDIR}
     -DCMAKE_POSITION_INDEPENDENT_CODE=ON
diff --git a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
index 668e60d857b..d673c64d9da 100644
--- a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
+++ b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
@@ -57,6 +57,18 @@ if(WIN32)
     list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/mkldnn.dll)
   endif()
 
+  if(WITH_ONNXRUNTIME)
+    message("Copied onnxruntime for Eager AutoCodeGen")
+    ADD_CUSTOM_COMMAND(OUTPUT ${eager_generator_path}/onnxruntime.dll
+      COMMAND ${CMAKE_COMMAND} -E copy ${ONNXRUNTIME_SHARED_LIB} ${eager_generator_path}
+      DEPENDS onnxruntime)
+    list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/onnxruntime.dll)
+    ADD_CUSTOM_COMMAND(OUTPUT ${eager_generator_path}/paddle2onnx.dll
+      COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE2ONNX_SHARED_LIB} ${eager_generator_path}
+      DEPENDS paddle2onnx)
+    list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/paddle2onnx.dll)
+  endif()
+
   add_custom_target(eager_codegen
     COMMAND "${eager_generator_path}/eager_generator.exe" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated"
     COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_dygraph_forward_h_path} ${dygraph_forward_h_path}
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index bdf364aa9ad..e9d6036b139 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -46,10 +46,6 @@ set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor
     zero_copy_tensor reset_tensor_array analysis_config paddle_pass_builder
     activation_functions ${mkldnn_quantizer_cfg})
 
-if(WITH_ONNXRUNTIME)
-  set(STATIC_INFERENCE_API ${STATIC_INFERENCE_API} onnxruntime_predictor)
-endif()
-
 #TODO(wilber, T8T9): Do we still need to support windows gpu static library?
 if(WIN32 AND WITH_GPU)
   cc_library(paddle_inference DEPS ${fluid_modules} ${phi_modules} ${STATIC_INFERENCE_API} ${utils_modules})
@@ -98,7 +94,6 @@ if (WITH_ONNXRUNTIME)
   set(SHARED_INFERENCE_SRCS ${SHARED_INFERENCE_SRCS}
     ${CMAKE_CURRENT_SOURCE_DIR}/api/onnxruntime_predictor.cc
     )
-  set(SHARED_INFERENCE_DEPS ${SHARED_INFERENCE_DEPS} onnxruntime_predictor)
 endif (WITH_ONNXRUNTIME)
 
 # Create shared inference library
diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
index bdc16ef4c79..edec1b1c7d0 100755
--- a/paddle/fluid/inference/api/CMakeLists.txt
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -50,9 +50,8 @@ if(WITH_GPU AND TENSORRT_FOUND)
 endif()
 
 if (WITH_ONNXRUNTIME)
-  cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps}
+  cc_library(analysis_predictor SRCS analysis_predictor.cc onnxruntime_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps}
           zero_copy_tensor ir_pass_manager op_compatible_info infer_io_utils onnxruntime paddle2onnx)
-  cc_library(onnxruntime_predictor SRCS onnxruntime_predictor.cc DEPS analysis_predictor)
 else (WITH_ONNXRUNTIME)
   cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps}
           zero_copy_tensor ir_pass_manager op_compatible_info infer_io_utils)
@@ -82,16 +81,6 @@ elseif (WIN32)
       ARGS --dirname=${WORD2VEC_MODEL_DIR})
 endif()
 
-if (WITH_ONNXRUNTIME)
-  if (NOT APPLE AND NOT WIN32)
-    cc_test(test_onnxruntime_predictor SRCS onnxruntime_predictor_tester.cc DEPS paddle_inference_shared
-            ARGS --dirname=${MOBILENETV2_MODEL_DIR})
-  elseif (WIN32)
-    cc_test(test_onnxruntime_predictor SRCS onnxruntime_predictor_tester.cc DEPS onnxruntime_predictor benchmark ${inference_deps}
-            ARGS --dirname=${MOBILENETV2_MODEL_DIR})
-  endif()
-endif()
-
 if(WITH_TESTING AND WITH_MKLDNN)
   if (NOT APPLE AND NOT WIN32)
     cc_test(test_mkldnn_quantizer SRCS mkldnn_quantizer_tester.cc DEPS paddle_inference_shared ARGS --dirname=${WORD2VEC_MODEL_DIR})
diff --git a/paddle/fluid/inference/api/details/CMakeLists.txt b/paddle/fluid/inference/api/details/CMakeLists.txt
index b2cfb060dd3..0d7a8d57a9c 100644
--- a/paddle/fluid/inference/api/details/CMakeLists.txt
+++ b/paddle/fluid/inference/api/details/CMakeLists.txt
@@ -16,9 +16,10 @@ cc_library(reset_tensor_array SRCS reset_tensor_array.cc DEPS lod_tensor scope)
 
 if (WITH_ONNXRUNTIME)
   cc_library(zero_copy_tensor SRCS zero_copy_tensor.cc DEPS scope lod_tensor enforce onnxruntime)
+  cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc DEPS onnxruntime)
 else (WITH_ONNXRUNTIME)
   cc_library(zero_copy_tensor SRCS zero_copy_tensor.cc DEPS scope lod_tensor enforce)
+  cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc)
 endif (WITH_ONNXRUNTIME)
 
-cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc)
 cc_test(zero_copy_tensor_test SRCS zero_copy_tensor_test.cc DEPS paddle_inference_api)
diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt
index f8e7081de01..31107c44068 100644
--- a/paddle/fluid/pybind/CMakeLists.txt
+++ b/paddle/fluid/pybind/CMakeLists.txt
@@ -83,10 +83,6 @@ set(PYBIND_SRCS
     communication.cc
     cuda_streams_py.cc)
 
-if (WITH_ONNXRUNTIME)
-  set(PYBIND_DEPS ${PYBIND_DEPS} onnxruntime_predictor)
-endif()
-
 if(NOT ON_INFER)
   set (PYBIND_DEPS ${PYBIND_DEPS} processgroup eager_reducer)
   if (WITH_NCCL)
@@ -165,10 +161,6 @@ if(WITH_PYTHON)
     list(APPEND OP_FUNCTION_GENERETOR_DEPS hccl_context)
   endif(WITH_ASCEND_CL)
 
-  if (WITH_ONNXRUNTIME)
-    list(APPEND OP_FUNCTION_GENERETOR_DEPS onnxruntime_predictor)
-  endif()
-
   if(WITH_CNCL)
     list(APPEND OP_FUNCTION_GENERETOR_DEPS cncl_context)
   endif(WITH_CNCL)
diff --git a/paddle/scripts/paddle_build.bat b/paddle/scripts/paddle_build.bat
index 0f51a2e931b..38077878be7 100644
--- a/paddle/scripts/paddle_build.bat
+++ b/paddle/scripts/paddle_build.bat
@@ -86,6 +86,10 @@ if not defined NEW_RELEASE_JIT set NEW_RELEASE_JIT=OFF
 set task_name=%1
 set UPLOAD_TP_FILE=OFF
 
+rem ------initialize set git config------
+git config --global core.longpaths true
+
+
 rem ------initialize the python environment------
 set PYTHON_EXECUTABLE=%PYTHON_ROOT%\python.exe
 set PATH=%PYTHON_ROOT%\Scripts;%PYTHON_ROOT%;%PATH%
@@ -255,6 +259,7 @@ set MSVC_STATIC_CRT=ON
 set ON_INFER=ON
 set WITH_TENSORRT=ON
 set WITH_INFERENCE_API_TEST=ON
+set WITH_ONNXRUNTIME=ON
 
 call :cmake || goto cmake_error
 call :build || goto build_error
-- 
GitLab
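
Usage sketch (illustrative, not part of the patch): with WITH_ONNXRUNTIME=ON, which
this patch now sets for the Windows inference CI, the onnxruntime_predictor code that
was folded into the analysis_predictor target is reached through the ordinary Paddle
Inference C++ API via Config::EnableONNXRuntime(). The model file paths below are
hypothetical placeholders; the API calls themselves (EnableONNXRuntime,
EnableORTOptimization, CreatePredictor) are part of the public inference API of this
Paddle generation.

    #include <vector>
    #include "paddle_inference_api.h"  // shipped under paddle/include in the inference package

    int main() {
      paddle_infer::Config config;
      // Hypothetical model location; any exported Paddle inference model works here.
      config.SetModel("mobilenetv2/inference.pdmodel",
                      "mobilenetv2/inference.pdiparams");
      config.EnableONNXRuntime();      // route execution through the ONNX Runtime backend
      config.EnableORTOptimization();  // let ORT apply its graph-level optimizations

      auto predictor = paddle_infer::CreatePredictor(config);

      // Feed dummy NCHW input of shape [1, 3, 224, 224].
      auto input = predictor->GetInputHandle(predictor->GetInputNames()[0]);
      std::vector<float> data(1 * 3 * 224 * 224, 0.f);
      input->Reshape({1, 3, 224, 224});
      input->CopyFromCpu(data.data());

      predictor->Run();

      auto output = predictor->GetOutputHandle(predictor->GetOutputNames()[0]);
      std::vector<int> out_shape = output->shape();  // e.g. [1, 1000] for a classifier
      return 0;
    }

Compiling onnxruntime_predictor.cc directly into analysis_predictor, instead of keeping
a separate onnxruntime_predictor library that depended on it, is presumably what resolves
the Windows link errors in the test targets: every consumer now pulls in a single library
rather than a pair of mutually referencing ones.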