diff --git a/cmake/external/paddle2onnx.cmake b/cmake/external/paddle2onnx.cmake index ba6f0396008fc25dd21d462a2d19285a6cbe9080..2fc22578cae9dc752fd7484cc618efb4d4a5ea36 100644 --- a/cmake/external/paddle2onnx.cmake +++ b/cmake/external/paddle2onnx.cmake @@ -53,6 +53,7 @@ set(PADDLE2ONNX_OPTIONAL_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_CXX_STANDARD=14 -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE} -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG} -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} @@ -60,6 +61,7 @@ set(PADDLE2ONNX_OPTIONAL_ARGS -DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE} -DONNX_CUSTOM_PROTOC_PATH=${PROTOC_BIN_PATH} -DWITH_STATIC=OFF + -DMSVC_STATIC_CRT=${MSVC_STATIC_CRT} -DCMAKE_INSTALL_PREFIX=${PADDLE2ONNX_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=${PADDLE2ONNX_INSTALL_DIR}/${LIBDIR} -DCMAKE_POSITION_INDEPENDENT_CODE=ON diff --git a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt index 668e60d857b9ca371243891db686421810fda0bb..d673c64d9da3ca7b9339cd3b7765e41919b5cdbd 100644 --- a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt +++ b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt @@ -57,6 +57,18 @@ if(WIN32) list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/mkldnn.dll) endif() + if(WITH_ONNXRUNTIME) + message("Copied onnxruntime for Eager AutoCodeGen") + add_custom_command(OUTPUT ${eager_generator_path}/onnxruntime.dll + COMMAND ${CMAKE_COMMAND} -E copy ${ONNXRUNTIME_SHARED_LIB} ${eager_generator_path} + DEPENDS onnxruntime) + list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/onnxruntime.dll) + add_custom_command(OUTPUT ${eager_generator_path}/paddle2onnx.dll + COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE2ONNX_SHARED_LIB} ${eager_generator_path} + DEPENDS paddle2onnx) + list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/paddle2onnx.dll) + endif() + add_custom_target(eager_codegen 
COMMAND "${eager_generator_path}/eager_generator.exe" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated" COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_dygraph_forward_h_path} ${dygraph_forward_h_path} diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index 7a1f3e8326aeb1a5b2a0163f9066ea8359a4ce6a..7fae481f582898f82935d2fee06127b36d724a28 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -46,10 +46,6 @@ set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor zero_copy_tensor reset_tensor_array analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg}) -if(WITH_ONNXRUNTIME) - set(STATIC_INFERENCE_API ${STATIC_INFERENCE_API} onnxruntime_predictor) -endif() - #windows GPU static library over the limit, so not create_static_lib, and cc_library is dummy if(WIN32 AND WITH_GPU) cc_library(paddle_inference DEPS ${fluid_modules} phi ${STATIC_INFERENCE_API} ${utils_modules}) @@ -98,7 +94,6 @@ if (WITH_ONNXRUNTIME) set(SHARED_INFERENCE_SRCS ${SHARED_INFERENCE_SRCS} ${CMAKE_CURRENT_SOURCE_DIR}/api/onnxruntime_predictor.cc ) - set(SHARED_INFERENCE_DEPS ${SHARED_INFERENCE_DEPS} onnxruntime_predictor) endif (WITH_ONNXRUNTIME) # Create shared inference library diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt index bdc16ef4c7907764473c552461cde35f011ad489..edec1b1c7d0e464cec9124339fb3fb08c35cc8bd 100755 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -50,9 +50,8 @@ if(WITH_GPU AND TENSORRT_FOUND) endif() if (WITH_ONNXRUNTIME) - cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps} + cc_library(analysis_predictor SRCS analysis_predictor.cc onnxruntime_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps} zero_copy_tensor ir_pass_manager op_compatible_info infer_io_utils onnxruntime 
paddle2onnx) - cc_library(onnxruntime_predictor SRCS onnxruntime_predictor.cc DEPS analysis_predictor) else (WITH_ONNXRUNTIME) cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps} zero_copy_tensor ir_pass_manager op_compatible_info infer_io_utils) @@ -82,16 +81,6 @@ elseif (WIN32) ARGS --dirname=${WORD2VEC_MODEL_DIR}) endif() -if (WITH_ONNXRUNTIME) - if (NOT APPLE AND NOT WIN32) - cc_test(test_onnxruntime_predictor SRCS onnxruntime_predictor_tester.cc DEPS paddle_inference_shared - ARGS --dirname=${MOBILENETV2_MODEL_DIR}) - elseif (WIN32) - cc_test(test_onnxruntime_predictor SRCS onnxruntime_predictor_tester.cc DEPS onnxruntime_predictor benchmark ${inference_deps} - ARGS --dirname=${MOBILENETV2_MODEL_DIR}) - endif() -endif() - if(WITH_TESTING AND WITH_MKLDNN) if (NOT APPLE AND NOT WIN32) cc_test(test_mkldnn_quantizer SRCS mkldnn_quantizer_tester.cc DEPS paddle_inference_shared ARGS --dirname=${WORD2VEC_MODEL_DIR}) diff --git a/paddle/fluid/inference/api/details/CMakeLists.txt b/paddle/fluid/inference/api/details/CMakeLists.txt index b2cfb060dd32559f6157fc456c7399736fc9fe51..0d7a8d57a9c5a8f92bcac391c9ff2dd5a476994d 100644 --- a/paddle/fluid/inference/api/details/CMakeLists.txt +++ b/paddle/fluid/inference/api/details/CMakeLists.txt @@ -16,9 +16,10 @@ cc_library(reset_tensor_array SRCS reset_tensor_array.cc DEPS lod_tensor scope) if (WITH_ONNXRUNTIME) cc_library(zero_copy_tensor SRCS zero_copy_tensor.cc DEPS scope lod_tensor enforce onnxruntime) + cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc DEPS onnxruntime) else (WITH_ONNXRUNTIME) cc_library(zero_copy_tensor SRCS zero_copy_tensor.cc DEPS scope lod_tensor enforce) + cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc) endif (WITH_ONNXRUNTIME) -cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc) cc_test(zero_copy_tensor_test SRCS zero_copy_tensor_test.cc DEPS paddle_inference_api) diff --git 
a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index 63abc2c2cf471b7f5b453e50a8472fe643937287..2491cd90a83ef58a257057de14d7386541744fec 100755 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -86,10 +86,6 @@ set(PYBIND_SRCS communication.cc cuda_streams_py.cc) -if (WITH_ONNXRUNTIME) - set(PYBIND_DEPS ${PYBIND_DEPS} onnxruntime_predictor) -endif() - if(NOT ON_INFER) set (PYBIND_DEPS ${PYBIND_DEPS} processgroup eager_reducer) if (WITH_NCCL) @@ -172,10 +168,6 @@ if(WITH_PYTHON) list(APPEND OP_FUNCTION_GENERETOR_DEPS hccl_context) endif(WITH_ASCEND_CL) - if (WITH_ONNXRUNTIME) - list(APPEND OP_FUNCTION_GENERETOR_DEPS onnxruntime_predictor) - endif() - if(WITH_CNCL) list(APPEND OP_FUNCTION_GENERETOR_DEPS cncl_context) endif(WITH_CNCL) diff --git a/paddle/scripts/paddle_build.bat b/paddle/scripts/paddle_build.bat index a7a2592f971c5668a2b25410a7d2d3988e8239a1..f4a09436d86ce9d22d8d763351aaff61d97a9e32 100644 --- a/paddle/scripts/paddle_build.bat +++ b/paddle/scripts/paddle_build.bat @@ -86,6 +86,10 @@ if not defined NEW_RELEASE_JIT set NEW_RELEASE_JIT=OFF set task_name=%1 set UPLOAD_TP_FILE=OFF +rem ------initialize git config------ +git config --global core.longpaths true + + rem ------initialize the python environment------ set PYTHON_EXECUTABLE=%PYTHON_ROOT%\python.exe set PATH=%PYTHON_ROOT%\Scripts;%PYTHON_ROOT%;%PATH% @@ -255,6 +259,7 @@ set MSVC_STATIC_CRT=ON set ON_INFER=ON set WITH_TENSORRT=ON set WITH_INFERENCE_API_TEST=ON +set WITH_ONNXRUNTIME=ON call :cmake || goto cmake_error call :build || goto build_error