diff --git a/cmake/external/onnxruntime.cmake b/cmake/external/onnxruntime.cmake
index b52b2c00d9cce8f06361a6adac23e0e5c94ba69e..15901568ae1cd8fbc2faa46f76365fa711ccb4e4 100644
--- a/cmake/external/onnxruntime.cmake
+++ b/cmake/external/onnxruntime.cmake
@@ -134,3 +134,15 @@ endif()
 add_library(onnxruntime STATIC IMPORTED GLOBAL)
 set_property(TARGET onnxruntime PROPERTY IMPORTED_LOCATION ${ONNXRUNTIME_LIB})
 add_dependencies(onnxruntime ${ONNXRUNTIME_PROJECT})
+
+function(copy_onnx TARGET_NAME)
+  # If an executable fails at startup with error 0xc000007b, copy
+  # onnxruntime.dll into the folder containing the .exe.
+  if(TARGET ${TARGET_NAME})
+    add_custom_command(
+      TARGET ${TARGET_NAME}
+      POST_BUILD
+      COMMAND ${CMAKE_COMMAND} -E copy ${ONNXRUNTIME_SHARED_LIB}
+              ${CMAKE_CURRENT_BINARY_DIR})
+  endif()
+endfunction()
diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt
index 8569a3bb6151f71c57a016892cef4881ff01308b..2e4b73c6ac19af71e2369fe1e07b67e4fa52dd1e 100755
--- a/paddle/fluid/framework/ir/CMakeLists.txt
+++ b/paddle/fluid/framework/ir/CMakeLists.txt
@@ -473,6 +473,13 @@ if(WITH_MKLDNN)
     test_compute_propagate_scales_mkldnn_pass
     SRCS mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc
     DEPS compute_propagate_scales_mkldnn_pass naive_executor)
+
+  if(WITH_ONNXRUNTIME AND WIN32)
+    # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+    # built only in CI, where the Windows generator is assumed to be Ninja.
+    copy_onnx(test_compute_propagate_scales_mkldnn_pass)
+  endif()
+
   cc_test(
     test_cpu_quantize_placement_pass
     SRCS mkldnn/cpu_quantize_placement_pass_tester.cc
diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt
index c001f5eb8dfdc883432f11acc2a898d45382329a..67f0e3212db43bee37d4d592db59adeebd23017d 100644
--- a/paddle/fluid/inference/analysis/CMakeLists.txt
+++ b/paddle/fluid/inference/analysis/CMakeLists.txt
@@ -109,4 +109,9 @@ elseif(WIN32)
     paddle_inference_api
     ARGS
     --inference_model_dir=${WORD2VEC_MODEL_DIR})
+  if(WITH_ONNXRUNTIME AND WIN32)
+    # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+    # built only in CI, where the Windows generator is assumed to be Ninja.
+    copy_onnx(test_analyzer)
+  endif()
 endif()
diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
index 0d55b9c66416a4ef024e73d7fd32b37c14f0a4d6..9e601df8088fc172e39115dca106918b5c3d8da5 100755
--- a/paddle/fluid/inference/api/CMakeLists.txt
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -99,6 +99,12 @@ cc_test(
   SRCS api_tester.cc
   DEPS paddle_inference_api)
 
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(test_paddle_inference_api)
+endif()
+
 if(WITH_TESTING)
   if(NOT APPLE AND NOT WIN32)
     if(WITH_GPU)
diff --git a/paddle/fluid/inference/api/details/CMakeLists.txt b/paddle/fluid/inference/api/details/CMakeLists.txt
index 2acd96b3fb97cfb87436641262d0403a3945715e..02d5f91d630ce8a2108e76ecfbceb7695bb18bd2 100644
--- a/paddle/fluid/inference/api/details/CMakeLists.txt
+++ b/paddle/fluid/inference/api/details/CMakeLists.txt
@@ -38,3 +38,9 @@ cc_test(
   zero_copy_tensor_test
   SRCS zero_copy_tensor_test.cc
   DEPS paddle_inference_api)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(zero_copy_tensor_test)
+endif()
diff --git a/paddle/fluid/inference/tensorrt/CMakeLists.txt b/paddle/fluid/inference/tensorrt/CMakeLists.txt
index 0f1350459ef22c23a3c2e54ad50ceae937974b5d..cd03dce1795e2114e48da551746bd189542a06d1 100644
--- a/paddle/fluid/inference/tensorrt/CMakeLists.txt
+++ b/paddle/fluid/inference/tensorrt/CMakeLists.txt
@@ -24,5 +24,12 @@ nv_test(
   test_tensorrt_engine
   SRCS test_engine.cc test_dynamic_engine.cc
   DEPS dynload_cuda tensorrt_engine tensorrt_plugin)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(test_tensorrt_engine)
+endif()
+
 add_subdirectory(plugin)
 add_subdirectory(convert)
diff --git a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt
index c999c009605ee998b15cd7131c4e57b6eaf439b9..90089fcbfd806bbce31d641b005c803b2c109d4f 100644
--- a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt
+++ b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt
@@ -85,3 +85,9 @@ nv_test(
   SRCS test_op_converter.cc
   DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine
        tensorrt_converter)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(test_op_converter)
+endif()
diff --git a/paddle/fluid/inference/utils/CMakeLists.txt b/paddle/fluid/inference/utils/CMakeLists.txt
index 9ab07633e0fe05595417c3399fe41cbada13c140..f165002f353e4ed6bc565f2209ff2c96316c935d 100644
--- a/paddle/fluid/inference/utils/CMakeLists.txt
+++ b/paddle/fluid/inference/utils/CMakeLists.txt
@@ -18,6 +18,13 @@ cc_test(
   infer_io_utils_tester
   SRCS io_utils_tester.cc
   DEPS infer_io_utils)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(infer_io_utils_tester)
+endif()
+
 cc_library(table_printer SRCS table_printer.cc)
 cc_test(
   test_table_printer
diff --git a/paddle/fluid/operators/benchmark/CMakeLists.txt b/paddle/fluid/operators/benchmark/CMakeLists.txt
index e05011eaf6b3a52f4b8616b648f8ba9339acb83f..b0a1c488f047caae4894312dd85fbf8acd03d328 100644
--- a/paddle/fluid/operators/benchmark/CMakeLists.txt
+++ b/paddle/fluid/operators/benchmark/CMakeLists.txt
@@ -12,3 +12,9 @@ cc_test(
   ${GLOB_OP_LIB}
   ${GLOB_OPERATOR_DEPS}
   eigen_function)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(op_tester)
+endif()
diff --git a/paddle/fluid/operators/tensorrt/CMakeLists.txt b/paddle/fluid/operators/tensorrt/CMakeLists.txt
index e0fed2804a9b7756a0c9c30eeec051b6c65bf175..0d731b14c6a97aea36fcc8b13cb9c94f011bc9fa 100644
--- a/paddle/fluid/operators/tensorrt/CMakeLists.txt
+++ b/paddle/fluid/operators/tensorrt/CMakeLists.txt
@@ -4,3 +4,9 @@ nv_test(
   test_tensorrt_engine_op
   SRCS tensorrt_engine_op_test.cc
   DEPS tensorrt_engine_op analysis)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(test_tensorrt_engine_op)
+endif()
diff --git a/tools/windows/run_unittests.sh b/tools/windows/run_unittests.sh
index 23a0b4d32828f02854588ad77022f9e0c13b44f7..7af1cd81391d46d9388ec355a53b0da323977ef2 100644
--- a/tools/windows/run_unittests.sh
+++ b/tools/windows/run_unittests.sh
@@ -87,8 +87,6 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
 ^lite_mul_model_test$|\
 ^trt_split_converter_test$|\
 ^paddle_infer_api_copy_tensor_tester$|\
-^test_tensorrt_engine_op$|\
-^test_tensorrt_engine$|\
 ^test_trt_deformable_conv$|\
 ^test_imperative_triple_grad$|\
 ^test_full_name_usage$|\
@@ -103,7 +101,6 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
 ^test_tensor_scalar_type_promotion_static$|\
 ^test_matrix_power_op$|\
 ^test_deformable_conv_v1_op$|\
-^zero_copy_tensor_test$|\
 ^test_where_index$|\
 ^test_custom_grad_input$|\
 ^test_conv3d_transpose_op$|\
@@ -116,16 +113,6 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
 ^test_basic_api_transformation$|\
 ^test_deformable_conv_op$|\
 ^test_variable$|\
-^test_conv_bias_mkldnn_fuse_pass_cc$|\
-^test_conv_batch_norm_mkldnn_fuse_pass$|\
-^test_compute_propagate_scales_mkldnn_pass$|\
-^test_cpu_quantize_pass$|\
-^test_cpu_quantize_squash_pass$|\
-^op_tester$|\
-^test_analyzer$|\
-^infer_io_utils_tester$|\
-^test_paddle_inference_api$|\
-^test_mkldnn_quantizer$|\
 ^test_mkldnn_conv_hard_sigmoid_fuse_pass$|\
 ^test_mkldnn_conv_hard_swish_fuse_pass$|\
 ^test_conv_act_mkldnn_fuse_pass$|\
@@ -147,11 +134,9 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
 ^test_slice$|\
 ^test_conv_elementwise_add_fuse_pass$|\
 ^test_executor_and_mul$|\
-^test_op_converter$|\
 ^test_analyzer_int8_resnet50$|\
 ^test_analyzer_int8_mobilenetv1$|\
 ^test_trt_conv_pass$|\
-^test_analysis_predictor$|\
 ^test_roll_op$|\
 ^test_lcm$|\
 ^test_elementwise_floordiv_op$|\
@@ -160,7 +145,6 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
 ^test_trt_convert_deformable_conv$|\
 ^test_conv_elementwise_add2_act_fuse_pass$|\
 ^test_tensor_scalar_type_promotion_dynamic$|\
-^test_api_impl$|\
 ^test_model$|\
 ^test_py_reader_combination$|\
 ^test_trt_convert_flatten$|\
@@ -198,7 +182,8 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
 ^test_trt_fc_fuse_quant_dequant_pass$|\
 ^test_unsqueeze2_eltwise_fuse_pass$|\
 ^test_parallel_executor_seresnext_with_fuse_all_reduce_gpu$|\
-^test_parallel_executor_seresnext_with_reduce_gpu$"
+^test_parallel_executor_seresnext_with_reduce_gpu$|\
+^test_api_impl$"
 
 # /*==========Fixed Disabled Windows CPU OPENBLAS((PR-CI-Windows-OPENBLAS)) unittests==============================*/
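For reference, the per-target pattern this patch repeats is sketched below. This is a minimal illustration, not part of the patch: the target name `my_windows_test` and its source file are hypothetical, while `cc_test`, `copy_onnx`, `WITH_ONNXRUNTIME`, and `ONNXRUNTIME_SHARED_LIB` come from Paddle's existing build scripts as shown in the hunks above.

    # Define a C++ test as usual (cc_test is Paddle's test helper).
    cc_test(
      my_windows_test
      SRCS my_windows_test.cc
      DEPS paddle_inference_api)

    # On Windows, stage onnxruntime.dll next to the test executable so the
    # loader can find it at startup; per the comment in onnxruntime.cmake,
    # a missing DLL surfaces as error 0xc000007b.
    if(WITH_ONNXRUNTIME AND WIN32)
      copy_onnx(my_windows_test)
    endif()

Two notes on the design. The copy targets ${CMAKE_CURRENT_BINARY_DIR}, which is where a single-config generator such as Ninja places the executable; this is why the comments assume the CI generator is Ninja (a multi-config Visual Studio generator would put the .exe in a per-configuration subdirectory instead). The WITH_ONNXRUNTIME guard also matters at configure time: copy_onnx is defined in cmake/external/onnxruntime.cmake, which is presumably only included for WITH_ONNXRUNTIME builds, so an unguarded call would fail configuration in other builds.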