Unverified commit 05b7ef8d authored by Sing_chan, committed by GitHub

[Windows CI] copy onnxruntime.dll to c++ test folder in windows (#44121)

* copy onnxruntime.dll to c++ test folder in windows

* remove UTs that failed due to onnxruntime.dll

* test_api_impl failed due to a diff

* use TARGET to make sure the test exists; use POST_BUILD to add the copy command
Parent d752a7f2
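In outline, the fix adds a `copy_onnx` CMake helper that attaches a POST_BUILD copy step to a test target, wrapped in `if(TARGET ...)` because a given test target may not be generated under every build configuration. The minimal sketch below shows the same pattern in isolation; the `demo_test` target, the `DEMO_DLL` variable, the helper name `copy_dll_after_build`, and the `$<TARGET_FILE_DIR:...>` destination are illustrative assumptions, not part of the commit (which copies `${ONNXRUNTIME_SHARED_LIB}` into `${CMAKE_CURRENT_BINARY_DIR}`).

cmake_minimum_required(VERSION 3.10)
project(copy_dll_sketch CXX)

add_executable(demo_test demo_test.cc)

# Illustrative placeholder; the commit uses ${ONNXRUNTIME_SHARED_LIB}.
set(DEMO_DLL "${CMAKE_SOURCE_DIR}/third_party/onnxruntime.dll")

function(copy_dll_after_build TARGET_NAME DLL_PATH)
  # Guard: under policy CMP0040, add_custom_command(TARGET ...) is an
  # error if the target does not exist in this configuration.
  if(TARGET ${TARGET_NAME})
    add_custom_command(
      TARGET ${TARGET_NAME}
      POST_BUILD # re-run the copy whenever the target is rebuilt
      COMMAND ${CMAKE_COMMAND} -E copy ${DLL_PATH}
              $<TARGET_FILE_DIR:${TARGET_NAME}>)
  endif()
endfunction()

copy_dll_after_build(demo_test ${DEMO_DLL})

Copying to `$<TARGET_FILE_DIR:...>` would also work with multi-config generators; the commit instead copies to `${CMAKE_CURRENT_BINARY_DIR}`, which matches the executable folder under the single-config Ninja generator the CI is assumed to use.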
@@ -134,3 +134,15 @@ endif()
add_library(onnxruntime STATIC IMPORTED GLOBAL)
set_property(TARGET onnxruntime PROPERTY IMPORTED_LOCATION ${ONNXRUNTIME_LIB})
add_dependencies(onnxruntime ${ONNXRUNTIME_PROJECT})
+
+function(copy_onnx TARGET_NAME)
+  # If a .exe fails at startup with exit code 0xc000007b, copy
+  # onnxruntime.dll into the .exe's folder.
+  if(TARGET ${TARGET_NAME})
+    add_custom_command(
+      TARGET ${TARGET_NAME}
+      POST_BUILD
+      COMMAND ${CMAKE_COMMAND} -E copy ${ONNXRUNTIME_SHARED_LIB}
+              ${CMAKE_CURRENT_BINARY_DIR} DEPENDS onnxruntime)
+  endif()
+endfunction()
@@ -473,6 +473,13 @@ if(WITH_MKLDNN)
    test_compute_propagate_scales_mkldnn_pass
    SRCS mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc
    DEPS compute_propagate_scales_mkldnn_pass naive_executor)
+
+  if(WITH_ONNXRUNTIME AND WIN32)
+    # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+    # built only in CI, where the Windows generator is assumed to be Ninja.
+    copy_onnx(test_compute_propagate_scales_mkldnn_pass)
+  endif()
+
  cc_test(
    test_cpu_quantize_placement_pass
    SRCS mkldnn/cpu_quantize_placement_pass_tester.cc
......
@@ -109,4 +109,9 @@ elseif(WIN32)
      paddle_inference_api
      ARGS
      --inference_model_dir=${WORD2VEC_MODEL_DIR})
+  if(WITH_ONNXRUNTIME AND WIN32)
+    # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+    # built only in CI, where the Windows generator is assumed to be Ninja.
+    copy_onnx(test_analyzer)
+  endif()
endif()
@@ -99,6 +99,12 @@ cc_test(
  SRCS api_tester.cc
  DEPS paddle_inference_api)
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(test_paddle_inference_api)
+endif()
+
if(WITH_TESTING)
  if(NOT APPLE AND NOT WIN32)
    if(WITH_GPU)
......
@@ -38,3 +38,9 @@ cc_test(
  zero_copy_tensor_test
  SRCS zero_copy_tensor_test.cc
  DEPS paddle_inference_api)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(zero_copy_tensor_test)
+endif()
@@ -24,5 +24,12 @@ nv_test(
  test_tensorrt_engine
  SRCS test_engine.cc test_dynamic_engine.cc
  DEPS dynload_cuda tensorrt_engine tensorrt_plugin)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(test_tensorrt_engine)
+endif()
+
add_subdirectory(plugin)
add_subdirectory(convert)
@@ -85,3 +85,9 @@ nv_test(
  SRCS test_op_converter.cc
  DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine
       tensorrt_converter)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(test_op_converter)
+endif()
@@ -18,6 +18,13 @@ cc_test(
  infer_io_utils_tester
  SRCS io_utils_tester.cc
  DEPS infer_io_utils)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(infer_io_utils_tester)
+endif()
+
cc_library(table_printer SRCS table_printer.cc)
cc_test(
  test_table_printer
......
@@ -12,3 +12,9 @@ cc_test(
  ${GLOB_OP_LIB}
  ${GLOB_OPERATOR_DEPS}
  eigen_function)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(op_tester)
+endif()
@@ -4,3 +4,9 @@ nv_test(
  test_tensorrt_engine_op
  SRCS tensorrt_engine_op_test.cc
  DEPS tensorrt_engine_op analysis)
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime.dll for some C++ tests on Windows. These tests are
+  # built only in CI, where the Windows generator is assumed to be Ninja.
+  copy_onnx(test_tensorrt_engine_op)
+endif()
@@ -87,8 +87,6 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
^lite_mul_model_test$|\
^trt_split_converter_test$|\
^paddle_infer_api_copy_tensor_tester$|\
-^test_tensorrt_engine_op$|\
-^test_tensorrt_engine$|\
^test_trt_deformable_conv$|\
^test_imperative_triple_grad$|\
^test_full_name_usage$|\
@@ -103,7 +101,6 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
^test_tensor_scalar_type_promotion_static$|\
^test_matrix_power_op$|\
^test_deformable_conv_v1_op$|\
-^zero_copy_tensor_test$|\
^test_where_index$|\
^test_custom_grad_input$|\
^test_conv3d_transpose_op$|\
@@ -116,16 +113,6 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
^test_basic_api_transformation$|\
^test_deformable_conv_op$|\
^test_variable$|\
-^test_conv_bias_mkldnn_fuse_pass_cc$|\
-^test_conv_batch_norm_mkldnn_fuse_pass$|\
-^test_compute_propagate_scales_mkldnn_pass$|\
-^test_cpu_quantize_pass$|\
-^test_cpu_quantize_squash_pass$|\
-^op_tester$|\
-^test_analyzer$|\
-^infer_io_utils_tester$|\
-^test_paddle_inference_api$|\
-^test_mkldnn_quantizer$|\
^test_mkldnn_conv_hard_sigmoid_fuse_pass$|\
^test_mkldnn_conv_hard_swish_fuse_pass$|\
^test_conv_act_mkldnn_fuse_pass$|\
@@ -147,11 +134,9 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
^test_slice$|\
^test_conv_elementwise_add_fuse_pass$|\
^test_executor_and_mul$|\
-^test_op_converter$|\
^test_analyzer_int8_resnet50$|\
^test_analyzer_int8_mobilenetv1$|\
^test_trt_conv_pass$|\
-^test_analysis_predictor$|\
^test_roll_op$|\
^test_lcm$|\
^test_elementwise_floordiv_op$|\
@@ -160,7 +145,6 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
^test_trt_convert_deformable_conv$|\
^test_conv_elementwise_add2_act_fuse_pass$|\
^test_tensor_scalar_type_promotion_dynamic$|\
-^test_api_impl$|\
^test_model$|\
^test_py_reader_combination$|\
^test_trt_convert_flatten$|\
@@ -198,7 +182,8 @@ disable_win_inference_test="^trt_quant_int8_yolov3_r50_test$|\
^test_trt_fc_fuse_quant_dequant_pass$|\
^test_unsqueeze2_eltwise_fuse_pass$|\
^test_parallel_executor_seresnext_with_fuse_all_reduce_gpu$|\
-^test_parallel_executor_seresnext_with_reduce_gpu$"
+^test_parallel_executor_seresnext_with_reduce_gpu$|\
+^test_api_impl$"
# /*==========Fixed Disabled Windows CPU OPENBLAS((PR-CI-Windows-OPENBLAS)) unittests==============================*/
......