Unverified · Commit b9640034 · Authored by: Sing_chan · Committed by: GitHub

Make inference_api_test compile with shared lib in Windows (#43946)

* Revert "Revert "make inference_api_test compile with dynamic linking library (#41944)" (#43882)"

This reverts commit e6d81ddf.

* modify third_party cmake

* move SKIP_CPP_TEST return to the beginning
Parent 9a1fdad3
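For context, the core of this change is a Windows-specific linking pattern: MSVC exports no symbols from a DLL unless they are marked, so the build enables CMake's WINDOWS_EXPORT_ALL_SYMBOLS on the shared inference library and links the test targets against the shared libraries. Below is a minimal standalone sketch of that pattern; the targets demo_infer_static, demo_infer_shared, demo_test and the source files are hypothetical and are not taken from the Paddle build files.

# Minimal sketch with hypothetical targets; illustration only.
cmake_minimum_required(VERSION 3.4)  # WINDOWS_EXPORT_ALL_SYMBOLS needs CMake >= 3.4
project(demo CXX)

add_library(demo_infer_static STATIC infer.cc)
add_library(demo_infer_shared SHARED infer.cc)
if(WIN32)
  # MSVC exports nothing from a DLL by default; exporting all symbols makes the
  # generated import library (.lib) cover everything the tests reference.
  set_property(TARGET demo_infer_shared PROPERTY WINDOWS_EXPORT_ALL_SYMBOLS ON)
endif()

add_executable(demo_test test.cc)
if(WIN32)
  target_link_libraries(demo_test demo_infer_shared)  # link against the DLL's import lib
else()
  target_link_libraries(demo_test demo_infer_static)
endif()

Exporting every symbol this way avoids annotating each declaration with an export macro, which is why the PADDLE_API markers can be dropped from two declarations further down in this diff.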
@@ -45,7 +45,7 @@ if(WITH_ARM_BRPC)
   file(
     WRITE ${GLOG_SOURCE_DIR}/CMakeLists.txt
     "PROJECT(ARM_GLOGS)\n" "cmake_minimum_required(VERSION 3.0)\n"
-    "install(DIRECTORY arm_glog/include arm_glog/lib \n"
+    "install(DIRECTORY arm_glog/include arm_glog/lib\n"
     " DESTINATION . USE_SOURCE_PERMISSIONS)\n")
   ExternalProject_Add(
     extern_glog
......
@@ -225,7 +225,7 @@ if(NOT DEFINED WITH_MKLDNN)
   if(WITH_MKL AND AVX2_FOUND)
     set(WITH_MKLDNN ON)
   else()
-    message(STATUS "Do not have AVX2 intrinsics and disabled MKL-DNN")
+    message(STATUS "Do not have AVX2 intrinsics and disabled MKL-DNN.")
     set(WITH_MKLDNN OFF)
   endif()
 endif()
......
@@ -119,6 +119,8 @@ cc_library(
 get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
 target_link_libraries(paddle_inference_shared ${os_dependency_modules})
 if(WIN32)
+  set_property(TARGET paddle_inference_shared
+               PROPERTY WINDOWS_EXPORT_ALL_SYMBOLS ON)
   target_link_libraries(paddle_inference_shared gflags)
 endif()
......
@@ -49,10 +49,10 @@ function(inference_analysis_test_build TARGET)
       SRCS
       ${analysis_test_SRCS}
       DEPS
+      ${analysis_test_EXTRA_DEPS}
       analysis
       pass
-      ${GLOB_PASS_LIB}
-      ${analysis_test_EXTRA_DEPS})
+      ${GLOB_PASS_LIB})
   endif()
 endfunction()
@@ -80,10 +80,10 @@ function(inference_analysis_test TARGET)
       SRCS
       ${analysis_test_SRCS}
       DEPS
+      ${analysis_test_EXTRA_DEPS}
       analysis
       pass
-      ${GLOB_PASS_LIB}
-      ${analysis_test_EXTRA_DEPS})
+      ${GLOB_PASS_LIB})
     inference_base_test_run(${TARGET} COMMAND ${TARGET} ARGS
                             ${analysis_test_ARGS})
   endif()
......
@@ -20,10 +20,6 @@ cc_library(
   SRCS ${C_API_SRCS}
   DEPS paddle_inference)
 
-if(NOT ON_INFER)
-  return()
-endif()
-
 # Create inference capi shared library
 cc_library(
   paddle_inference_c_shared SHARED
......
@@ -20,10 +20,6 @@ cc_library(
   SRCS ${C_API_SRCS}
   DEPS paddle_inference)
 
-if(NOT ON_INFER)
-  return()
-endif()
-
 # Create inference capi shared library
 cc_library(
   paddle_inference_c_shared SHARED
......
-if(NOT APPLE AND NOT WIN32)
-  set(INFERENCE_EXTRA_DEPS paddle_inference_shared)
-else()
-  set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_inference_io
-      ir_pass_manager analysis_predictor benchmark)
-endif()
-if(WITH_GPU AND TENSORRT_FOUND)
-  set(INFERENCE_EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} analysis ${analysis_deps})
-endif()
+# If CI_SKIP_CPP_TEST=ON, there is no need to build and run these test.
+if("$ENV{CI_SKIP_CPP_TEST}" STREQUAL "ON")
+  return()
+endif()
+
+set(INFERENCE_EXTRA_DEPS paddle_inference_shared)
 
 function(download_data install_dir data_file check_sum)
   string(REGEX MATCH "[^/\\]+$" file_name ${data_file})
@@ -948,18 +944,26 @@ if(WITH_GPU AND TENSORRT_FOUND)
     analyzer_capi_exp_gpu_tester.cc
     EXTRA_DEPS
     ${INFERENCE_EXTRA_DEPS}
-    paddle_inference_c
     ARGS
     --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)
+  if(WIN32)
+    target_link_libraries(test_analyzer_capi_exp_gpu paddle_inference_c_shared)
+  else()
+    target_link_libraries(test_analyzer_capi_exp_gpu paddle_inference_c)
+  endif()
 
   inference_analysis_test(
     test_analyzer_capi_exp_xpu
     SRCS
     analyzer_capi_exp_xpu_tester.cc
     EXTRA_DEPS
     ${INFERENCE_EXTRA_DEPS}
-    paddle_inference_c
     ARGS
     --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)
+  if(WIN32)
+    target_link_libraries(test_analyzer_capi_exp_xpu paddle_inference_c_shared)
+  else()
+    target_link_libraries(test_analyzer_capi_exp_xpu paddle_inference_c)
+  endif()
 
   set(TRT_MODEL_QUANT_RESNET_DIR
       "${INFERENCE_DEMO_INSTALL_DIR}/small_quant_model")
@@ -1107,9 +1111,13 @@ inference_analysis_test(
   analyzer_capi_exp_tester.cc
   EXTRA_DEPS
   ${INFERENCE_EXTRA_DEPS}
-  paddle_inference_c
   ARGS
   --infer_model=${RESNET50_MODEL_DIR}/model)
+if(WIN32)
+  target_link_libraries(test_analyzer_capi_exp paddle_inference_c_shared)
+else()
+  target_link_libraries(test_analyzer_capi_exp paddle_inference_c)
+endif()
 
 inference_analysis_test(
   test_analyzer_capi_exp_pd_config
@@ -1117,9 +1125,14 @@ inference_analysis_test(
   analyzer_capi_exp_pd_config_tester.cc
   EXTRA_DEPS
   ${INFERENCE_EXTRA_DEPS}
-  paddle_inference_c
   ARGS
   --infer_model=${MOBILENET_INSTALL_DIR}/model)
+if(WIN32)
+  target_link_libraries(test_analyzer_capi_exp_pd_config
+                        paddle_inference_c_shared)
+else()
+  target_link_libraries(test_analyzer_capi_exp_pd_config paddle_inference_c)
+endif()
 
 inference_analysis_test(
   test_analyzer_capi_exp_pd_tensor
@@ -1127,9 +1140,14 @@ inference_analysis_test(
   analyzer_capi_exp_pd_tensor_tester.cc
   EXTRA_DEPS
   ${INFERENCE_EXTRA_DEPS}
-  paddle_inference_c
   ARGS
   --infer_model=${MOBILENET_INSTALL_DIR}/model)
+if(WIN32)
+  target_link_libraries(test_analyzer_capi_exp_pd_tensor
+                        paddle_inference_c_shared)
+else()
+  target_link_libraries(test_analyzer_capi_exp_pd_tensor paddle_inference_c)
+endif()
 
 if(NOT APPLE AND NOT WIN32)
   inference_analysis_test(
@@ -1138,10 +1156,16 @@ if(NOT APPLE AND NOT WIN32)
     analyzer_capi_exp_pd_threads_tester.cc
     EXTRA_DEPS
     ${INFERENCE_EXTRA_DEPS}
-    paddle_inference_c
     ARGS
     --infer_model=${MOBILENET_INSTALL_DIR}/model)
+  if(WIN32)
+    target_link_libraries(test_analyzer_capi_exp_pd_threads
+                          paddle_inference_c_shared)
+  else()
+    target_link_libraries(test_analyzer_capi_exp_pd_threads paddle_inference_c)
+  endif()
 endif()
 
 inference_analysis_test(
   test_analyzer_zerocopytensor_tensor
   SRCS
@@ -1182,9 +1206,13 @@ if(WITH_MKLDNN)
     analyzer_capi_exp_int_tester.cc
     EXTRA_DEPS
     ${INFERENCE_EXTRA_DEPS}
-    paddle_inference_c
     ARGS
     --infer_model=${INT8_DATA_DIR}/resnet50/model)
+  if(WIN32)
+    target_link_libraries(test_analyzer_capi_exp_int paddle_inference_c_shared)
+  else()
+    target_link_libraries(test_analyzer_capi_exp_int paddle_inference_c)
+  endif()
 endif()
 
 inference_analysis_test(
@@ -1193,9 +1221,13 @@ inference_analysis_test(
   analyzer_capi_exp_ner_tester.cc
   EXTRA_DEPS
   ${INFERENCE_EXTRA_DEPS}
-  paddle_inference_c
   ARGS
   --infer_model=${CHINESE_NER_INSTALL_DIR}/model)
+if(WIN32)
+  target_link_libraries(test_analyzer_capi_exp_ner paddle_inference_c_shared)
+else()
+  target_link_libraries(test_analyzer_capi_exp_ner paddle_inference_c)
+endif()
 
 if(WITH_GPU)
   inference_analysis_test(
@@ -1224,10 +1256,6 @@ cc_test(
   SRCS paddle_infer_api_errors_tester.cc
   DEPS paddle_inference_api)
 
-if("$ENV{CI_SKIP_CPP_TEST}" STREQUAL "ON")
-  return()
-endif()
-
 if(WITH_GPU AND TENSORRT_FOUND)
   set_tests_properties(trt_resnext_test PROPERTIES TIMEOUT 300)
   set_tests_properties(trt_quant_int8_yolov3_r50_test PROPERTIES TIMEOUT 300)
......
@@ -66,11 +66,6 @@ void profile(bool use_mkldnn = false) {
                  FLAGS_num_threads);
 }
 
-TEST(Analyzer_resnet50, profile) { profile(); }
-#ifdef PADDLE_WITH_MKLDNN
-TEST(Analyzer_resnet50, profile_mkldnn) { profile(true /* use_mkldnn */); }
-#endif
-
 // Check the fuse status
 TEST(Analyzer_resnet50, fuse_statis) {
   AnalysisConfig cfg;
@@ -82,6 +77,11 @@ TEST(Analyzer_resnet50, fuse_statis) {
   LOG(INFO) << "num_ops: " << num_ops;
 }
 
+TEST(Analyzer_resnet50, profile) { profile(); }
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_resnet50, profile_mkldnn) { profile(true /* use_mkldnn */); }
+#endif
+
 // Compare result of NativeConfig and AnalysisConfig
 void compare(bool use_mkldnn = false) {
   AnalysisConfig cfg;
......
@@ -23,6 +23,11 @@ namespace inference {
 
 TEST(TensorRT_fc, compare) {
   std::string model_dir = FLAGS_infer_model + "/fc_uint8";
+  AnalysisConfig config;
+  config.EnableUseGpu(100, 0);
+  config.SetModel(model_dir);
+  config.DisableGlogInfo();
+  auto predictor = CreatePaddlePredictor(config);
   compare(model_dir, /* use_tensorrt */ true);
   // Open it when need.
   // profile(model_dir, /* use_analysis */ true, FLAGS_use_tensorrt);
......
@@ -23,6 +23,11 @@ namespace inference {
 
 TEST(TensorRT_mobilenet, compare) {
   std::string model_dir = FLAGS_infer_model + "/mobilenet";
+  AnalysisConfig config;
+  config.EnableUseGpu(100, 0);
+  config.SetModel(model_dir);
+  config.DisableGlogInfo();
+  auto predictor = CreatePaddlePredictor(config);
   compare(model_dir, /* use_tensorrt */ true);
   // Open it when need.
   // profile(model_dir, /* use_analysis */ true, FLAGS_use_tensorrt);
......
@@ -23,6 +23,11 @@ namespace inference {
 
 TEST(TensorRT_resnext50, compare) {
   std::string model_dir = FLAGS_infer_model + "/resnext50";
+  AnalysisConfig config;
+  config.EnableUseGpu(100, 0);
+  config.SetModel(model_dir);
+  config.DisableGlogInfo();
+  auto predictor = CreatePaddlePredictor(config);
   compare(model_dir, /* use_tensorrt */ true);
 }
......
@@ -39,10 +39,9 @@ enum class AllocationType : int8_t {
 const char* AllocationTypeStr(AllocationType type);
 
-PADDLE_API size_t
-GetOrRegisterGlobalDeviceTypeId(const std::string& device_type);
+size_t GetOrRegisterGlobalDeviceTypeId(const std::string& device_type);
 
-PADDLE_API std::string GetGlobalDeviceType(size_t device_type_id_);
+std::string GetGlobalDeviceType(size_t device_type_id_);
 
 /// \brief The place is used to specify where the data is stored.
 class PADDLE_API Place {
......
@@ -685,7 +685,8 @@ set PATH=%THIRD_PARTY_PATH:/=\%\install\openblas\lib;%THIRD_PARTY_PATH:/=\%\inst
 %THIRD_PARTY_PATH:/=\%\install\zlib\bin;%THIRD_PARTY_PATH:/=\%\install\mklml\lib;^
 %THIRD_PARTY_PATH:/=\%\install\mkldnn\bin;%THIRD_PARTY_PATH:/=\%\install\warpctc\bin;^
 %THIRD_PARTY_PATH:/=\%\install\onnxruntime\lib;%THIRD_PARTY_PATH:/=\%\install\paddle2onnx\lib;^
-%work_dir%\%BUILD_DIR%\paddle\fluid\inference;%PATH%
+%work_dir%\%BUILD_DIR%\paddle\fluid\inference;%work_dir%\%BUILD_DIR%\paddle\fluid\inference\capi_exp;^
+%PATH%
 
 REM TODO: make ut find .dll in install\onnxruntime\lib
 xcopy %THIRD_PARTY_PATH:/=\%\install\onnxruntime\lib\onnxruntime.dll %work_dir%\%BUILD_DIR%\paddle\fluid\inference\tests\api\ /Y
......
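The batch-script change above adds the capi_exp output directory to PATH so the C-API DLL is found when the tests run. A common CMake-side alternative, sketched below with the same hypothetical targets as the earlier sketch, is to copy the DLL next to the test executable in a post-build step; this is only an illustration and is not what the commit does.

# Hedged alternative sketch, hypothetical targets demo_test and demo_infer_shared.
if(WIN32)
  add_custom_command(
    TARGET demo_test POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy_if_different
            $<TARGET_FILE:demo_infer_shared>   # the built .dll
            $<TARGET_FILE_DIR:demo_test>)      # the test's output directory
endif()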