From e6d81ddf03b1d7f478ec54308e6b20ac9272cd99 Mon Sep 17 00:00:00 2001
From: Sing_chan <51314274+betterpig@users.noreply.github.com>
Date: Tue, 28 Jun 2022 11:40:46 +0800
Subject: [PATCH] Revert "make inference_api_test compile with dynamic linking
 library (#41944)" (#43882)

This reverts commit f8b9073f0ecc8130c93ffac06f808cc57cad272b.

---
 paddle/fluid/inference/CMakeLists.txt         |  2 -
 .../fluid/inference/analysis/CMakeLists.txt   |  8 +--
 paddle/fluid/inference/capi/CMakeLists.txt    |  4 ++
 .../fluid/inference/capi_exp/CMakeLists.txt   |  4 ++
 .../fluid/inference/tests/api/CMakeLists.txt  | 63 ++++++------------
 .../analyzer_image_classification_tester.cc   | 10 +--
 .../inference/tests/api/trt_fc_prelu_test.cc  |  5 --
 .../inference/tests/api/trt_mobilenet_test.cc |  5 --
 .../inference/tests/api/trt_resnext_test.cc   |  5 --
 paddle/phi/common/place.h                     |  5 +-
 paddle/scripts/paddle_build.bat               |  3 +-
 11 files changed, 39 insertions(+), 75 deletions(-)

diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index 6ff4655429..4e991a3013 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -119,8 +119,6 @@ cc_library(
 get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
 target_link_libraries(paddle_inference_shared ${os_dependency_modules})
 if(WIN32)
-  set_property(TARGET paddle_inference_shared
-               PROPERTY WINDOWS_EXPORT_ALL_SYMBOLS ON)
   target_link_libraries(paddle_inference_shared gflags)
 endif()
 
diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt
index c001f5eb8d..4b7bed65ba 100644
--- a/paddle/fluid/inference/analysis/CMakeLists.txt
+++ b/paddle/fluid/inference/analysis/CMakeLists.txt
@@ -49,10 +49,10 @@ function(inference_analysis_test_build TARGET)
       SRCS
       ${analysis_test_SRCS}
       DEPS
-      ${analysis_test_EXTRA_DEPS}
       analysis
       pass
-      ${GLOB_PASS_LIB})
+      ${GLOB_PASS_LIB}
+      ${analysis_test_EXTRA_DEPS})
   endif()
 endfunction()
 
@@ -80,10 +80,10 @@ function(inference_analysis_test TARGET)
      SRCS
       ${analysis_test_SRCS}
       DEPS
-      ${analysis_test_EXTRA_DEPS}
       analysis
       pass
-      ${GLOB_PASS_LIB})
+      ${GLOB_PASS_LIB}
+      ${analysis_test_EXTRA_DEPS})
     inference_base_test_run(${TARGET} COMMAND ${TARGET} ARGS
                             ${analysis_test_ARGS})
   endif()
diff --git a/paddle/fluid/inference/capi/CMakeLists.txt b/paddle/fluid/inference/capi/CMakeLists.txt
index 25d8a39dc6..73ba41607a 100644
--- a/paddle/fluid/inference/capi/CMakeLists.txt
+++ b/paddle/fluid/inference/capi/CMakeLists.txt
@@ -20,6 +20,10 @@ cc_library(
   SRCS ${C_API_SRCS}
   DEPS paddle_inference)
 
+if(NOT ON_INFER)
+  return()
+endif()
+
 # Create inference capi shared library
 cc_library(
   paddle_inference_c_shared SHARED
diff --git a/paddle/fluid/inference/capi_exp/CMakeLists.txt b/paddle/fluid/inference/capi_exp/CMakeLists.txt
index 56de57cbb9..e35e14a0c0 100644
--- a/paddle/fluid/inference/capi_exp/CMakeLists.txt
+++ b/paddle/fluid/inference/capi_exp/CMakeLists.txt
@@ -20,6 +20,10 @@ cc_library(
   SRCS ${C_API_SRCS}
   DEPS paddle_inference)
 
+if(NOT ON_INFER)
+  return()
+endif()
+
 # Create inference capi shared library
 cc_library(
   paddle_inference_c_shared SHARED
diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index d6c4c18492..8261ce288c 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -1,4 +1,13 @@
-set(INFERENCE_EXTRA_DEPS paddle_inference_shared)
+if(NOT APPLE AND NOT WIN32)
+  set(INFERENCE_EXTRA_DEPS paddle_inference_shared)
+else()
+  set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_inference_io
+      ir_pass_manager analysis_predictor benchmark)
+endif()
+
+if(WITH_GPU AND TENSORRT_FOUND)
+  set(INFERENCE_EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} analysis ${analysis_deps})
+endif()
 
 function(download_data install_dir data_file check_sum)
   string(REGEX MATCH "[^/\\]+$" file_name ${data_file})
@@ -939,26 +948,18 @@ if(WITH_GPU AND TENSORRT_FOUND)
     analyzer_capi_exp_gpu_tester.cc
     EXTRA_DEPS
     ${INFERENCE_EXTRA_DEPS}
+    paddle_inference_c
     ARGS
     --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)
-  if(WIN32)
-    target_link_libraries(test_analyzer_capi_exp_gpu paddle_inference_c_shared)
-  else()
-    target_link_libraries(test_analyzer_capi_exp_gpu paddle_inference_c)
-  endif()
 
   inference_analysis_test(
     test_analyzer_capi_exp_xpu
     SRCS
     analyzer_capi_exp_xpu_tester.cc
     EXTRA_DEPS
     ${INFERENCE_EXTRA_DEPS}
+    paddle_inference_c
     ARGS
     --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)
-  if(WIN32)
-    target_link_libraries(test_analyzer_capi_exp_xpu paddle_inference_c_shared)
-  else()
-    target_link_libraries(test_analyzer_capi_exp_xpu paddle_inference_c)
-  endif()
   set(TRT_MODEL_QUANT_RESNET_DIR
       "${INFERENCE_DEMO_INSTALL_DIR}/small_quant_model")
@@ -1106,13 +1107,9 @@ inference_analysis_test(
   analyzer_capi_exp_tester.cc
   EXTRA_DEPS
   ${INFERENCE_EXTRA_DEPS}
+  paddle_inference_c
   ARGS
   --infer_model=${RESNET50_MODEL_DIR}/model)
-if(WIN32)
-  target_link_libraries(test_analyzer_capi_exp paddle_inference_c_shared)
-else()
-  target_link_libraries(test_analyzer_capi_exp paddle_inference_c)
-endif()
 
 inference_analysis_test(
   test_analyzer_capi_exp_pd_config
@@ -1120,14 +1117,9 @@ inference_analysis_test(
   analyzer_capi_exp_pd_config_tester.cc
   EXTRA_DEPS
   ${INFERENCE_EXTRA_DEPS}
+  paddle_inference_c
   ARGS
   --infer_model=${MOBILENET_INSTALL_DIR}/model)
-if(WIN32)
-  target_link_libraries(test_analyzer_capi_exp_pd_config
-                        paddle_inference_c_shared)
-else()
-  target_link_libraries(test_analyzer_capi_exp_pd_config paddle_inference_c)
-endif()
 
 inference_analysis_test(
   test_analyzer_capi_exp_pd_tensor
@@ -1135,14 +1127,9 @@ inference_analysis_test(
   analyzer_capi_exp_pd_tensor_tester.cc
   EXTRA_DEPS
   ${INFERENCE_EXTRA_DEPS}
+  paddle_inference_c
   ARGS
   --infer_model=${MOBILENET_INSTALL_DIR}/model)
-if(WIN32)
-  target_link_libraries(test_analyzer_capi_exp_pd_tensor
-                        paddle_inference_c_shared)
-else()
-  target_link_libraries(test_analyzer_capi_exp_pd_tensor paddle_inference_c)
-endif()
 
 if(NOT APPLE AND NOT WIN32)
   inference_analysis_test(
@@ -1151,16 +1138,10 @@ if(NOT APPLE AND NOT WIN32)
     analyzer_capi_exp_pd_threads_tester.cc
     EXTRA_DEPS
     ${INFERENCE_EXTRA_DEPS}
+    paddle_inference_c
     ARGS
     --infer_model=${MOBILENET_INSTALL_DIR}/model)
-  if(WIN32)
-    target_link_libraries(test_analyzer_capi_exp_pd_threads
-                          paddle_inference_c_shared)
-  else()
-    target_link_libraries(test_analyzer_capi_exp_pd_threads paddle_inference_c)
-  endif()
 endif()
-
 inference_analysis_test(
   test_analyzer_zerocopytensor_tensor
   SRCS
@@ -1201,13 +1182,9 @@ if(WITH_MKLDNN)
     analyzer_capi_exp_int_tester.cc
     EXTRA_DEPS
     ${INFERENCE_EXTRA_DEPS}
+    paddle_inference_c
     ARGS
     --infer_model=${INT8_DATA_DIR}/resnet50/model)
-  if(WIN32)
-    target_link_libraries(test_analyzer_capi_exp_int paddle_inference_c_shared)
-  else()
-    target_link_libraries(test_analyzer_capi_exp_int paddle_inference_c)
-  endif()
 endif()
 
 inference_analysis_test(
@@ -1216,13 +1193,9 @@ inference_analysis_test(
   analyzer_capi_exp_ner_tester.cc
   EXTRA_DEPS
   ${INFERENCE_EXTRA_DEPS}
+  paddle_inference_c
   ARGS
   --infer_model=${CHINESE_NER_INSTALL_DIR}/model)
-if(WIN32)
-  target_link_libraries(test_analyzer_capi_exp_ner paddle_inference_c_shared)
-else()
-  target_link_libraries(test_analyzer_capi_exp_ner paddle_inference_c)
-endif()
 
 if(WITH_GPU)
   inference_analysis_test(
diff --git a/paddle/fluid/inference/tests/api/analyzer_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_image_classification_tester.cc
index dc8921ef73..0df36592cc 100644
--- a/paddle/fluid/inference/tests/api/analyzer_image_classification_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_image_classification_tester.cc
@@ -66,6 +66,11 @@ void profile(bool use_mkldnn = false) {
                  FLAGS_num_threads);
 }
 
+TEST(Analyzer_resnet50, profile) { profile(); }
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_resnet50, profile_mkldnn) { profile(true /* use_mkldnn */); }
+#endif
+
 // Check the fuse status
 TEST(Analyzer_resnet50, fuse_statis) {
   AnalysisConfig cfg;
@@ -77,11 +82,6 @@ TEST(Analyzer_resnet50, fuse_statis) {
   LOG(INFO) << "num_ops: " << num_ops;
 }
 
-TEST(Analyzer_resnet50, profile) { profile(); }
-#ifdef PADDLE_WITH_MKLDNN
-TEST(Analyzer_resnet50, profile_mkldnn) { profile(true /* use_mkldnn */); }
-#endif
-
 // Compare result of NativeConfig and AnalysisConfig
 void compare(bool use_mkldnn = false) {
   AnalysisConfig cfg;
diff --git a/paddle/fluid/inference/tests/api/trt_fc_prelu_test.cc b/paddle/fluid/inference/tests/api/trt_fc_prelu_test.cc
index 70c1eb8bab..93d4a88383 100644
--- a/paddle/fluid/inference/tests/api/trt_fc_prelu_test.cc
+++ b/paddle/fluid/inference/tests/api/trt_fc_prelu_test.cc
@@ -23,11 +23,6 @@ namespace inference {
 
 TEST(TensorRT_fc, compare) {
   std::string model_dir = FLAGS_infer_model + "/fc_uint8";
-  AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
-  config.SetModel(model_dir);
-  config.DisableGlogInfo();
-  auto predictor = CreatePaddlePredictor(config);
   compare(model_dir, /* use_tensorrt */ true);
   // Open it when need.
   // profile(model_dir, /* use_analysis */ true, FLAGS_use_tensorrt);
diff --git a/paddle/fluid/inference/tests/api/trt_mobilenet_test.cc b/paddle/fluid/inference/tests/api/trt_mobilenet_test.cc
index 45c14f4fc8..3b25c32fc7 100644
--- a/paddle/fluid/inference/tests/api/trt_mobilenet_test.cc
+++ b/paddle/fluid/inference/tests/api/trt_mobilenet_test.cc
@@ -23,11 +23,6 @@ namespace inference {
 
 TEST(TensorRT_mobilenet, compare) {
   std::string model_dir = FLAGS_infer_model + "/mobilenet";
-  AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
-  config.SetModel(model_dir);
-  config.DisableGlogInfo();
-  auto predictor = CreatePaddlePredictor(config);
   compare(model_dir, /* use_tensorrt */ true);
   // Open it when need.
   // profile(model_dir, /* use_analysis */ true, FLAGS_use_tensorrt);
diff --git a/paddle/fluid/inference/tests/api/trt_resnext_test.cc b/paddle/fluid/inference/tests/api/trt_resnext_test.cc
index 8d4e331fa9..374074957c 100644
--- a/paddle/fluid/inference/tests/api/trt_resnext_test.cc
+++ b/paddle/fluid/inference/tests/api/trt_resnext_test.cc
@@ -23,11 +23,6 @@ namespace inference {
 
 TEST(TensorRT_resnext50, compare) {
   std::string model_dir = FLAGS_infer_model + "/resnext50";
-  AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
-  config.SetModel(model_dir);
-  config.DisableGlogInfo();
-  auto predictor = CreatePaddlePredictor(config);
   compare(model_dir, /* use_tensorrt */ true);
 }
 
diff --git a/paddle/phi/common/place.h b/paddle/phi/common/place.h
index ead3e463c2..cbc1faf94f 100644
--- a/paddle/phi/common/place.h
+++ b/paddle/phi/common/place.h
@@ -39,9 +39,10 @@ enum class AllocationType : int8_t {
 
 const char* AllocationTypeStr(AllocationType type);
 
-size_t GetOrRegisterGlobalDeviceTypeId(const std::string& device_type);
+PADDLE_API size_t
+GetOrRegisterGlobalDeviceTypeId(const std::string& device_type);
 
-std::string GetGlobalDeviceType(size_t device_type_id_);
+PADDLE_API std::string GetGlobalDeviceType(size_t device_type_id_);
 
 /// \brief The place is used to specify where the data is stored.
 class PADDLE_API Place {
diff --git a/paddle/scripts/paddle_build.bat b/paddle/scripts/paddle_build.bat
index 9680ec234b..d87915d172 100644
--- a/paddle/scripts/paddle_build.bat
+++ b/paddle/scripts/paddle_build.bat
@@ -685,8 +685,7 @@ set PATH=%THIRD_PARTY_PATH:/=\%\install\openblas\lib;%THIRD_PARTY_PATH:/=\%\inst
 %THIRD_PARTY_PATH:/=\%\install\zlib\bin;%THIRD_PARTY_PATH:/=\%\install\mklml\lib;^
 %THIRD_PARTY_PATH:/=\%\install\mkldnn\bin;%THIRD_PARTY_PATH:/=\%\install\warpctc\bin;^
 %THIRD_PARTY_PATH:/=\%\install\onnxruntime\lib;%THIRD_PARTY_PATH:/=\%\install\paddle2onnx\lib;^
-%work_dir%\%BUILD_DIR%\paddle\fluid\inference;%work_dir%\%BUILD_DIR%\paddle\fluid\inference\capi_exp;^
-%PATH%
+%work_dir%\%BUILD_DIR%\paddle\fluid\inference;%PATH%
 
 REM TODO: make ut find .dll in install\onnxruntime\lib
 xcopy %THIRD_PARTY_PATH:/=\%\install\onnxruntime\lib\onnxruntime.dll %work_dir%\%BUILD_DIR%\paddle\fluid\inference\tests\api\ /Y
-- 
GitLab
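Reviewer note on the Windows symbol-export mechanics this revert relies on
(placed after the signature line, so `git am` ignores it): dropping
WINDOWS_EXPORT_ALL_SYMBOLS from paddle_inference_shared in the first hunk
means MSVC no longer exports every symbol from the DLL automatically, so any
function called across the DLL boundary must be annotated explicitly. That is
why the place.h hunk adds PADDLE_API to GetOrRegisterGlobalDeviceTypeId and
GetGlobalDeviceType. The sketch below shows the usual export-macro pattern in
C++; it is a minimal illustration only, not Paddle's actual macro definition,
and the build-side guard name PADDLE_BUILDING_DLL is a hypothetical
placeholder for whatever define the real build sets.

    // export_macro_sketch.h -- illustrative only, not Paddle's real header.
    #pragma once
    #include <cstddef>
    #include <string>

    #if defined(_WIN32)
    // PADDLE_BUILDING_DLL is a hypothetical guard: defined while compiling
    // the DLL itself, absent for consumers that link against it.
    #if defined(PADDLE_BUILDING_DLL)
    #define PADDLE_API __declspec(dllexport)  // building: export the symbol
    #else
    #define PADDLE_API __declspec(dllimport)  // consuming: import the symbol
    #endif
    #else
    // Non-Windows toolchains export by default; make it explicit anyway.
    #define PADDLE_API __attribute__((visibility("default")))
    #endif

    // Declarations that cross the DLL boundary carry the macro, mirroring
    // the place.h hunk above (which uses plain size_t in Paddle's headers).
    PADDLE_API std::size_t
    GetOrRegisterGlobalDeviceTypeId(const std::string& device_type);

A related detail: the analysis/CMakeLists.txt hunks move
${analysis_test_EXTRA_DEPS} from before `analysis` and `pass` to after
${GLOB_PASS_LIB}. With static libraries, linkers such as ld resolve symbols
left to right, so an archive generally has to appear after the targets that
reference it; reordering the DEPS list this way is presumably what lets the
tests link again once EXTRA_DEPS may name static archives instead of the
single paddle_inference_shared library.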