Unverified commit e6d81ddf, authored by Sing_chan, committed by GitHub

Revert "make inference_api_test compile with dynamic linking library (#41944)" (#43882)

This reverts commit f8b9073f.
Parent 5d6bdabb
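The reverted change (#41944) had linked the inference API tests against the single paddle_inference_shared library on all platforms; this revert restores per-component linking on Windows and macOS. A minimal sketch of the two linking styles, with hypothetical target and source names (the component names mirror those appearing later in this diff):

# Dynamic style (what #41944 introduced): one shared library satisfies the test.
add_executable(demo_test demo_test.cc)
target_link_libraries(demo_test paddle_inference_shared)

# Component style (restored by this revert on Windows/macOS).
add_executable(demo_test_static demo_test.cc)
target_link_libraries(demo_test_static paddle_inference_api analysis_predictor)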
......@@ -119,8 +119,6 @@ cc_library(
get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
target_link_libraries(paddle_inference_shared ${os_dependency_modules})
if(WIN32)
set_property(TARGET paddle_inference_shared
PROPERTY WINDOWS_EXPORT_ALL_SYMBOLS ON)
target_link_libraries(paddle_inference_shared gflags)
endif()
......
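For context, WINDOWS_EXPORT_ALL_SYMBOLS is a standard CMake target property that auto-generates a .def file exporting every symbol of a DLL; the property removed in the hunk above is what had let tests link against paddle_inference_shared on Windows without per-symbol export macros. A minimal standalone sketch with a hypothetical target:

# Export all symbols from the DLL, emulating Unix-style default visibility.
# The property only has an effect when building for Windows.
add_library(demo_shared SHARED demo.cc)
set_property(TARGET demo_shared PROPERTY WINDOWS_EXPORT_ALL_SYMBOLS ON)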
......@@ -49,10 +49,10 @@ function(inference_analysis_test_build TARGET)
SRCS
${analysis_test_SRCS}
DEPS
${analysis_test_EXTRA_DEPS}
analysis
pass
${GLOB_PASS_LIB})
${GLOB_PASS_LIB}
${analysis_test_EXTRA_DEPS})
endif()
endfunction()
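The hunks in this file move ${analysis_test_EXTRA_DEPS} from the front of the DEPS list to the end. With plain static archives the position can matter: GNU ld scans archives left to right and only pulls in objects that satisfy symbols already known to be undefined. A sketch of the pitfall, using hypothetical archive paths rather than CMake targets:

# libuses_foo.a calls foo(), which libfoo.a defines, so it must come first.
add_executable(order_demo main.cc)
target_link_libraries(order_demo
  ${CMAKE_CURRENT_BINARY_DIR}/libuses_foo.a   # references foo()
  ${CMAKE_CURRENT_BINARY_DIR}/libfoo.a)       # defines foo(); listed after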
......@@ -80,10 +80,10 @@ function(inference_analysis_test TARGET)
SRCS
${analysis_test_SRCS}
DEPS
${analysis_test_EXTRA_DEPS}
analysis
pass
${GLOB_PASS_LIB})
${GLOB_PASS_LIB}
${analysis_test_EXTRA_DEPS})
inference_base_test_run(${TARGET} COMMAND ${TARGET} ARGS
${analysis_test_ARGS})
endif()
......
......@@ -20,6 +20,10 @@ cc_library(
SRCS ${C_API_SRCS}
DEPS paddle_inference)
if(NOT ON_INFER)
return()
endif()
# Create inference capi shared library
cc_library(
paddle_inference_c_shared SHARED
......
......@@ -20,6 +20,10 @@ cc_library(
SRCS ${C_API_SRCS}
DEPS paddle_inference)
if(NOT ON_INFER)
return()
endif()
# Create inference capi shared library
cc_library(
paddle_inference_c_shared SHARED
......
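For readers unfamiliar with it, return() at directory scope stops processing of the current CMakeLists.txt, so the guard added in the two hunks above skips the C API shared-library target entirely unless ON_INFER is set. A minimal self-contained sketch:

option(ON_INFER "Build inference targets" OFF)  # Paddle defines this elsewhere
if(NOT ON_INFER)
  return()  # nothing below this point in the file is configured
endif()
add_library(capi_demo SHARED demo.cc)  # hypothetical target, skipped when OFF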
set(INFERENCE_EXTRA_DEPS paddle_inference_shared)
if(NOT APPLE AND NOT WIN32)
set(INFERENCE_EXTRA_DEPS paddle_inference_shared)
else()
set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_inference_io
ir_pass_manager analysis_predictor benchmark)
endif()
if(WITH_GPU AND TENSORRT_FOUND)
set(INFERENCE_EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} analysis ${analysis_deps})
endif()
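The restored logic selects the single shared library only on Linux and falls back to individual components on Windows and macOS, with TensorRT builds appending the analysis dependencies. The test rules later in this file consume the variable like this (hypothetical test name and model directory):

inference_analysis_test(
  demo_tester
  SRCS
  demo_tester.cc
  EXTRA_DEPS
  ${INFERENCE_EXTRA_DEPS}
  ARGS
  --infer_model=${DEMO_MODEL_DIR}/model)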
function(download_data install_dir data_file check_sum)
string(REGEX MATCH "[^/\\]+$" file_name ${data_file})
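The regular expression keeps everything after the last forward or backward slash, i.e. the file name component of a path or URL. For example, with a hypothetical URL:

set(data_file "https://example.com/models/resnet50.tgz")
string(REGEX MATCH "[^/\\]+$" file_name ${data_file})
message(STATUS "${file_name}")  # prints: resnet50.tgz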
......@@ -939,26 +948,18 @@ if(WITH_GPU AND TENSORRT_FOUND)
analyzer_capi_exp_gpu_tester.cc
EXTRA_DEPS
${INFERENCE_EXTRA_DEPS}
paddle_inference_c
ARGS
--infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)
if(WIN32)
target_link_libraries(test_analyzer_capi_exp_gpu paddle_inference_c_shared)
else()
target_link_libraries(test_analyzer_capi_exp_gpu paddle_inference_c)
endif()
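Blocks like this one, repeated for each C API test below, had picked the DLL-backed target on Windows and the default C API library elsewhere; the revert drops them in favor of a plain paddle_inference_c entry in EXTRA_DEPS. The removed pattern in isolation, with a hypothetical test target:

add_executable(capi_demo_test capi_demo_test.cc)
if(WIN32)
  # On Windows the C API symbols come from the DLL's import library.
  target_link_libraries(capi_demo_test paddle_inference_c_shared)
else()
  target_link_libraries(capi_demo_test paddle_inference_c)
endif()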
inference_analysis_test(
test_analyzer_capi_exp_xpu
SRCS
analyzer_capi_exp_xpu_tester.cc
EXTRA_DEPS
${INFERENCE_EXTRA_DEPS}
paddle_inference_c
ARGS
--infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)
if(WIN32)
target_link_libraries(test_analyzer_capi_exp_xpu paddle_inference_c_shared)
else()
target_link_libraries(test_analyzer_capi_exp_xpu paddle_inference_c)
endif()
set(TRT_MODEL_QUANT_RESNET_DIR
"${INFERENCE_DEMO_INSTALL_DIR}/small_quant_model")
......@@ -1106,13 +1107,9 @@ inference_analysis_test(
analyzer_capi_exp_tester.cc
EXTRA_DEPS
${INFERENCE_EXTRA_DEPS}
paddle_inference_c
ARGS
--infer_model=${RESNET50_MODEL_DIR}/model)
if(WIN32)
target_link_libraries(test_analyzer_capi_exp paddle_inference_c_shared)
else()
target_link_libraries(test_analyzer_capi_exp paddle_inference_c)
endif()
inference_analysis_test(
test_analyzer_capi_exp_pd_config
......@@ -1120,14 +1117,9 @@ inference_analysis_test(
analyzer_capi_exp_pd_config_tester.cc
EXTRA_DEPS
${INFERENCE_EXTRA_DEPS}
paddle_inference_c
ARGS
--infer_model=${MOBILENET_INSTALL_DIR}/model)
if(WIN32)
target_link_libraries(test_analyzer_capi_exp_pd_config
paddle_inference_c_shared)
else()
target_link_libraries(test_analyzer_capi_exp_pd_config paddle_inference_c)
endif()
inference_analysis_test(
test_analyzer_capi_exp_pd_tensor
......@@ -1135,14 +1127,9 @@ inference_analysis_test(
analyzer_capi_exp_pd_tensor_tester.cc
EXTRA_DEPS
${INFERENCE_EXTRA_DEPS}
paddle_inference_c
ARGS
--infer_model=${MOBILENET_INSTALL_DIR}/model)
if(WIN32)
target_link_libraries(test_analyzer_capi_exp_pd_tensor
paddle_inference_c_shared)
else()
target_link_libraries(test_analyzer_capi_exp_pd_tensor paddle_inference_c)
endif()
if(NOT APPLE AND NOT WIN32)
inference_analysis_test(
......@@ -1151,16 +1138,10 @@ if(NOT APPLE AND NOT WIN32)
analyzer_capi_exp_pd_threads_tester.cc
EXTRA_DEPS
${INFERENCE_EXTRA_DEPS}
paddle_inference_c
ARGS
--infer_model=${MOBILENET_INSTALL_DIR}/model)
if(WIN32)
target_link_libraries(test_analyzer_capi_exp_pd_threads
paddle_inference_c_shared)
else()
target_link_libraries(test_analyzer_capi_exp_pd_threads paddle_inference_c)
endif()
endif()
inference_analysis_test(
test_analyzer_zerocopytensor_tensor
SRCS
......@@ -1201,13 +1182,9 @@ if(WITH_MKLDNN)
analyzer_capi_exp_int_tester.cc
EXTRA_DEPS
${INFERENCE_EXTRA_DEPS}
paddle_inference_c
ARGS
--infer_model=${INT8_DATA_DIR}/resnet50/model)
if(WIN32)
target_link_libraries(test_analyzer_capi_exp_int paddle_inference_c_shared)
else()
target_link_libraries(test_analyzer_capi_exp_int paddle_inference_c)
endif()
endif()
inference_analysis_test(
......@@ -1216,13 +1193,9 @@ inference_analysis_test(
analyzer_capi_exp_ner_tester.cc
EXTRA_DEPS
${INFERENCE_EXTRA_DEPS}
paddle_inference_c
ARGS
--infer_model=${CHINESE_NER_INSTALL_DIR}/model)
if(WIN32)
target_link_libraries(test_analyzer_capi_exp_ner paddle_inference_c_shared)
else()
target_link_libraries(test_analyzer_capi_exp_ner paddle_inference_c)
endif()
if(WITH_GPU)
inference_analysis_test(
......
......@@ -66,6 +66,11 @@ void profile(bool use_mkldnn = false) {
FLAGS_num_threads);
}
TEST(Analyzer_resnet50, profile) { profile(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_resnet50, profile_mkldnn) { profile(true /* use_mkldnn */); }
#endif
// Check the fuse status
TEST(Analyzer_resnet50, fuse_statis) {
AnalysisConfig cfg;
......@@ -77,11 +82,6 @@ TEST(Analyzer_resnet50, fuse_statis) {
LOG(INFO) << "num_ops: " << num_ops;
}
TEST(Analyzer_resnet50, profile) { profile(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_resnet50, profile_mkldnn) { profile(true /* use_mkldnn */); }
#endif
// Compare result of NativeConfig and AnalysisConfig
void compare(bool use_mkldnn = false) {
AnalysisConfig cfg;
......
......@@ -23,11 +23,6 @@ namespace inference {
TEST(TensorRT_fc, compare) {
std::string model_dir = FLAGS_infer_model + "/fc_uint8";
AnalysisConfig config;
config.EnableUseGpu(100, 0);
config.SetModel(model_dir);
config.DisableGlogInfo();
auto predictor = CreatePaddlePredictor(config);
compare(model_dir, /* use_tensorrt */ true);
// Open it when need.
// profile(model_dir, /* use_analysis */ true, FLAGS_use_tensorrt);
......
......@@ -23,11 +23,6 @@ namespace inference {
TEST(TensorRT_mobilenet, compare) {
std::string model_dir = FLAGS_infer_model + "/mobilenet";
AnalysisConfig config;
config.EnableUseGpu(100, 0);
config.SetModel(model_dir);
config.DisableGlogInfo();
auto predictor = CreatePaddlePredictor(config);
compare(model_dir, /* use_tensorrt */ true);
// Open it when need.
// profile(model_dir, /* use_analysis */ true, FLAGS_use_tensorrt);
......
......@@ -23,11 +23,6 @@ namespace inference {
TEST(TensorRT_resnext50, compare) {
std::string model_dir = FLAGS_infer_model + "/resnext50";
AnalysisConfig config;
config.EnableUseGpu(100, 0);
config.SetModel(model_dir);
config.DisableGlogInfo();
auto predictor = CreatePaddlePredictor(config);
compare(model_dir, /* use_tensorrt */ true);
}
......
......@@ -39,9 +39,10 @@ enum class AllocationType : int8_t {
const char* AllocationTypeStr(AllocationType type);
size_t GetOrRegisterGlobalDeviceTypeId(const std::string& device_type);
PADDLE_API size_t
GetOrRegisterGlobalDeviceTypeId(const std::string& device_type);
std::string GetGlobalDeviceType(size_t device_type_id_);
PADDLE_API std::string GetGlobalDeviceType(size_t device_type_id_);
/// \brief The place is used to specify where the data is stored.
class PADDLE_API Place {
......
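The hunk above annotates these free functions with PADDLE_API, Paddle's export macro, so they stay visible across the shared-library boundary on Windows. One common way such a macro is generated on the CMake side, shown with a hypothetical target (Paddle's actual definition may differ):

include(GenerateExportHeader)
add_library(place_demo SHARED place.cc)  # hypothetical stand-in library
# Writes place_demo_export.h, which defines PADDLE_API as
# __declspec(dllexport) while building the DLL and dllimport for consumers.
generate_export_header(place_demo EXPORT_MACRO_NAME PADDLE_API)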
......@@ -685,8 +685,7 @@ set PATH=%THIRD_PARTY_PATH:/=\%\install\openblas\lib;%THIRD_PARTY_PATH:/=\%\inst
%THIRD_PARTY_PATH:/=\%\install\zlib\bin;%THIRD_PARTY_PATH:/=\%\install\mklml\lib;^
%THIRD_PARTY_PATH:/=\%\install\mkldnn\bin;%THIRD_PARTY_PATH:/=\%\install\warpctc\bin;^
%THIRD_PARTY_PATH:/=\%\install\onnxruntime\lib;%THIRD_PARTY_PATH:/=\%\install\paddle2onnx\lib;^
%work_dir%\%BUILD_DIR%\paddle\fluid\inference;%work_dir%\%BUILD_DIR%\paddle\fluid\inference\capi_exp;^
%PATH%
%work_dir%\%BUILD_DIR%\paddle\fluid\inference;%PATH%
REM TODO: make ut find .dll in install\onnxruntime\lib
xcopy %THIRD_PARTY_PATH:/=\%\install\onnxruntime\lib\onnxruntime.dll %work_dir%\%BUILD_DIR%\paddle\fluid\inference\tests\api\ /Y
......
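With the capi_exp DLL directory dropped from PATH, a test that still needs a DLL at runtime must find it next to its executable, which is what the xcopy workaround above does for onnxruntime.dll. One way to arrange the same thing from CMake is a post-build copy (hypothetical target names):

add_custom_command(TARGET capi_demo_test POST_BUILD
  COMMAND ${CMAKE_COMMAND} -E copy_if_different
          $<TARGET_FILE:paddle_inference_c_shared>
          $<TARGET_FILE_DIR:capi_demo_test>)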