diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index 2a5595307ca27a8d3a93cfa74458980c3a5d8af4..059c3a04487cce103bf55de965cea2400997a7e8 100644
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -137,7 +137,7 @@ function(copy_part_of_thrid_party TARGET DST)
 endfunction()

 # inference library for only inference
-set(inference_lib_deps third_party paddle_fluid paddle_fluid_c paddle_fluid_shared paddle_fluid_c_shared)
+set(inference_lib_deps third_party paddle_inference paddle_inference_c paddle_inference_shared paddle_inference_c_shared)
 add_custom_target(inference_lib_dist DEPENDS ${inference_lib_deps})

@@ -164,20 +164,20 @@ copy_part_of_thrid_party(inference_lib_dist ${PADDLE_INFERENCE_INSTALL_DIR})
 set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
 if(WIN32)
     if(WITH_STATIC_LIB)
-        set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/libpaddle_fluid.lib
-            ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_fluid.*)
+        set(paddle_inference_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/libpaddle_inference.lib
+            ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_inference.*)
     else()
-        set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_fluid.dll
-            ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_fluid.lib)
+        set(paddle_inference_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_inference.dll
+            ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_inference.lib)
     endif()
     copy(inference_lib_dist
-            SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_fluid_lib}
+            SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_inference_lib}
             DSTS ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/include ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib
                  ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib)
 else(WIN32)
-    set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*)
+    set(paddle_inference_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_inference.*)
     copy(inference_lib_dist
-            SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_fluid_lib}
+            SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_inference_lib}
             DSTS ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/include ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib)
 endif(WIN32)

@@ -196,13 +196,13 @@ copy_part_of_thrid_party(inference_lib_dist ${PADDLE_INFERENCE_C_INSTALL_DIR})
 set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
 if(WIN32)
-    set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/${CMAKE_BUILD_TYPE}/paddle_fluid_c.*)
+    set(paddle_inference_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/${CMAKE_BUILD_TYPE}/paddle_inference_c.*)
 else(WIN32)
-    set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/libpaddle_fluid_c.*)
+    set(paddle_inference_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/libpaddle_inference_c.*)
 endif(WIN32)

 copy(inference_lib_dist
-        SRCS ${src_dir}/inference/capi/paddle_c_api.h ${paddle_fluid_c_lib}
+        SRCS ${src_dir}/inference/capi/paddle_c_api.h ${paddle_inference_c_lib}
         DSTS ${PADDLE_INFERENCE_C_INSTALL_DIR}/paddle/include ${PADDLE_INFERENCE_C_INSTALL_DIR}/paddle/lib)

 # fluid library for both train and inference
@@ -213,12 +213,12 @@ set(dst_dir "${PADDLE_INSTALL_DIR}/paddle/fluid")
 set(module "inference")
 if(WIN32)
     copy(fluid_lib_dist
-        SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_fluid_lib}
+        SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_inference_lib}
         DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
     )
 else()
     copy(fluid_lib_dist
-        SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_fluid_lib}
+        SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_inference_lib}
         DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
     )
 endif()
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index fb55d5463621ec1fa4e6fb8dcbfa03a85fad51ee..8ef6bcd8600c8521afd4eafe2d0943eca77b877a 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -17,8 +17,7 @@ if(WITH_TESTING)
   include(tests/test.cmake) # some generic cmake function for inference
 endif()

-# TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
-cc_library(paddle_fluid_api
+cc_library(paddle_inference_io
     SRCS io.cc
     DEPS paddle_framework ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS})

@@ -46,15 +45,15 @@ set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor
     analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg})
 #TODO(wilber, T8T9): Do we still need to support windows gpu static library?
 if(WIN32 AND WITH_GPU)
-  cc_library(paddle_fluid DEPS ${fluid_modules} ${STATIC_INFERENCE_API})
+  cc_library(paddle_inference DEPS ${fluid_modules} ${STATIC_INFERENCE_API})
 else()
-  create_static_lib(paddle_fluid ${fluid_modules} ${STATIC_INFERENCE_API})
+  create_static_lib(paddle_inference ${fluid_modules} ${STATIC_INFERENCE_API})
 endif()

 if(NOT APPLE)
   # TODO(liuyiqu: Temporarily disable the link flag because it is not support on Mac.
-  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
-  set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
+  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference.sym")
+  set_target_properties(paddle_inference PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
 endif()

 # C inference API
@@ -88,30 +87,30 @@ if (WITH_PSCORE)
 endif ()

 # Create shared inference library
-cc_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
+cc_library(paddle_inference_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
     DEPS ${SHARED_INFERENCE_DEPS})

 get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
-target_link_libraries(paddle_fluid_shared ${os_dependency_modules})
+target_link_libraries(paddle_inference_shared ${os_dependency_modules})
 if(WIN32)
-  target_link_libraries(paddle_fluid_shared gflags)
+  target_link_libraries(paddle_inference_shared gflags)
 endif()

-set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid)
+set_target_properties(paddle_inference_shared PROPERTIES OUTPUT_NAME paddle_inference)
 if(NOT APPLE AND NOT WIN32)
   # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac.
-  set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.map")
-  set_target_properties(paddle_fluid_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
+  set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference.map")
+  set_target_properties(paddle_inference_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
   # check symbol hidden
   FILE(WRITE ${CMAKE_CURRENT_BINARY_DIR}/check_symbol.cmake
     "execute_process(COMMAND sh -c \"${CMAKE_CURRENT_SOURCE_DIR}/check_symbol.sh"
-    " ${CMAKE_CURRENT_BINARY_DIR}/libpaddle_fluid.so\" RESULT_VARIABLE symbol_res)\n"
+    " ${CMAKE_CURRENT_BINARY_DIR}/libpaddle_inference.so\" RESULT_VARIABLE symbol_res)\n"
     "if(NOT \"\${symbol_res}\" STREQUAL \"0\")\n"
     "  message(FATAL_ERROR \"Check symbol failed.\")\n"
     "endif()\n")
   add_custom_command(
     OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/.check_symbol"
     COMMAND ${CMAKE_COMMAND} -P "${CMAKE_CURRENT_BINARY_DIR}/check_symbol.cmake"
-    DEPENDS paddle_fluid_shared)
+    DEPENDS paddle_inference_shared)
   add_custom_target(check_symbol ALL DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/.check_symbol")
 endif()
diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt
index 98554ed04976670c1a846cbeab69815417c0a998..dab1b9f7b113523b12f468886b56d5038fe3f165 100644
--- a/paddle/fluid/inference/analysis/CMakeLists.txt
+++ b/paddle/fluid/inference/analysis/CMakeLists.txt
@@ -1,13 +1,13 @@
 unset(analysis_deps CACHE)
 set(analysis_deps # analysis_deps can be extended accross the project
-        framework_proto proto_desc graph pass paddle_fluid_api executor pretty_log
+        framework_proto proto_desc graph pass paddle_inference_io executor pretty_log
         ir_pass_manager
         CACHE INTERNAL "")

 add_subdirectory(ir_passes)
 add_subdirectory(passes)

-cc_library(analysis_helper SRCS helper.cc DEPS framework_proto proto_desc graph paddle_fluid_api)
+cc_library(analysis_helper SRCS helper.cc DEPS framework_proto proto_desc graph paddle_inference_io)

 cc_library(ir_pass_manager SRCS ir_pass_manager.cc DEPS graph pass ${INFER_IR_PASSES} analysis_helper)

@@ -62,7 +62,7 @@ endfunction(inference_analysis_test)
 if (NOT APPLE AND NOT WIN32)
   inference_analysis_test(test_analyzer
     SRCS analyzer_tester.cc
-    EXTRA_DEPS reset_tensor_array paddle_fluid_shared
+    EXTRA_DEPS reset_tensor_array paddle_inference_shared
     ARGS --inference_model_dir=${WORD2VEC_MODEL_DIR})
 elseif(WIN32)
   inference_analysis_test(test_analyzer
diff --git a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
index 4402d5c595a2370d7d6c451c8f051d18d239d207..c697914904b3e949050d6e61a7edb521ad6dd0e5 100644
--- a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
+++ b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
@@ -271,9 +271,14 @@ void LiteSubgraphPass::SetUpEngine(
       paddle::lite_api::Place({target_type, precision_type}),
       paddle::lite_api::Place({target_type, PRECISION(kInt64)}),
       paddle::lite_api::Place({target_type, PRECISION(kFloat)}),
-      paddle::lite_api::Place({TARGET(kHost), PRECISION(kFloat)}),
+#ifdef PADDLE_WITH_ARM
+      paddle::lite_api::Place({TARGET(kARM), precision_type}),
+      paddle::lite_api::Place({TARGET(kARM), PRECISION(kFloat)}),
+#else
       paddle::lite_api::Place({TARGET(kX86), precision_type}),
       paddle::lite_api::Place({TARGET(kX86), PRECISION(kFloat)}),
+#endif
+      paddle::lite_api::Place({TARGET(kHost), PRECISION(kFloat)}),
   };
   config.cpu_math_library_num_threads = cpu_math_library_num_threads;
   config.xpu_l3_workspace_size = xpu_l3_workspace_size;
diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
index 8bf4c5499db85fcfd64bd5350667c0e71fb6d220..22aa210c97ef821f6d18b704c71d8b23eeb74944 100755
--- a/paddle/fluid/inference/api/CMakeLists.txt
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -50,7 +50,7 @@ cc_test(test_paddle_inference_api SRCS api_tester.cc DEPS paddle_inference_api)

 if(WITH_TESTING)
   if (NOT APPLE AND NOT WIN32)
-    inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS paddle_fluid_shared
+    inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS paddle_inference_shared
       ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${IMG_CLS_RESNET_INSTALL_DIR})
     set_tests_properties(test_api_impl PROPERTIES DEPENDS test_image_classification)
   elseif(WIN32)
@@ -62,7 +62,7 @@ if(WITH_TESTING)
 endif()

 if (NOT APPLE AND NOT WIN32)
-  cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS paddle_fluid_shared
+  cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS paddle_inference_shared
     ARGS --dirname=${WORD2VEC_MODEL_DIR})
 elseif (WIN32)
   cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS analysis_predictor benchmark ${inference_deps}
diff --git a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
index a09f5776c71f570a77a5162d5070c2f65ff24596..e24d83af2f3685f94c79ecf7b6be0dcb11b72f7d 100644
--- a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
+++ b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
@@ -132,12 +132,12 @@ else()
 endif()

 if(WITH_STATIC_LIB)
-  set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+  set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
 else()
   if(WIN32)
-    set(DEPS ${PADDLE_LIB}/paddle/lib/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+    set(DEPS ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
   else()
-    set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+    set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
   endif()
 endif()

@@ -204,7 +204,7 @@ if(WIN32)
   endif()
   if(NOT WITH_STATIC_LIB)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+      COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
     )
   endif()
 endif()
diff --git a/paddle/fluid/inference/api/demo_ci/windows_inference.md b/paddle/fluid/inference/api/demo_ci/windows_inference.md
index 44b2586ad6d33ce7cbd2bb3080acc96b5e27f660..73938cb995f17a0ccd8df7effce18c4f81c03916 100644
--- a/paddle/fluid/inference/api/demo_ci/windows_inference.md
+++ b/paddle/fluid/inference/api/demo_ci/windows_inference.md
@@ -1,14 +1,14 @@
 # windows inference
-本文介绍windows inference,目前只提供了静态编译,编译出paddle_fluid.lib,包含了除openblas.dll之外的所有第三方依赖库。
+本文介绍windows inference,目前只提供了静态编译,编译出paddle_inference.lib,包含了除openblas.dll之外的所有第三方依赖库。

-1. 下载最新的paddle_fluid.lib和openblas.dll,并把它们放在同一个目录下。
+1. 下载最新的paddle_inference.lib和openblas.dll,并把它们放在同一个目录下。

 2. 准备预训练好的模型文件,例如models中的模型,可以将模型用safe_inference_model接口保存下来。将模型文件放到该目录下

 3. 进入Paddle/paddle/fluid/inference/api/demo_ci目录,新建build目录,然后使用cmake生成vs2015的solution文件。
-其中PADDLE_LIB是前面的paddle_fluid.lib对应文件夹, CUDA_LIB指定为x64格式下的cuda系统库目录文件夹。
+其中PADDLE_LIB是前面的paddle_inference.lib对应文件夹, CUDA_LIB指定为x64格式下的cuda系统库目录文件夹。
 ```shell
-  cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DWITH_MKL=OFF -DWITH_STATIC_LIB=ON -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=inference_icnet -DPADDLE_LIB=D:\to_the_paddle_fluid.lib -DCUDA_LIB=D:\tools\v8.0\lib\x64
+  cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DWITH_MKL=OFF -DWITH_STATIC_LIB=ON -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=inference_icnet -DPADDLE_LIB=D:\to_the_paddle_inference.lib -DCUDA_LIB=D:\tools\v8.0\lib\x64
 ```

 然后用vs2015打开对应的项目文件,注意使用静态链接 "/MT",生成对应的exe。将openblas.dll放到exe所在目录。
diff --git a/paddle/fluid/inference/api/high_level_api.md b/paddle/fluid/inference/api/high_level_api.md
index 6c4471d868acde0b897db31a1aaa1a01f592aa8e..5b90c7d369c57a9f5ebf76e201dbfcd7b9fc790e 100644
--- a/paddle/fluid/inference/api/high_level_api.md
+++ b/paddle/fluid/inference/api/high_level_api.md
@@ -1,7 +1,7 @@
 # Inference High-level APIs
 This document describes the high-level inference APIs, one can use them to deploy a Paddle model for an application quickly.

-The APIs are described in `paddle_inference_api.h`, just one header file, and two libaries `libpaddle_fluid.so` and `libpaddle_fluid_api.so` are needed for a deployment.
+The APIs are described in `paddle_inference_api.h`, just one header file, and two libaries `libpaddle_inference.so` and `libpaddle_inference_io.so` are needed for a deployment.

 ## PaddleTensor
 We provide the `PaddleTensor` data structure to give a general tensor interface.
diff --git a/paddle/fluid/inference/api/high_level_api_cn.md b/paddle/fluid/inference/api/high_level_api_cn.md
index 5a15bf7893f1106222ffa5d7eb0cafaf994322f8..0d420f3369742a73f2592cd780ed4bc3bebe9439 100644
--- a/paddle/fluid/inference/api/high_level_api_cn.md
+++ b/paddle/fluid/inference/api/high_level_api_cn.md
@@ -5,7 +5,7 @@
 预测库包含:

 - 头文件 `paddle_inference_api.h` 定义了所有的接口
-- 库文件 `libpaddle_fluid.so/.a(Linux/Mac)` `libpaddle_fluid.lib/paddle_fluid.dll(Windows)`
+- 库文件 `libpaddle_inference.so/.a(Linux/Mac)` `libpaddle_inference.lib/paddle_inference.dll(Windows)`

 下面是详细的一些 API 概念介绍

@@ -76,7 +76,7 @@ CHECK(predictor->Run(slots, &outputs));
 // 获取 outputs ...
 ```

-编译时,联编 `libpaddle_fluid.a/.so(Linux/Mac)` 或 `libpaddle_fluid.lib/paddle_fluid.dll(Windows)` 便可。
+编译时,联编 `libpaddle_inference.a/.so(Linux/Mac)` 或 `libpaddle_inference.lib/paddle_inference.dll(Windows)` 便可。

 ## 详细代码参考
diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc
index 4d40334cbc0b1f9f7cd1339fc87fdd368700c7f0..b7291ef3077df9120730676d80cdfacd768a699a 100644
--- a/paddle/fluid/inference/api/paddle_pass_builder.cc
+++ b/paddle/fluid/inference/api/paddle_pass_builder.cc
@@ -134,11 +134,15 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
         "fc_elementwise_layernorm_fuse_pass",       //
 #if CUDNN_VERSION >= 7100  // To run conv_fusion, the version of cudnn must be
                            // guaranteed at least v7
+// cudnn8.0 has memory leak problem in conv + eltwise + act, so we
+// disable the pass.
+#if !(CUDNN_VERSION >= 8000 && CUDNN_VERSION < 8100)
         "conv_elementwise_add_act_fuse_pass",       //
         "conv_elementwise_add2_act_fuse_pass",      //
-        "conv_elementwise_add_fuse_pass",           //
-#endif                                              //
-        "transpose_flatten_concat_fuse_pass",       //
+#endif
+        "conv_elementwise_add_fuse_pass",           //
+#endif                                              //
+        "transpose_flatten_concat_fuse_pass",       //
         // following pass should be located in the last, since it will
         // work on all fused ops.
         "runtime_context_cache_pass"
diff --git a/paddle/fluid/inference/capi/CMakeLists.txt b/paddle/fluid/inference/capi/CMakeLists.txt
index 7a555279f85089331d3e66f11f62574c4ae8b697..32f780122bcd61e0ef9ec53ad3380d11f54345aa 100644
--- a/paddle/fluid/inference/capi/CMakeLists.txt
+++ b/paddle/fluid/inference/capi/CMakeLists.txt
@@ -15,15 +15,15 @@
 set(C_API_SRCS pd_config.cc pd_predictor.cc pd_tensor.cc c_api.cc)

-cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS paddle_fluid)
+cc_library(paddle_inference_c SRCS ${C_API_SRCS} DEPS paddle_inference)

 if(NOT ON_INFER)
   return()
 endif()

 # Create inference capi shared library
-cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS paddle_fluid)
-set_target_properties(paddle_fluid_c_shared PROPERTIES OUTPUT_NAME paddle_fluid_c)
+cc_library(paddle_inference_c_shared SHARED SRCS ${C_API_SRCS} DEPS paddle_inference)
+set_target_properties(paddle_inference_c_shared PROPERTIES OUTPUT_NAME paddle_inference_c)
 if(WIN32)
-  target_link_libraries(paddle_fluid_c_shared shlwapi.lib)
+  target_link_libraries(paddle_inference_c_shared shlwapi.lib)
 endif()
diff --git a/paddle/fluid/inference/paddle_fluid.map b/paddle/fluid/inference/paddle_inference.map
similarity index 100%
rename from paddle/fluid/inference/paddle_fluid.map
rename to paddle/fluid/inference/paddle_inference.map
diff --git a/paddle/fluid/inference/paddle_fluid.sym b/paddle/fluid/inference/paddle_inference.sym
similarity index 100%
rename from paddle/fluid/inference/paddle_fluid.sym
rename to paddle/fluid/inference/paddle_inference.sym
diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index 2fa076b002715e79f50f95ca25ffad23e9cb2cf5..a173328e64ae5ec682fe6b729b8f1ee4f86fb867 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -1,7 +1,7 @@
 if (NOT APPLE AND NOT WIN32)
-  set(INFERENCE_EXTRA_DEPS paddle_fluid_shared)
+  set(INFERENCE_EXTRA_DEPS paddle_inference_shared)
 else()
-  set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor benchmark)
+  set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_inference_io ir_pass_manager analysis_predictor benchmark)
 endif()

 if(WITH_GPU AND TENSORRT_FOUND)
@@ -508,10 +508,10 @@ if(WITH_GPU AND TENSORRT_FOUND)
           EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
           ARGS --infer_model=${TEST_INSTANCE_NORM_MODEL}/)
   inference_analysis_test(test_analyzer_capi_gpu SRCS analyzer_capi_gpu_tester.cc
-          EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+          EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
           ARGS --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)
   inference_analysis_test(test_analyzer_capi_xpu SRCS analyzer_capi_xpu_tester.cc
-          EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+          EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
           ARGS --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)

   set(TRT_MODEL_QUANT_RESNET_DIR "${INFERENCE_DEMO_INSTALL_DIR}/small_quant_model")
@@ -593,11 +593,11 @@ download_data(${LITE_MODEL_INSTALL_DIR} "mul_model_fp32.tgz")
 #          ARGS --infer_model=${RESNET50_MODEL_DIR})

 inference_analysis_test(test_analyzer_capi SRCS analyzer_capi_tester.cc
-        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
         ARGS --infer_model=${RESNET50_MODEL_DIR}/model)

 inference_analysis_test(test_analyzer_capi_pd_tensor SRCS analyzer_capi_pd_tensor_tester.cc
-        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
         ARGS --infer_model=${MOBILENET_INSTALL_DIR}/model)

 inference_analysis_test(test_analyzer_zerocopytensor_tensor SRCS analyzer_zerocopy_tensor_tester.cc
@@ -610,12 +610,12 @@ inference_analysis_test(test_analyzer_paddletensor_tensor SRCS analyzer_paddle_t

 if(WITH_MKLDNN)
   inference_analysis_test(test_analyzer_capi_int SRCS analyzer_capi_int_tester.cc
-        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
         ARGS --infer_model=${INT8_DATA_DIR}/resnet50/model)
 endif()

 inference_analysis_test(test_analyzer_capi_ner SRCS analyzer_capi_ner_tester.cc
-        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
         ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model)

 if(WITH_GPU)
diff --git a/paddle/fluid/train/CMakeLists.txt b/paddle/fluid/train/CMakeLists.txt
index 8f360d77967050e926a71b8b3cd2ddc1bbd25c8a..0688c63cac3f3fdd9e8b2a215c5384d8e175bd3c 100644
--- a/paddle/fluid/train/CMakeLists.txt
+++ b/paddle/fluid/train/CMakeLists.txt
@@ -7,12 +7,12 @@ function(train_test TARGET_NAME)
   if (NOT APPLE AND NOT WIN32)
     cc_test(test_train_${TARGET_NAME}
       SRCS test_train_${TARGET_NAME}.cc
-      DEPS paddle_fluid_shared
+      DEPS paddle_inference_shared
       ARGS --dirname=${PYTHON_TESTS_DIR}/book/)
   else()
     cc_test(test_train_${TARGET_NAME}
       SRCS test_train_${TARGET_NAME}.cc
-      DEPS paddle_fluid_api
+      DEPS paddle_inference_io
       ARGS --dirname=${PYTHON_TESTS_DIR}/book/)
   endif()
   if(TEST test_train_${TARGET_NAME})
diff --git a/paddle/fluid/train/demo/CMakeLists.txt b/paddle/fluid/train/demo/CMakeLists.txt
index 57fda493a81102096ab6c152379c9557e50d6828..95da77d68d482afd8e25c79d8d1ec5e045bbe738 100644
--- a/paddle/fluid/train/demo/CMakeLists.txt
+++ b/paddle/fluid/train/demo/CMakeLists.txt
@@ -69,7 +69,7 @@ endif(APPLE)
 target_link_libraries(demo_trainer
         ${MACOS_LD_FLAGS}
         ${ARCHIVE_START}
-        ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so
+        ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_inference.so
         ${ARCHIVE_END}
         ${MATH_LIB}
         ${MKLDNN_LIB}
diff --git a/paddle/fluid/train/imdb_demo/CMakeLists.txt b/paddle/fluid/train/imdb_demo/CMakeLists.txt
index 29d54d0d2fbf61cae9f1cf8505124a969c029005..e943d6bc78eab08caa1d513973639ea9dad13b2c 100644
--- a/paddle/fluid/train/imdb_demo/CMakeLists.txt
+++ b/paddle/fluid/train/imdb_demo/CMakeLists.txt
@@ -68,7 +68,7 @@ endif(APPLE)
 target_link_libraries(demo_trainer
         ${MACOS_LD_FLAGS}
         ${ARCHIVE_START}
-        ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so
+        ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_inference.so
        ${ARCHIVE_END}
        ${MATH_LIB}
        ${MKLDNN_LIB}
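
The practical effect of this rename for downstream users is that external projects now link `libpaddle_inference` instead of `libpaddle_fluid`. Below is a minimal consumer-side CMake sketch, not part of this patch: it assumes a hypothetical `PADDLE_LIB` variable pointing at an extracted inference package with the Linux layout produced by `inference_lib_dist` above, and the target and source names (`infer_demo`, `main.cc`) are placeholders.

```cmake
# Minimal sketch of a downstream project linking the renamed library.
# Assumptions: PADDLE_LIB points at the extracted inference install dir
# (paddle/include and paddle/lib subdirectories, Linux layout);
# infer_demo and main.cc are placeholder names.
cmake_minimum_required(VERSION 3.0)
project(infer_demo CXX)

set(PADDLE_LIB "" CACHE PATH "Path to the extracted Paddle inference package")
option(WITH_STATIC_LIB "Link libpaddle_inference statically" OFF)

include_directories("${PADDLE_LIB}/paddle/include")

add_executable(infer_demo main.cc)

if(WITH_STATIC_LIB)
  # Static archive: libpaddle_inference.a (previously libpaddle_fluid.a)
  target_link_libraries(infer_demo
      "${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}")
else()
  # Shared object: libpaddle_inference.so (previously libpaddle_fluid.so)
  target_link_libraries(infer_demo
      "${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX}")
endif()
```

On Windows the corresponding artifacts are `paddle_inference.lib` and `paddle_inference.dll`, as handled by the demo_ci hunks above.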