Unverified commit 463eae03, authored by Wilber, committed by GitHub

update paddle_fluid.so to paddle_inference.so (#30850)

* update paddle_fluid.so to paddle_inference.so
Parent a2170a08
@@ -137,7 +137,7 @@ function(copy_part_of_thrid_party TARGET DST)
 endfunction()

 # inference library for only inference
-set(inference_lib_deps third_party paddle_fluid paddle_fluid_c paddle_fluid_shared paddle_fluid_c_shared)
+set(inference_lib_deps third_party paddle_inference paddle_inference_c paddle_inference_shared paddle_inference_c_shared)
 add_custom_target(inference_lib_dist DEPENDS ${inference_lib_deps})
@@ -164,20 +164,20 @@ copy_part_of_thrid_party(inference_lib_dist ${PADDLE_INFERENCE_INSTALL_DIR})
 set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
 if(WIN32)
   if(WITH_STATIC_LIB)
-    set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/libpaddle_fluid.lib
-      ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_fluid.*)
+    set(paddle_inference_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/libpaddle_inference.lib
+      ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_inference.*)
   else()
-    set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_fluid.dll
-      ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_fluid.lib)
+    set(paddle_inference_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_inference.dll
+      ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_inference.lib)
   endif()
   copy(inference_lib_dist
-    SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_fluid_lib}
+    SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_inference_lib}
     DSTS ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/include ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib
     ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib)
 else(WIN32)
-  set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*)
+  set(paddle_inference_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_inference.*)
   copy(inference_lib_dist
-    SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_fluid_lib}
+    SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_inference_lib}
     DSTS ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/include ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib)
 endif(WIN32)
@@ -196,13 +196,13 @@ copy_part_of_thrid_party(inference_lib_dist ${PADDLE_INFERENCE_C_INSTALL_DIR})
 set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
 if(WIN32)
-  set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/${CMAKE_BUILD_TYPE}/paddle_fluid_c.*)
+  set(paddle_inference_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/${CMAKE_BUILD_TYPE}/paddle_inference_c.*)
 else(WIN32)
-  set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/libpaddle_fluid_c.*)
+  set(paddle_inference_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/libpaddle_inference_c.*)
 endif(WIN32)
 copy(inference_lib_dist
-  SRCS ${src_dir}/inference/capi/paddle_c_api.h ${paddle_fluid_c_lib}
+  SRCS ${src_dir}/inference/capi/paddle_c_api.h ${paddle_inference_c_lib}
   DSTS ${PADDLE_INFERENCE_C_INSTALL_DIR}/paddle/include ${PADDLE_INFERENCE_C_INSTALL_DIR}/paddle/lib)

 # fluid library for both train and inference
@@ -213,12 +213,12 @@ set(dst_dir "${PADDLE_INSTALL_DIR}/paddle/fluid")
 set(module "inference")
 if(WIN32)
   copy(fluid_lib_dist
-    SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_fluid_lib}
+    SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_inference_lib}
     DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
   )
 else()
   copy(fluid_lib_dist
-    SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_fluid_lib}
+    SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_inference_lib}
     DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
   )
 endif()
@@ -17,8 +17,7 @@ if(WITH_TESTING)
   include(tests/test.cmake) # some generic cmake function for inference
 endif()

-# TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
-cc_library(paddle_fluid_api
+cc_library(paddle_inference_io
   SRCS io.cc
   DEPS paddle_framework ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS})
@@ -46,15 +45,15 @@ set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor
     analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg})
 #TODO(wilber, T8T9): Do we still need to support windows gpu static library?
 if(WIN32 AND WITH_GPU)
-  cc_library(paddle_fluid DEPS ${fluid_modules} ${STATIC_INFERENCE_API})
+  cc_library(paddle_inference DEPS ${fluid_modules} ${STATIC_INFERENCE_API})
 else()
-  create_static_lib(paddle_fluid ${fluid_modules} ${STATIC_INFERENCE_API})
+  create_static_lib(paddle_inference ${fluid_modules} ${STATIC_INFERENCE_API})
 endif()

 if(NOT APPLE)
   # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac.
-  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
-  set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
+  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference.sym")
+  set_target_properties(paddle_inference PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
 endif()

 # C inference API
@@ -88,30 +87,30 @@ if (WITH_PSCORE)
 endif ()

 # Create shared inference library
-cc_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
+cc_library(paddle_inference_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
     DEPS ${SHARED_INFERENCE_DEPS})
 get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
-target_link_libraries(paddle_fluid_shared ${os_dependency_modules})
+target_link_libraries(paddle_inference_shared ${os_dependency_modules})
 if(WIN32)
-  target_link_libraries(paddle_fluid_shared gflags)
+  target_link_libraries(paddle_inference_shared gflags)
 endif()
-set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid)
+set_target_properties(paddle_inference_shared PROPERTIES OUTPUT_NAME paddle_inference)

 if(NOT APPLE AND NOT WIN32)
   # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac.
-  set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.map")
-  set_target_properties(paddle_fluid_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
+  set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference.map")
+  set_target_properties(paddle_inference_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
   # check symbol hidden
   FILE(WRITE ${CMAKE_CURRENT_BINARY_DIR}/check_symbol.cmake
     "execute_process(COMMAND sh -c \"${CMAKE_CURRENT_SOURCE_DIR}/check_symbol.sh"
-    " ${CMAKE_CURRENT_BINARY_DIR}/libpaddle_fluid.so\" RESULT_VARIABLE symbol_res)\n"
+    " ${CMAKE_CURRENT_BINARY_DIR}/libpaddle_inference.so\" RESULT_VARIABLE symbol_res)\n"
     "if(NOT \"\${symbol_res}\" STREQUAL \"0\")\n"
     "  message(FATAL_ERROR \"Check symbol failed.\")\n"
     "endif()\n")
   add_custom_command(
     OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/.check_symbol"
     COMMAND ${CMAKE_COMMAND} -P "${CMAKE_CURRENT_BINARY_DIR}/check_symbol.cmake"
-    DEPENDS paddle_fluid_shared)
+    DEPENDS paddle_inference_shared)
   add_custom_target(check_symbol ALL DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/.check_symbol")
 endif()
 unset(analysis_deps CACHE)
 set(analysis_deps # analysis_deps can be extended accross the project
-    framework_proto proto_desc graph pass paddle_fluid_api executor pretty_log
+    framework_proto proto_desc graph pass paddle_inference_io executor pretty_log
     ir_pass_manager
     CACHE INTERNAL "")

 add_subdirectory(ir_passes)
 add_subdirectory(passes)

-cc_library(analysis_helper SRCS helper.cc DEPS framework_proto proto_desc graph paddle_fluid_api)
+cc_library(analysis_helper SRCS helper.cc DEPS framework_proto proto_desc graph paddle_inference_io)

 cc_library(ir_pass_manager SRCS ir_pass_manager.cc DEPS graph pass ${INFER_IR_PASSES} analysis_helper)
@@ -62,7 +62,7 @@ endfunction(inference_analysis_test)
 if (NOT APPLE AND NOT WIN32)
   inference_analysis_test(test_analyzer
     SRCS analyzer_tester.cc
-    EXTRA_DEPS reset_tensor_array paddle_fluid_shared
+    EXTRA_DEPS reset_tensor_array paddle_inference_shared
     ARGS --inference_model_dir=${WORD2VEC_MODEL_DIR})
 elseif(WIN32)
   inference_analysis_test(test_analyzer
@@ -271,9 +271,14 @@ void LiteSubgraphPass::SetUpEngine(
       paddle::lite_api::Place({target_type, precision_type}),
       paddle::lite_api::Place({target_type, PRECISION(kInt64)}),
       paddle::lite_api::Place({target_type, PRECISION(kFloat)}),
-      paddle::lite_api::Place({TARGET(kHost), PRECISION(kFloat)}),
+#ifdef PADDLE_WITH_ARM
+      paddle::lite_api::Place({TARGET(kARM), precision_type}),
+      paddle::lite_api::Place({TARGET(kARM), PRECISION(kFloat)}),
+#else
       paddle::lite_api::Place({TARGET(kX86), precision_type}),
       paddle::lite_api::Place({TARGET(kX86), PRECISION(kFloat)}),
+#endif
+      paddle::lite_api::Place({TARGET(kHost), PRECISION(kFloat)}),
   };
   config.cpu_math_library_num_threads = cpu_math_library_num_threads;
   config.xpu_l3_workspace_size = xpu_l3_workspace_size;
@@ -50,7 +50,7 @@ cc_test(test_paddle_inference_api SRCS api_tester.cc DEPS paddle_inference_api)
 if(WITH_TESTING)
   if (NOT APPLE AND NOT WIN32)
-    inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS paddle_fluid_shared
+    inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS paddle_inference_shared
       ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${IMG_CLS_RESNET_INSTALL_DIR})
     set_tests_properties(test_api_impl PROPERTIES DEPENDS test_image_classification)
   elseif(WIN32)
@@ -62,7 +62,7 @@ if(WITH_TESTING)
 endif()
 if (NOT APPLE AND NOT WIN32)
-  cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS paddle_fluid_shared
+  cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS paddle_inference_shared
     ARGS --dirname=${WORD2VEC_MODEL_DIR})
 elseif (WIN32)
   cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS analysis_predictor benchmark ${inference_deps}
@@ -132,12 +132,12 @@ else()
 endif()

 if(WITH_STATIC_LIB)
-  set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+  set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
 else()
   if(WIN32)
-    set(DEPS ${PADDLE_LIB}/paddle/lib/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+    set(DEPS ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
   else()
-    set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+    set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
   endif()
 endif()
@@ -204,7 +204,7 @@ if(WIN32)
   endif()
   if(NOT WITH_STATIC_LIB)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+      COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
     )
   endif()
 endif()
 # windows inference
-This document introduces Windows inference. Currently only static compilation is provided, producing paddle_fluid.lib, which bundles all third-party dependencies except openblas.dll.
+This document introduces Windows inference. Currently only static compilation is provided, producing paddle_inference.lib, which bundles all third-party dependencies except openblas.dll.
-1. Download the latest paddle_fluid.lib and openblas.dll, and put them in the same directory.
+1. Download the latest paddle_inference.lib and openblas.dll, and put them in the same directory.
 2. Prepare a pre-trained model, e.g. one from models; a model can be saved with the save_inference_model interface. Put the model files in that directory.
 3. Enter the Paddle/paddle/fluid/inference/api/demo_ci directory, create a build directory, and use cmake to generate a VS2015 solution file.
-   Here PADDLE_LIB is the folder containing the paddle_fluid.lib above, and CUDA_LIB is the directory of the x64 CUDA system libraries.
+   Here PADDLE_LIB is the folder containing the paddle_inference.lib above, and CUDA_LIB is the directory of the x64 CUDA system libraries.
 ```shell
-cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DWITH_MKL=OFF -DWITH_STATIC_LIB=ON -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=inference_icnet -DPADDLE_LIB=D:\to_the_paddle_fluid.lib -DCUDA_LIB=D:\tools\v8.0\lib\x64
+cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DWITH_MKL=OFF -DWITH_STATIC_LIB=ON -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=inference_icnet -DPADDLE_LIB=D:\to_the_paddle_inference.lib -DCUDA_LIB=D:\tools\v8.0\lib\x64
 ```
 Then open the project file in VS2015, make sure to link statically with "/MT", and build the exe. Put openblas.dll in the directory of the exe.
 # Inference High-level APIs
 This document describes the high-level inference APIs, which one can use to deploy a Paddle model for an application quickly.
-The APIs are described in `paddle_inference_api.h`, just one header file, and two libraries `libpaddle_fluid.so` and `libpaddle_fluid_api.so` are needed for a deployment.
+The APIs are described in `paddle_inference_api.h`, just one header file, and two libraries `libpaddle_inference.so` and `libpaddle_inference_io.so` are needed for a deployment.

 ## PaddleTensor
 We provide the `PaddleTensor` data structure to give a general tensor interface.
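A minimal sketch of filling a `PaddleTensor`, based on the `PaddleTensor` and `PaddleBuf` definitions in `paddle_inference_api.h` of this release; the tensor name and shape here are illustrative placeholders, not part of this change:

```cpp
#include <vector>
#include "paddle_inference_api.h"

int main() {
  // Backing storage for a 1x4 float tensor; PaddleBuf(data, length)
  // wraps external memory without taking ownership of it.
  std::vector<float> buf = {1.f, 2.f, 3.f, 4.f};

  paddle::PaddleTensor t;
  t.name = "x";  // illustrative input name; real names come from the model
  t.shape = {1, 4};
  t.data = paddle::PaddleBuf(buf.data(), buf.size() * sizeof(float));
  t.dtype = paddle::PaddleDType::FLOAT32;
  return 0;
}
```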
@@ -5,7 +5,7 @@
 The inference library contains:

 - the header `paddle_inference_api.h`, which defines all the interfaces
-- the library files `libpaddle_fluid.so/.a` (Linux/Mac) and `libpaddle_fluid.lib/paddle_fluid.dll` (Windows)
+- the library files `libpaddle_inference.so/.a` (Linux/Mac) and `libpaddle_inference.lib/paddle_inference.dll` (Windows)

 Below is a detailed introduction of some API concepts.

@@ -76,7 +76,7 @@ CHECK(predictor->Run(slots, &outputs));
 // fetch outputs ...
 ```
-At build time, just link `libpaddle_fluid.a/.so` (Linux/Mac) or `libpaddle_fluid.lib/paddle_fluid.dll` (Windows).
+At build time, just link `libpaddle_inference.a/.so` (Linux/Mac) or `libpaddle_inference.lib/paddle_inference.dll` (Windows).

 ## Detailed code reference
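To make the linking note concrete, here is a minimal sketch of the flow that the `predictor->Run(slots, &outputs)` snippet above belongs to, using the `NativeConfig` path from `paddle_inference_api.h`; the model directory, input name, and shape are placeholder assumptions:

```cpp
#include <vector>
#include "paddle_inference_api.h"

int main() {
  // Simplest configuration: a native (non-analysis) CPU predictor.
  paddle::NativeConfig config;
  config.model_dir = "./model";  // placeholder model path
  config.use_gpu = false;

  auto predictor = paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);

  // One float input of shape {1, 4}; see the PaddleTensor sketch above.
  std::vector<float> buf = {1.f, 2.f, 3.f, 4.f};
  paddle::PaddleTensor input;
  input.name = "x";  // placeholder input name
  input.shape = {1, 4};
  input.data = paddle::PaddleBuf(buf.data(), buf.size() * sizeof(float));
  input.dtype = paddle::PaddleDType::FLOAT32;

  std::vector<paddle::PaddleTensor> outputs;
  if (!predictor->Run({input}, &outputs)) return 1;  // Run returns false on failure
  return 0;
}
```

The resulting binary is then linked against `libpaddle_inference.so` (or the `.a`/`.lib` variants) as described above.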
@@ -134,11 +134,15 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
         "fc_elementwise_layernorm_fuse_pass", //
 #if CUDNN_VERSION >= 7100 // To run conv_fusion, the version of cudnn must be
                           // guaranteed at least v7
+// cudnn8.0 has memory leak problem in conv + eltwise + act, so we
+// disable the pass.
+#if !(CUDNN_VERSION >= 8000 && CUDNN_VERSION < 8100)
         "conv_elementwise_add_act_fuse_pass",  //
         "conv_elementwise_add2_act_fuse_pass", //
+#endif
         "conv_elementwise_add_fuse_pass",      //
 #endif //
         "transpose_flatten_concat_fuse_pass",  //
         // following pass should be located in the last, since it will
         // work on all fused ops.
         "runtime_context_cache_pass"
@@ -15,15 +15,15 @@
 set(C_API_SRCS pd_config.cc pd_predictor.cc pd_tensor.cc c_api.cc)

-cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS paddle_fluid)
+cc_library(paddle_inference_c SRCS ${C_API_SRCS} DEPS paddle_inference)

 if(NOT ON_INFER)
   return()
 endif()

 # Create inference capi shared library
-cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS paddle_fluid)
-set_target_properties(paddle_fluid_c_shared PROPERTIES OUTPUT_NAME paddle_fluid_c)
+cc_library(paddle_inference_c_shared SHARED SRCS ${C_API_SRCS} DEPS paddle_inference)
+set_target_properties(paddle_inference_c_shared PROPERTIES OUTPUT_NAME paddle_inference_c)
 if(WIN32)
-  target_link_libraries(paddle_fluid_c_shared shlwapi.lib)
+  target_link_libraries(paddle_inference_c_shared shlwapi.lib)
 endif()
 if (NOT APPLE AND NOT WIN32)
-  set(INFERENCE_EXTRA_DEPS paddle_fluid_shared)
+  set(INFERENCE_EXTRA_DEPS paddle_inference_shared)
 else()
-  set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor benchmark)
+  set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_inference_io ir_pass_manager analysis_predictor benchmark)
 endif()

 if(WITH_GPU AND TENSORRT_FOUND)

@@ -508,10 +508,10 @@ if(WITH_GPU AND TENSORRT_FOUND)
     EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
     ARGS --infer_model=${TEST_INSTANCE_NORM_MODEL}/)
   inference_analysis_test(test_analyzer_capi_gpu SRCS analyzer_capi_gpu_tester.cc
-    EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+    EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
     ARGS --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)
   inference_analysis_test(test_analyzer_capi_xpu SRCS analyzer_capi_xpu_tester.cc
-    EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+    EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
     ARGS --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models)
   set(TRT_MODEL_QUANT_RESNET_DIR "${INFERENCE_DEMO_INSTALL_DIR}/small_quant_model")

@@ -593,11 +593,11 @@ download_data(${LITE_MODEL_INSTALL_DIR} "mul_model_fp32.tgz")
 #           ARGS --infer_model=${RESNET50_MODEL_DIR})
 inference_analysis_test(test_analyzer_capi SRCS analyzer_capi_tester.cc
-  EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+  EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
   ARGS --infer_model=${RESNET50_MODEL_DIR}/model)
 inference_analysis_test(test_analyzer_capi_pd_tensor SRCS analyzer_capi_pd_tensor_tester.cc
-  EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+  EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
   ARGS --infer_model=${MOBILENET_INSTALL_DIR}/model)
 inference_analysis_test(test_analyzer_zerocopytensor_tensor SRCS analyzer_zerocopy_tensor_tester.cc

@@ -610,12 +610,12 @@ inference_analysis_test(test_analyzer_paddletensor_tensor SRCS analyzer_paddle_t
 if(WITH_MKLDNN)
   inference_analysis_test(test_analyzer_capi_int SRCS analyzer_capi_int_tester.cc
-    EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+    EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
     ARGS --infer_model=${INT8_DATA_DIR}/resnet50/model)
 endif()

 inference_analysis_test(test_analyzer_capi_ner SRCS analyzer_capi_ner_tester.cc
-  EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+  EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_inference_c
   ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model)

 if(WITH_GPU)
@@ -7,12 +7,12 @@ function(train_test TARGET_NAME)
   if (NOT APPLE AND NOT WIN32)
     cc_test(test_train_${TARGET_NAME}
       SRCS test_train_${TARGET_NAME}.cc
-      DEPS paddle_fluid_shared
+      DEPS paddle_inference_shared
       ARGS --dirname=${PYTHON_TESTS_DIR}/book/)
   else()
     cc_test(test_train_${TARGET_NAME}
       SRCS test_train_${TARGET_NAME}.cc
-      DEPS paddle_fluid_api
+      DEPS paddle_inference_io
       ARGS --dirname=${PYTHON_TESTS_DIR}/book/)
   endif()
   if(TEST test_train_${TARGET_NAME})
@@ -69,7 +69,7 @@ endif(APPLE)
 target_link_libraries(demo_trainer
   ${MACOS_LD_FLAGS}
   ${ARCHIVE_START}
-  ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so
+  ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_inference.so
   ${ARCHIVE_END}
   ${MATH_LIB}
   ${MKLDNN_LIB}
@@ -68,7 +68,7 @@ endif(APPLE)
 target_link_libraries(demo_trainer
   ${MACOS_LD_FLAGS}
   ${ARCHIVE_START}
-  ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so
+  ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_inference.so
   ${ARCHIVE_END}
   ${MATH_LIB}
  ${MKLDNN_LIB}