diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0632ada400b01ad96494e554a36d234df359a632..d98bfcde1aa0c6a0e0026af5d7ae696ea3592c6b 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -66,7 +66,7 @@ option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FO
 option(WITH_SYSTEM_BLAS "Use system blas library" OFF)
 option(WITH_DISTRIBUTE "Compile with distributed support" OFF)
 option(WITH_BRPC_RDMA "Use brpc rdma as the rpc protocal" OFF)
-option(ON_INFER "Turn on inference optimization." OFF)
+option(ON_INFER "Turn on inference optimization and inference-lib generation" OFF)
 ################################ Internal Configurations #######################################
 option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF)
 option(WITH_NGRAPH "Compile PaddlePaddle with nGraph support." OFF)
@@ -168,7 +168,6 @@ include(ccache) # set ccache for compilation
 include(util) # set unittest and link libs
 include(version) # set PADDLE_VERSION
 include(coveralls) # set code coverage
-include(inference_lib) # add paddle fluid inference libraries
 include(configure) # add paddle env configuration
 
 include_directories("${PADDLE_SOURCE_DIR}")
@@ -184,7 +183,9 @@ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
 set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
 
 if(ON_INFER)
+    # you can turn off the paddle fluid and inference lib by setting ON_INFER=OFF
     message(STATUS "On inference mode, will take place some specific optimization.")
+    include(inference_lib)
     add_definitions(-DPADDLE_ON_INFERENCE)
 else()
     #TODO(luotao), combine this warning with `make inference_lib_dist` command.
diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index f00be99f07a63535dd783318cec3b1367548cd27..2bce05b2c7e54282330175e74fbd24d24eb42897 100644
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 # make package for paddle fluid shared and static library
-
 set(FLUID_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_install_dir" CACHE STRING
     "A path setting fluid shared and static libraries")
 
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index 5a3525a686d46dbd6c201c01ef9843243d4457f1..c88e5f04286383484d506ebe0b86ffe6d7bb4254 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -38,6 +38,18 @@ endif(WIN32)
 add_subdirectory(api)
 add_subdirectory(capi)
 
+if(WITH_TESTING)
+  # tests/book depends on the models generated by python/paddle/fluid/tests/book
+  add_subdirectory(tests/book)
+  if(WITH_INFERENCE_API_TEST)
+    add_subdirectory(tests/api)
+  endif()
+endif()
+
+if(NOT ON_INFER)
+  return()
+endif()
+
 if(WITH_MKLDNN)
   set(mkldnn_quantizer_src ${CMAKE_CURRENT_SOURCE_DIR}/api/mkldnn_quantizer.cc)
   set(mkldnn_quantizer_cfg mkldnn_quantizer_config)
@@ -59,9 +71,10 @@ if(WIN32)
              analysis_config ${mkldnn_quantizer_cfg} paddle_pass_builder)
 else(WIN32)
   cc_library(paddle_fluid DEPS ${fluid_modules} ${STATIC_INFERENCE_APIS}
-             zero_copy_tensor reset_tensor_array analysis_config ${mkldnn_quantizer_cfg} paddle_pass_builder)
+      zero_copy_tensor reset_tensor_array analysis_config ${mkldnn_quantizer_cfg} paddle_pass_builder)
 endif(WIN32)
+
 
 if(NOT APPLE)
   # TODO(liuyiqu: Temporarily disable the link flag because it is not support on Mac.
   set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
@@ -72,12 +85,13 @@ endif()
 
 if(WIN32)
   sep_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS} DEPS ${fluid_modules}
               paddle_fluid_api reset_tensor_array
-              analysis_config ${mkldnn_quantizer_cfg} paddle_pass_builder)
+      analysis_config ${mkldnn_quantizer_cfg} paddle_pass_builder)
 else(WIN32)
   cc_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
-             DEPS ${fluid_modules} paddle_fluid_api reset_tensor_array
+      DEPS ${fluid_modules} paddle_fluid_api reset_tensor_array
       analysis_config ${mkldnn_quantizer_cfg} paddle_pass_builder)
 endif()
+
 get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
 target_link_libraries(paddle_fluid_shared ${os_dependency_modules})
 
@@ -99,11 +113,3 @@ if(NOT APPLE AND NOT WIN32)
           DEPENDS paddle_fluid_shared)
   add_custom_target(check_symbol ALL DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/.check_symbol")
 endif()
-
-if(WITH_TESTING)
-  # tests/book depends the models that generated by python/paddle/fluid/tests/book
-  add_subdirectory(tests/book)
-  if(WITH_INFERENCE_API_TEST)
-    add_subdirectory(tests/api)
-  endif()
-endif()
diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
index 9aaea73f25106b29acf2000cf3c5c60f567b1e89..d910cc10d2f0116ac86ca496eacd9cd4fc99e655 100755
--- a/paddle/fluid/inference/api/CMakeLists.txt
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -17,7 +17,6 @@ if(APPLE)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move")
 endif(APPLE)
 
-
 set(inference_deps ${analysis_deps}
     paddle_inference_api paddle_fluid_api
     analysis pass naive_executor
diff --git a/paddle/fluid/inference/capi/CMakeLists.txt b/paddle/fluid/inference/capi/CMakeLists.txt
index c910074f4fc7b8a2dc5d777ed47e3d3c843c19f4..4f602eded3f24302f99fb230c21c39701f5cbf7e 100644
--- a/paddle/fluid/inference/capi/CMakeLists.txt
+++ b/paddle/fluid/inference/capi/CMakeLists.txt
@@ -1,8 +1,10 @@
 set(C_API_SRCS pd_config.cc pd_predictor.cc pd_tensor.cc c_api.cc)
 
-cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS paddle_fluid)
-cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS paddle_fluid)
+cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS
+    ${fluid_modules} paddle_fluid_api paddle_inference_api analysis_predictor)
+cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS
+    ${fluid_modules} paddle_fluid_api paddle_inference_api analysis_predictor)
 set_target_properties(paddle_fluid_c_shared PROPERTIES OUTPUT_NAME paddle_fluid_c)
 if(WIN32)
   target_link_libraries(paddle_fluid_c_shared shlwapi.lib)