Commit f785229f authored by zhouwei25, committed by liuwei1031

[cherry-pick] faster build by reducing by-products, reducing linked libraries, and fixing compile warnings of std=c++11 (#22230)
Parent 2e834eab
......@@ -48,10 +48,6 @@ if(WIN32)
SET(CMAKE_C_RESPONSE_FILE_LINK_FLAG "@")
SET(CMAKE_CXX_RESPONSE_FILE_LINK_FLAG "@")
# Specify the program to use when building static libraries
SET(CMAKE_C_CREATE_STATIC_LIBRARY "<CMAKE_AR> lib <TARGET> <LINK_FLAGS> <OBJECTS>")
SET(CMAKE_CXX_CREATE_STATIC_LIBRARY "<CMAKE_AR> lib <TARGET> <LINK_FLAGS> <OBJECTS>")
# set the definition for the dll export
if (NOT MSVC)
message(FATAL_ERROR "Windows build only supports MSVC, which is the host compiler required by NVIDIA's nvcc.")
......
......@@ -138,19 +138,16 @@ IF (WIN32)
ENDIF(WIN32)
if (NOT "${PROTOBUF_ROOT}" STREQUAL "")
find_path(PROTOBUF_INCLUDE_DIR google/protobuf/message.h PATHS ${PROTOBUF_ROOT}/include NO_DEFAULT_PATH)
find_library(PROTOBUF_LIBRARY protobuf libprotobuf.lib PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH)
find_library(PROTOBUF_LITE_LIBRARY protobuf-lite libprotobuf-lite.lib PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH)
find_library(PROTOBUF_PROTOC_LIBRARY protoc libprotoc.lib PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH)
find_program(PROTOBUF_PROTOC_EXECUTABLE protoc PATHS ${PROTOBUF_ROOT}/bin NO_DEFAULT_PATH)
if (PROTOBUF_INCLUDE_DIR AND PROTOBUF_LIBRARY AND PROTOBUF_LITE_LIBRARY AND PROTOBUF_PROTOC_LIBRARY AND PROTOBUF_PROTOC_EXECUTABLE)
message(STATUS "Using custom protobuf library in ${PROTOBUF_ROOT}.")
SET(PROTOBUF_FOUND true)
SET_PROTOBUF_VERSION()
PROMPT_PROTOBUF_LIB()
else()
message(WARNING "Cannot find protobuf library in ${PROTOBUF_ROOT}")
message(STATUS "Using custom protobuf library in ${PROTOBUF_ROOT}.")
endif()
endif()
......
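For reference, the detection above can be pointed at a pre-built protobuf by seeding `PROTOBUF_ROOT` before this file runs; a minimal sketch with a hypothetical path:

```cmake
# Equivalent to passing -DPROTOBUF_ROOT=C:/thirdparty/protobuf on the command
# line; the path is illustrative.
set(PROTOBUF_ROOT "C:/thirdparty/protobuf" CACHE PATH "Root of a pre-built protobuf install")
```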
......@@ -26,14 +26,10 @@ cache_third_party(extern_xxhash
TAG ${XXHASH_TAG}
DIR XXHASH_SOURCE_DIR)
IF(WITH_STATIC_LIB)
SET(BUILD_CMD make lib)
ELSE()
IF(APPLE)
SET(BUILD_CMD sed -i \"\" "s/-Wstrict-prototypes -Wundef/-Wstrict-prototypes -Wundef -fPIC/g" ${XXHASH_SOURCE_DIR}/Makefile && make lib)
ELSE(APPLE)
SET(BUILD_CMD sed -i "s/-Wstrict-prototypes -Wundef/-Wstrict-prototypes -Wundef -fPIC/g" ${XXHASH_SOURCE_DIR}/Makefile && make lib)
ENDIF(APPLE)
IF(APPLE)
SET(BUILD_CMD sed -i \"\" "s/-Wstrict-prototypes -Wundef/-Wstrict-prototypes -Wundef -fPIC/g" ${XXHASH_SOURCE_DIR}/Makefile && make lib)
ELSEIF(UNIX)
SET(BUILD_CMD sed -i "s/-Wstrict-prototypes -Wundef/-Wstrict-prototypes -Wundef -fPIC/g" ${XXHASH_SOURCE_DIR}/Makefile && make lib)
ENDIF()
if(WIN32)
......
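The APPLE and UNIX branches above differ only in BSD versus GNU `sed -i` syntax. A sed-free sketch that patches the Makefile with CMake itself, assuming the xxhash source tree is already present at configure time:

```cmake
# Add -fPIC to the xxhash Makefile without shelling out to sed.
file(READ ${XXHASH_SOURCE_DIR}/Makefile makefile_contents)
string(REPLACE "-Wstrict-prototypes -Wundef"
               "-Wstrict-prototypes -Wundef -fPIC"
               makefile_contents "${makefile_contents}")
file(WRITE ${XXHASH_SOURCE_DIR}/Makefile "${makefile_contents}")
set(BUILD_CMD make lib)
```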
......@@ -134,6 +134,32 @@ function(find_fluid_thirdparties TARGET_NAME)
endif()
endfunction(find_fluid_thirdparties)
function(create_static_lib TARGET_NAME)
set(libs ${ARGN})
list(REMOVE_DUPLICATES libs)
if(WIN32)
set(dummy_index 1)
set(dummy_offset 1)
# each dummy target is made up of at most dummy_limit libraries
set(dummy_limit 60)
list(LENGTH libs libs_len)
foreach(lib ${libs})
list(APPEND dummy_list ${lib})
list(LENGTH dummy_list listlen)
if ((${listlen} GREATER ${dummy_limit}) OR (${dummy_offset} EQUAL ${libs_len}))
merge_static_libs(${TARGET_NAME}_dummy_${dummy_index} ${dummy_list})
set(dummy_list)
list(APPEND ${TARGET_NAME}_dummy_list ${TARGET_NAME}_dummy_${dummy_index})
MATH(EXPR dummy_index "${dummy_index}+1")
endif()
MATH(EXPR dummy_offset "${dummy_offset}+1")
endforeach()
merge_static_libs(${TARGET_NAME} ${${TARGET_NAME}_dummy_list})
else()
merge_static_libs(${TARGET_NAME} ${libs})
endif()
endfunction()
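# Hypothetical call site (target and variable names illustrative):
#   create_static_lib(my_fluid_static ${my_modules} ${my_api_libs})
# On Windows the inputs are merged in chunks of at most 60 archives via
# intermediate ${TARGET_NAME}_dummy_N targets; elsewhere it is a single merge.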
function(merge_static_libs TARGET_NAME)
set(libs ${ARGN})
list(REMOVE_DUPLICATES libs)
......@@ -302,52 +328,17 @@ function(cc_library TARGET_NAME)
endforeach()
else(cc_library_SRCS)
if(cc_library_DEPS)
merge_static_libs(${TARGET_NAME} ${cc_library_DEPS})
list(REMOVE_DUPLICATES cc_library_DEPS)
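# Build a tiny real archive from a generated dummy source; the actual
# dependencies propagate through target_link_libraries instead of being
# merged into one large by-product archive.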
set(target_SRCS ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}_dummy.c)
file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
add_library(${TARGET_NAME} STATIC ${target_SRCS})
target_link_libraries(${TARGET_NAME} ${cc_library_DEPS})
else()
message(FATAL_ERROR "Please specify source files or libraries in cc_library(${TARGET_NAME} ...).")
endif()
endif(cc_library_SRCS)
endfunction(cc_library)
# The link command on Windows may exceed the maximum command-line length limit; simply breaking the link command
# into multiple link operations fixes that, e.g.
# original:
# lib /out:target.lib a.lib b.lib c.lib d.lib
# after:
# 1. lib /out:dummy_lib_1.lib a.lib b.lib
# 2. lib /out:dummy_lib_2.lib c.lib d.lib
# 3. lib /out:target.lib dummy_lib_1.lib dummy_lib_2.lib
function(sep_library TARGET_NAME)
set(options STATIC static SHARED shared)
set(oneValueArgs "")
set(multiValueArgs SRCS DEPS)
cmake_parse_arguments(sep_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
set(dummy_index 1)
set(dummy_offset 1)
# each dummy target is made up of at most dummy_limit libraries
set(dummy_limit 50)
list(REMOVE_DUPLICATES sep_library_DEPS)
list(LENGTH sep_library_DEPS sep_all_len)
foreach(v ${sep_library_DEPS})
list(APPEND dummy_list ${v})
list(LENGTH dummy_list listlen)
if ((${listlen} GREATER ${dummy_limit}) OR (${dummy_offset} EQUAL ${sep_all_len}))
message("create dummy library ${TARGET_NAME}_dummy_lib_${dummy_index} for ${TARGET_NAME}")
cc_library(${TARGET_NAME}_dummy_lib_${dummy_index} STATIC DEPS ${dummy_list})
foreach(i ${dummy_list})
list(REMOVE_AT dummy_list 0)
endforeach()
list(APPEND ${TARGET_NAME}_dummy_list ${TARGET_NAME}_dummy_lib_${dummy_index})
MATH(EXPR dummy_index "${dummy_index}+1")
endif()
MATH(EXPR dummy_offset "${dummy_offset}+1")
endforeach()
if(${sep_library_SHARED})
cc_library(${TARGET_NAME} SHARED SRCS ${sep_library_SRCS} DEPS ${${TARGET_NAME}_dummy_list})
else(${sep_library_SHARED})
cc_library(${TARGET_NAME} STATIC SRCS ${sep_library_SRCS} DEPS ${${TARGET_NAME}_dummy_list})
endif(${sep_library_SHARED})
endfunction(sep_library)
function(cc_binary TARGET_NAME)
set(options "")
......@@ -440,7 +431,12 @@ function(nv_library TARGET_NAME)
endforeach()
else(nv_library_SRCS)
if (nv_library_DEPS)
merge_static_libs(${TARGET_NAME} ${nv_library_DEPS})
list(REMOVE_DUPLICATES nv_library_DEPS)
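# Same dummy-source trick as in cc_library: build a tiny real archive and
# propagate nv_library_DEPS via target_link_libraries/add_dependencies.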
set(target_SRCS ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}_dummy.c)
file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
add_library(${TARGET_NAME} STATIC ${target_SRCS})
target_link_libraries(${TARGET_NAME} ${nv_library_DEPS})
add_dependencies(${TARGET_NAME} ${nv_library_DEPS})
else()
message(FATAL_ERROR "Please specify source files or libraries in nv_library.")
endif()
......
......@@ -135,7 +135,7 @@ function(copy_part_of_thrid_party TARGET DST)
endfunction()
# inference library, for inference only
set(inference_lib_deps third_party paddle_fluid paddle_fluid_shared paddle_fluid_c paddle_fluid_c_shared)
set(inference_lib_deps third_party paddle_fluid paddle_fluid_c paddle_fluid_shared paddle_fluid_c_shared)
add_custom_target(inference_lib_dist DEPENDS ${inference_lib_deps})
set(dst_dir "${FLUID_INFERENCE_INSTALL_DIR}/third_party/eigen3")
......@@ -171,7 +171,7 @@ copy_part_of_thrid_party(inference_lib_dist ${FLUID_INFERENCE_INSTALL_DIR})
set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
if(WIN32)
set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/libpaddle_fluid.*)
set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/*paddle_fluid.*)
else(WIN32)
set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*)
endif(WIN32)
......@@ -180,7 +180,6 @@ copy(inference_lib_dist
SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_fluid_lib}
DSTS ${FLUID_INFERENCE_INSTALL_DIR}/paddle/include ${FLUID_INFERENCE_INSTALL_DIR}/paddle/lib)
# CAPI inference library, for inference only
set(FLUID_INFERENCE_C_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_inference_c_install_dir" CACHE STRING
"A path setting CAPI fluid inference shared")
......@@ -188,22 +187,14 @@ copy_part_of_thrid_party(inference_lib_dist ${FLUID_INFERENCE_C_INSTALL_DIR})
set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
if(WIN32)
set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/${CMAKE_BUILD_TYPE}/paddle_fluid_c.dll
${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/${CMAKE_BUILD_TYPE}/paddle_fluid_c.lib)
set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/${CMAKE_BUILD_TYPE}/paddle_fluid_c.*)
else(WIN32)
set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*)
set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/libpaddle_fluid_c.*)
endif(WIN32)
if(WIN32)
copy(inference_lib_dist
SRCS ${src_dir}/inference/capi/c_api.h ${paddle_fluid_c_lib}
DSTS ${FLUID_INFERENCE_C_INSTALL_DIR}/paddle/include ${FLUID_INFERENCE_C_INSTALL_DIR}/paddle/lib
${FLUID_INFERENCE_C_INSTALL_DIR}/paddle/lib)
else()
copy(inference_lib_dist
SRCS ${src_dir}/inference/capi/c_api.h ${paddle_fluid_c_lib}
copy(inference_lib_dist
SRCS ${src_dir}/inference/capi/c_api.h ${paddle_fluid_c_lib}
DSTS ${FLUID_INFERENCE_C_INSTALL_DIR}/paddle/include ${FLUID_INFERENCE_C_INSTALL_DIR}/paddle/lib)
endif()
# fluid library for both training and inference
set(fluid_lib_deps inference_lib_dist)
......@@ -279,11 +270,17 @@ function(version version_file)
"WITH_MKL: ${WITH_MKL}\n"
"WITH_MKLDNN: ${WITH_MKLDNN}\n"
"WITH_GPU: ${WITH_GPU}\n")
if (WITH_GPU)
if(WITH_GPU)
file(APPEND ${version_file}
"CUDA version: ${CUDA_VERSION}\n"
"CUDNN version: v${CUDNN_MAJOR_VERSION}\n")
endif ()
endif()
if(TENSORRT_FOUND)
file(APPEND ${version_file}
"WITH_TENSORRT: ${TENSORRT_FOUND}\n"
"TENSORRT_ROOT: ${TENSORRT_ROOT}\n")
endif()
endfunction()
version(${FLUID_INSTALL_DIR}/version.txt)
version(${FLUID_INFERENCE_INSTALL_DIR}/version.txt)
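# For illustration, a GPU build with TensorRT found appends lines like the
# following to version.txt (values hypothetical):
#   WITH_MKL: ON
#   WITH_MKLDNN: ON
#   WITH_GPU: ON
#   CUDA version: 10.0
#   CUDNN version: v7
#   WITH_TENSORRT: ON
#   TENSORRT_ROOT: /usr/local/TensorRT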
......
......@@ -3,9 +3,6 @@ if(NOT WITH_GPU)
endif()
if(WIN32)
if("${TENSORRT_ROOT}" STREQUAL "")
message(WARNING "Please specify the TensorRT root path: TENSORRT_ROOT.")
endif()
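# Hypothetical usage: configure with -DTENSORRT_ROOT=D:/TensorRT-6.0.1.5 (path illustrative)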
string(REPLACE "\\" "/" TENSORRT_ROOT "${TENSORRT_ROOT}")
set(TR_INFER_LIB nvinfer.lib)
set(TR_INFER_RT nvinfer.dll)
......@@ -40,15 +37,15 @@ find_library(TENSORRT_LIBRARY NAMES ${TR_INFER_LIB} ${TR_INFER_RT}
DOC "Path to TensorRT library.")
if(TENSORRT_INCLUDE_DIR AND TENSORRT_LIBRARY)
if(WITH_DSO)
set(TENSORRT_FOUND ON)
endif(WITH_DSO)
if(WITH_DSO)
set(TENSORRT_FOUND ON)
endif(WITH_DSO)
else()
set(TENSORRT_FOUND OFF)
if(WITH_DSO)
message(WARNING "TensorRT is NOT found.")
message(WARNING "TensorRT is NOT found when WITH_DSO is ON.")
else(WITH_DSO)
message(WARNING "TensorRT is disabled because WITH_DSO is OFF.")
message(STATUS "TensorRT is disabled because WITH_DSO is OFF.")
endif(WITH_DSO)
endif()
......
......@@ -262,14 +262,9 @@ message(STATUS "branch: ${PADDLE_BRANCH}")
configure_file(commit.h.in commit.h)
set(FLUID_FRAMEWORK_MODULES proto_desc memory lod_tensor executor data_feed_proto layer)
if(WIN32)
sep_library(paddle_framework DEPS ${FLUID_FRAMEWORK_MODULES})
else(WIN32)
cc_library(paddle_framework DEPS ${FLUID_FRAMEWORK_MODULES})
endif(WIN32)
cc_library(paddle_framework DEPS ${FLUID_FRAMEWORK_MODULES})
cc_library(paddle_framework_shared
SHARED SRCS executor.cc operator.cc
${CMAKE_CURRENT_SOURCE_DIR}/c/c_api.cc
......
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
if(WITH_TESTING)
include(tests/test.cmake) # some generic cmake functions for inference
endif()
set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor data_feed_proto)
# TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
cc_library(paddle_fluid_api
SRCS io.cc
DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS})
DEPS paddle_framework ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS})
# analysis and tensorrt must be added before creating the static library;
# otherwise, there would be undefined references to them in the static library.
......@@ -25,19 +38,9 @@ if (WITH_LITE)
add_subdirectory(lite)
endif()
# fluid_modules excludes the API interfaces in inference/api and inference/capi
get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
get_property(cuda_modules GLOBAL PROPERTY CUDA_MODULES)
get_property(fluid_third_partys GLOBAL PROPERTY FLUID_THRID_PARTYS)
if (WIN32)
list(APPEND fluid_third_partys gflags glog protobuf cblas)
endif(WIN32)
# paddle_fluid_origin excludes the inference api interface
if(WIN32)
sep_library(paddle_fluid_origin DEPS ${fluid_modules} paddle_fluid_api)
else(WIN32)
cc_library(paddle_fluid_origin DEPS ${fluid_modules} paddle_fluid_api)
endif(WIN32)
add_subdirectory(api)
add_subdirectory(capi)
......@@ -54,47 +57,35 @@ if(NOT ON_INFER)
return()
endif()
if(WITH_MKLDNN)
set(mkldnn_quantizer_src ${CMAKE_CURRENT_SOURCE_DIR}/api/mkldnn_quantizer.cc)
set(mkldnn_quantizer_cfg mkldnn_quantizer_config)
# Create static inference library if needed
# All static libs in inference/api
set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor zero_copy_tensor reset_tensor_array
analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg})
create_static_lib(paddle_fluid ${fluid_modules} ${STATIC_INFERENCE_API})
if(NOT APPLE)
# TODO(liuyiqu): Temporarily disable the link flag because it is not supported on Mac.
set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
endif()
set(STATIC_INFERENCE_APIS paddle_fluid_api paddle_inference_api analysis_predictor)
if (ANAKIN_FOUND)
if(ANAKIN_FOUND)
set(ANAKIN_SHARED_INFERENCE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/api/api_anakin_engine.cc)
endif()
set(SHARED_INFERENCE_SRCS
io.cc ${CMAKE_CURRENT_SOURCE_DIR}/../framework/data_feed.cc ${CMAKE_CURRENT_SOURCE_DIR}/../framework/data_set.cc ${CMAKE_CURRENT_SOURCE_DIR}/../framework/data_feed_factory.cc ${CMAKE_CURRENT_SOURCE_DIR}/../framework/dataset_factory.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api_impl.cc
io.cc
${CMAKE_CURRENT_SOURCE_DIR}/../framework/data_feed.cc
${CMAKE_CURRENT_SOURCE_DIR}/../framework/data_feed_factory.cc
${CMAKE_CURRENT_SOURCE_DIR}/../framework/dataset_factory.cc
${CMAKE_CURRENT_SOURCE_DIR}/api/api.cc
${CMAKE_CURRENT_SOURCE_DIR}/api/api_impl.cc
${CMAKE_CURRENT_SOURCE_DIR}/api/analysis_predictor.cc
${mkldnn_quantizer_src}
${CMAKE_CURRENT_SOURCE_DIR}/api/details/zero_copy_tensor.cc
${mkldnn_quantizer_src_file}
${ANAKIN_SHARED_INFERENCE_SRCS})
if(WIN32)
sep_library(paddle_fluid DEPS ${fluid_modules} ${STATIC_INFERENCE_APIS} zero_copy_tensor reset_tensor_array
analysis_config ${mkldnn_quantizer_cfg} paddle_pass_builder)
else(WIN32)
cc_library(paddle_fluid DEPS ${fluid_modules} ${STATIC_INFERENCE_APIS}
zero_copy_tensor reset_tensor_array analysis_config ${mkldnn_quantizer_cfg} paddle_pass_builder)
endif(WIN32)
if(NOT APPLE)
# TODO(liuyiqu): Temporarily disable the link flag because it is not supported on Mac.
set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
endif()
# Create shared library
if(WIN32)
sep_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
DEPS ${fluid_modules} paddle_fluid_api reset_tensor_array
analysis_config ${mkldnn_quantizer_cfg} paddle_pass_builder)
else(WIN32)
cc_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
DEPS ${fluid_modules} paddle_fluid_api reset_tensor_array
analysis_config ${mkldnn_quantizer_cfg} paddle_pass_builder)
endif()
# Create the shared inference library by default
cc_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
DEPS ${fluid_modules} analysis_predictor)
get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
target_link_libraries(paddle_fluid_shared ${os_dependency_modules})
......
......@@ -14,8 +14,7 @@ cc_library(ir_pass_manager SRCS ir_pass_manager.cc DEPS graph pass ${INFER_IR_PA
cc_library(argument INTERFACE SRCS argument.cc DEPS scope proto_desc)
cc_library(analysis_pass INTERFACE SRCS analysis_pass.cc DEPS proto_desc)
cc_library(analysis SRCS
analyzer.cc
cc_library(analysis SRCS analyzer.cc
DEPS ${analysis_deps} analysis_helper
analysis_pass ${INFER_IR_PASSES}
)
......
......@@ -17,29 +17,14 @@ if(APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move")
endif(APPLE)
set(inference_deps ${analysis_deps}
paddle_inference_api paddle_fluid_api
analysis pass naive_executor
${GLOB_PASS_LIB})
if(WITH_GPU AND TENSORRT_FOUND)
set(inference_deps ${inference_deps} tensorrt_engine tensorrt_converter)
endif()
if (ANAKIN_SUBGRAPH)
set(inference_deps ${inference_deps} anakin_op_converter anakin_engine)
endif()
if(WITH_NGRAPH)
set(inference_deps ${inference_deps} ngraph)
endif()
add_subdirectory(details)
if(WITH_MKLDNN)
set(mkldnn_quantizer_src mkldnn_quantizer.cc)
set(mkldnn_quantizer_src ${CMAKE_CURRENT_SOURCE_DIR}/mkldnn_quantizer.cc)
set(mkldnn_quantizer_cfg mkldnn_quantizer_config)
cc_library(${mkldnn_quantizer_cfg} SRCS mkldnn_quantizer_config.cc DEPS lod_tensor paddle_pass_builder)
set(mkldnn_quantizer_src_file ${mkldnn_quantizer_src} PARENT_SCOPE)
set(mkldnn_quantizer_cfg ${mkldnn_quantizer_cfg} PARENT_SCOPE)
endif()
cc_library(analysis_config SRCS analysis_config.cc DEPS ${mkldnn_quantizer_cfg} lod_tensor paddle_pass_builder)
......@@ -48,12 +33,26 @@ if(WITH_NGRAPH)
else(WITH_NGRAPH)
cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc)
endif(WITH_NGRAPH)
cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src} DEPS paddle_inference_api zero_copy_tensor
reset_tensor_array analysis_config paddle_pass_builder ir_pass_manager op_compatible_info ${inference_deps})
cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS
lod_tensor scope paddle_pass_builder reset_tensor_array analysis_config
paddle_pass_builder zero_copy_tensor
reset_tensor_array)
cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS lod_tensor scope reset_tensor_array
analysis_config zero_copy_tensor trainer_desc_proto)
set(inference_deps ${analysis_deps} paddle_inference_api analysis naive_executor ${GLOB_PASS_LIB})
if(WITH_GPU AND TENSORRT_FOUND)
set(inference_deps ${inference_deps} tensorrt_engine tensorrt_converter)
endif()
if (ANAKIN_SUBGRAPH)
set(inference_deps ${inference_deps} anakin_op_converter anakin_engine)
endif()
if(WITH_NGRAPH)
set(inference_deps ${inference_deps} ngraph)
endif()
cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps}
zero_copy_tensor ir_pass_manager op_compatible_info)
cc_test(test_paddle_inference_api SRCS api_tester.cc DEPS paddle_inference_api)
......
......@@ -5,8 +5,7 @@
The inference library contains:
- the header file `paddle_inference_api.h`, which defines all the interfaces
- the library files `libpaddle_fluid.so` / `libpaddle_fluid.a`
- the library files `libpaddle_inference_api.so` / `libpaddle_inference_api.a`
- the library files `libpaddle_fluid.so/.a` (Linux/Mac) or `libpaddle_fluid.lib/paddle_fluid.dll` (Windows)
Below is a detailed introduction to some API concepts.
......@@ -79,7 +78,7 @@ CHECK(predictor->Run(slots, &outputs));
// fetch the outputs ...
```
At compile time, simply link against `libpaddle_fluid.a/.so` and `libpaddle_inference_api.a/.so`.
At compile time, simply link against `libpaddle_fluid.a/.so` (Linux/Mac) or `libpaddle_fluid.lib/paddle_fluid.dll` (Windows).
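A minimal consumer-side sketch of that link step for the Linux case, with hypothetical paths and target names:

```cmake
# PADDLE_LIB points at the unpacked fluid_inference install directory.
set(PADDLE_LIB /path/to/fluid_inference_install_dir)
add_executable(demo demo.cc)
target_include_directories(demo PRIVATE ${PADDLE_LIB}/paddle/include)
target_link_libraries(demo ${PADDLE_LIB}/paddle/lib/libpaddle_fluid.so)
```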
## Detailed code reference
......
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set(C_API_SRCS pd_config.cc pd_predictor.cc pd_tensor.cc c_api.cc)
cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS
${fluid_modules} paddle_fluid_api paddle_inference_api analysis_predictor)
cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS
${fluid_modules} paddle_fluid_api paddle_inference_api analysis_predictor)
cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS ${fluid_modules} analysis_predictor)
if(NOT ON_INFER)
return()
endif()
# Create inference capi shared library
cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS ${fluid_modules} analysis_predictor)
set_target_properties(paddle_fluid_c_shared PROPERTIES OUTPUT_NAME paddle_fluid_c)
if(WIN32)
target_link_libraries(paddle_fluid_c_shared shlwapi.lib)
......
......@@ -7,45 +7,45 @@ nv_library(tensorrt_converter
DEPS tensorrt_engine tensorrt_plugin operator scope framework_proto op_registry)
nv_test(test_op_converter SRCS test_op_converter.cc DEPS
${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_converter)
paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_converter)
# TODO(xingzhaolong): fix the following CI UT errors.
#nv_test(test_io_converter SRCS test_io_converter.cc io_converter.cc DEPS dynload_cuda dynamic_loader lod_tensor)
#nv_test(test_trt_mul_op SRCS test_mul_op.cc mul_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine mul_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine mul_op)
#nv_test(test_trt_fc_op SRCS test_fc_op.cc fc_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine mul_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine mul_op)
#nv_test(test_trt_activation_op SRCS test_activation_op.cc activation_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine activation_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine activation_op)
#nv_test(test_trt_conv_op SRCS test_conv2d_op.cc conv2d_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine conv_op conv_transpose_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine conv_op conv_transpose_op)
#nv_test(test_trt_pool2d_op SRCS test_pool2d_op.cc pool2d_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine pool_op tensorrt_plugin)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine pool_op tensorrt_plugin)
#nv_test(test_trt_elementwise_op SRCS test_elementwise_op.cc elementwise_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_plugin
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_plugin
# elementwise_add_op elementwise_mul_op)
#nv_test(test_trt_softmax_op SRCS test_softmax_op.cc softmax_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine softmax_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine softmax_op)
#nv_test(test_trt_batch_norm_op SRCS test_batch_norm_op.cc batch_norm_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine batch_norm_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine batch_norm_op)
#nv_test(test_trt_concat_op SRCS test_concat_op.cc concat_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine concat_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine concat_op)
#nv_test(test_trt_dropout_op SRCS test_dropout_op.cc dropout_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine dropout_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine dropout_op)
#nv_test(test_trt_pad_op SRCS test_pad_op.cc pad_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine pad_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine pad_op)
#nv_test(test_trt_split_op SRCS test_split_op.cc split_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_plugin
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_plugin
# split_op concat_op)
#nv_test(test_trt_prelu_op SRCS test_prelu_op.cc prelu_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_plugin
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_plugin
# prelu_op)
#nv_test(test_trt_leaky_relu_op SRCS test_leaky_relu_op.cc leaky_relu_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine activation_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine activation_op)
#nv_test(test_shuffle_channel_op SRCS test_shuffle_channel_op.cc shuffle_channel_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine shuffle_channel_op)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine shuffle_channel_op)
#nv_test(test_swish_op SRCS test_swish_op.cc swish_op.cc
# DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine activation_op tensorrt_plugin)
# DEPS paddle_framework ${GLOB_OPERATOR_DEPS} tensorrt_engine activation_op tensorrt_plugin)
......@@ -16,7 +16,7 @@ function(inference_test TARGET_NAME)
string(REGEX REPLACE "^_$" "" arg "${arg}")
cc_test(test_inference_${TARGET_NAME}${arg}
SRCS test_inference_${TARGET_NAME}.cc
DEPS paddle_fluid_origin
DEPS paddle_fluid_api
ARGS --dirname=${PYTHON_TESTS_DIR}/book/${TARGET_NAME}${arg}.inference.model)
set_tests_properties(test_inference_${TARGET_NAME}${arg}
PROPERTIES DEPENDS test_${TARGET_NAME})
......@@ -44,7 +44,7 @@ inference_test(word2vec)
# TODO(TJ): clean me up
cc_test(test_inference_nlp
SRCS test_inference_nlp.cc
DEPS paddle_fluid_origin
DEPS paddle_fluid_api
ARGS
--model_path=${PADDLE_BINARY_DIR}/python/paddle/fluid/tests/book/recognize_digits_mlp.inference.model)
set_tests_properties(test_inference_nlp PROPERTIES LABELS "RUN_TYPE=DIST")
......@@ -11,10 +11,7 @@ cc_library(malloc SRCS malloc.cc DEPS
place enforce allocator_facade profiler ${MKLDNN_CTX_DEPS})
cc_library(memcpy SRCS memcpy.cc DEPS place)
cc_library(memory
DEPS
malloc
memcpy)
cc_library(memory DEPS malloc memcpy)
if (WITH_GPU)
nv_test(malloc_test
......
......@@ -16,7 +16,7 @@ function(train_test TARGET_NAME)
string(REGEX REPLACE "^_$" "" arg "${arg}")
cc_test(test_train_${TARGET_NAME}${arg}
SRCS test_train_${TARGET_NAME}.cc
DEPS paddle_fluid_origin
DEPS paddle_fluid_api
ARGS --dirname=${PYTHON_TESTS_DIR}/book/${TARGET_NAME}${arg}.train.model/)
set_tests_properties(test_train_${TARGET_NAME}${arg}
PROPERTIES DEPENDS test_${TARGET_NAME})
......