Commit 330b173c authored by Jeng Bai-Cheng, committed by Zhaolong Xing

Better TensorRT support (#20858)

* Fix TensorRT detection bug

1. Add a new search path for TensorRT in tensorrt.cmake
2. Add better debug messages
3. Fix a bug in detecting the TensorRT version

In the official NVIDIA Docker images, TensorRT headers are located in
`/usr/include/x86_64-linux-gnu` and TensorRT libraries in
`/usr/lib/x86_64-linux-gnu`, so passing `-DTENSORRT_ROOT` alone fails to
detect TensorRT.

There was also no debug or warning message to tell developers that TensorRT
detection had failed.

In later versions of TensorRT (e.g. v6), `NV_TENSORRT_MAJOR` is defined in
`NvInferVersion.h` instead of `NvInfer.h`, so add a compatibility fix.
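
A condensed sketch of that version probe; the full change appears in the
tensorrt.cmake hunk further down:

    # TensorRT < 6 defines NV_TENSORRT_MAJOR in NvInfer.h; TensorRT 6 moved it
    # to NvInferVersion.h, so fall back to the second header when the first
    # match comes back empty.
    file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)"
           TENSORRT_MAJOR_VERSION "${TENSORRT_VERSION_FILE_CONTENTS}")
    if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
      file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
      string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)"
             TENSORRT_MAJOR_VERSION "${TENSORRT_VERSION_FILE_CONTENTS}")
    endif()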

* Fix TensorRT variables in CMake

1. Replace `${TENSORRT_ROOT}/include` with `${TENSORRT_INCLUDE_DIR}`
2. Replace `${TENSORRT_ROOT}/lib` with `${TENSORRT_LIBRARY}`

A manually typed path may point at the wrong TensorRT location; use the
paths detected by the system instead.

* Fix TensorRT library path

1. Add new variable - `${TENSORRT_LIBRARY_DIR}`
2. Fix TensorRT library path

inference_lib.cmake and setup.py.in need the directory containing the
TensorRT library rather than the library file itself, so add a new variable
to carry it.
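
An alternative (not what this patch does) would be to derive the directory
from an already-found library file; the patch instead locates the directory
first so that globs like *nvinfer* can be applied to it later.

    # Hedged sketch: get_filename_component() can recover the directory
    # portion of a full library path found by find_library().
    get_filename_component(TENSORRT_LIBRARY_DIR "${TENSORRT_LIBRARY}" DIRECTORY)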

* Add a more general search rule for TensorRT

Let the system detect the architecture instead of assigning it manually, so
replace `x86_64-linux-gnu` with `${CMAKE_LIBRARY_ARCHITECTURE}`.
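
A minimal sketch of the idea, outside this patch: on Debian/Ubuntu multiarch
systems CMake sets `CMAKE_LIBRARY_ARCHITECTURE` to the platform triplet, so
search paths can be composed generically. The `TRT_INC_DIR` name below is
hypothetical.

    # CMAKE_LIBRARY_ARCHITECTURE expands to e.g. "x86_64-linux-gnu" on
    # multiarch Linux and is empty on platforms without multiarch layouts.
    find_path(TRT_INC_DIR NvInfer.h
      PATHS /usr/include /usr/include/${CMAKE_LIBRARY_ARCHITECTURE})
    message(STATUS "NvInfer.h found in: ${TRT_INC_DIR}")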

* Add a more general search rule for TensorRT

Remove duplicate search rules for the TensorRT libraries and use
`${TENSORRT_LIBRARY_DIR}` to get the full path of libnvinfer.so.

test=develop
Parent d8b6cf2b
inference_lib.cmake
@@ -145,7 +145,7 @@ endif ()
 if (TENSORRT_FOUND)
     set(dst_dir "${FLUID_INFERENCE_INSTALL_DIR}/third_party/install/tensorrt")
     copy(inference_lib_dist
-        SRCS ${TENSORRT_ROOT}/include/Nv*.h ${TENSORRT_ROOT}/lib/*nvinfer*
+        SRCS ${TENSORRT_INCLUDE_DIR}/Nv*.h ${TENSORRT_LIBRARY_DIR}/*nvinfer*
         DSTS ${dst_dir}/include ${dst_dir}/lib)
 endif ()
...
tensorrt.cmake
@@ -19,13 +19,23 @@ endif()
 find_path(TENSORRT_INCLUDE_DIR NvInfer.h
   PATHS ${TENSORRT_ROOT} ${TENSORRT_ROOT}/include
+    ${TENSORRT_ROOT}/include/${CMAKE_LIBRARY_ARCHITECTURE}
   $ENV{TENSORRT_ROOT} $ENV{TENSORRT_ROOT}/include
+    $ENV{TENSORRT_ROOT}/include/${CMAKE_LIBRARY_ARCHITECTURE}
   NO_DEFAULT_PATH
 )

-find_library(TENSORRT_LIBRARY NAMES ${TR_INFER_LIB} ${TR_INFER_RT}
+find_path(TENSORRT_LIBRARY_DIR NAMES ${TR_INFER_LIB} ${TR_INFER_RT}
   PATHS ${TENSORRT_ROOT} ${TENSORRT_ROOT}/lib
+    ${TENSORRT_ROOT}/lib/${CMAKE_LIBRARY_ARCHITECTURE}
   $ENV{TENSORRT_ROOT} $ENV{TENSORRT_ROOT}/lib
+    $ENV{TENSORRT_ROOT}/lib/${CMAKE_LIBRARY_ARCHITECTURE}
+  NO_DEFAULT_PATH
+  DOC "Path to TensorRT library."
+)
+
+find_library(TENSORRT_LIBRARY NAMES ${TR_INFER_LIB} ${TR_INFER_RT}
+  PATHS ${TENSORRT_LIBRARY_DIR}
   NO_DEFAULT_PATH
   DOC "Path to TensorRT library.")
@@ -35,12 +45,28 @@ if(TENSORRT_INCLUDE_DIR AND TENSORRT_LIBRARY)
   endif(WITH_DSO)
 else()
   set(TENSORRT_FOUND OFF)
+  if(WITH_DSO)
+    message(WARNING "TensorRT is NOT found.")
+  else(WITH_DSO)
+    message(WARNING "TensorRT is disabled because WITH_DSO is OFF.")
+  endif(WITH_DSO)
 endif()

 if(TENSORRT_FOUND)
   file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
   string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
     "${TENSORRT_VERSION_FILE_CONTENTS}")
+  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
+    file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
+    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+      "${TENSORRT_VERSION_FILE_CONTENTS}")
+  endif()
+  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
+    message(SEND_ERROR "Failed to detect TensorRT version.")
+  endif()
   string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
     TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
...
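
For reference, a hypothetical consumer of the variables this module sets
(not part of the patch):

    if(TENSORRT_FOUND)
      # TENSORRT_MAJOR_VERSION is the number extracted above, e.g. 5 or 6.
      message(STATUS "TensorRT v${TENSORRT_MAJOR_VERSION}: ${TENSORRT_LIBRARY}")
      include_directories(${TENSORRT_INCLUDE_DIR})
    endif()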
setup.py.in
@@ -175,8 +175,8 @@ package_data['paddle.libs']=[('libwarpctc' if os.name != 'nt' else 'warpctc') +
         shutil.copy('${WARPCTC_LIBRARIES}', libs_path)
     if '${TENSORRT_FOUND}' == 'ON' and os.name == 'nt':
-        shutil.copy(os.path.join('${TENSORRT_ROOT}', 'lib', '${TR_INFER_RT}'), libs_path)
-        shutil.copy(os.path.join('${TENSORRT_ROOT}', 'lib', '${TR_INFER_PLUGIN_RT}'), libs_path)
+        shutil.copy(os.path.join('${TENSORRT_LIBRARY_DIR}', '${TR_INFER_RT}'), libs_path)
+        shutil.copy(os.path.join('${TENSORRT_LIBRARY_DIR}', '${TR_INFER_PLUGIN_RT}'), libs_path)
         package_data['paddle.libs'] += ['${TR_INFER_RT}', '${TR_INFER_PLUGIN_RT}']
     if '${WITH_MKL}' == 'ON':
...
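
The '${...}' placeholders above are substituted by CMake when setup.py.in is
rendered into setup.py, along the lines of the following hedged sketch (not
necessarily the exact call used by the build):

    # The plain form of configure_file() replaces both ${VAR} and @VAR@
    # occurrences in the template; @ONLY would leave ${VAR} untouched.
    configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in
                   ${CMAKE_CURRENT_BINARY_DIR}/setup.py)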