From d4682247e183beed0c176e0e3b051dfab9e20069 Mon Sep 17 00:00:00 2001
From: Luo Tao
Date: Mon, 16 Apr 2018 23:04:42 +0800
Subject: [PATCH] auto find tensorrt library

---
 CMakeLists.txt                               |  7 +----
 Dockerfile                                   |  2 +-
 cmake/configure.cmake                        | 10 ++++++
 cmake/tensorrt.cmake                         | 33 ++++++++++++++++++++
 paddle/fluid/inference/CMakeLists.txt        |  2 +-
 paddle/fluid/platform/dynload/CMakeLists.txt |  2 +-
 6 files changed, 47 insertions(+), 9 deletions(-)
 create mode 100644 cmake/tensorrt.cmake

diff --git a/CMakeLists.txt b/CMakeLists.txt
index de47086db..23bbe829a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -39,7 +39,6 @@ option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_F
 option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF)
 option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
 option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND})
-option(WITH_TENSORRT "Compile PaddlePaddle with TensorRT support." OFF)
 option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
 option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF)
 option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON)
@@ -180,13 +179,9 @@ set(EXTERNAL_LIBS
 
 if(WITH_GPU)
     include(cuda)
+    include(tensorrt)
 endif(WITH_GPU)
 
-# TensorRT depends on GPU.
-if (NOT WITH_GPU)
-  set(WITH_TENSORRT OFF)
-endif()
-
 if(WITH_AMD_GPU)
     find_package(HIP)
     include(hip)
diff --git a/Dockerfile b/Dockerfile
index 9097bb657..870304a6a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -46,7 +46,7 @@ ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
 RUN curl -s -q https://glide.sh/get | sh
 
 # Install TensorRT
-# The unnecessary files has been removed to make the library small.
+# The unnecessary files have been removed to make the library small. It only contains include and lib now.
 RUN wget -qO- http://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \
     tar -xz -C /usr/local && \
     cp -rf /usr/local/TensorRT/include /usr && \
diff --git a/cmake/configure.cmake b/cmake/configure.cmake
index f726405c4..e490397cc 100644
--- a/cmake/configure.cmake
+++ b/cmake/configure.cmake
@@ -80,6 +80,16 @@ if(WITH_GPU)
     # Include cuda and cudnn
     include_directories(${CUDNN_INCLUDE_DIR})
     include_directories(${CUDA_TOOLKIT_INCLUDE})
+
+    if(TENSORRT_FOUND)
+        if(${CUDA_VERSION_MAJOR} VERSION_LESS 8)
+            message(FATAL_ERROR "TensorRT needs CUDA >= 8.0 to compile")
+        endif()
+        if(${CUDNN_MAJOR_VERSION} VERSION_LESS 7)
+            message(FATAL_ERROR "TensorRT needs CUDNN >= 7.0 to compile")
+        endif()
+        include_directories(${TENSORRT_INCLUDE_DIR})
+    endif()
 elseif(WITH_AMD_GPU)
     add_definitions(-DPADDLE_WITH_HIP)
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__HIP_PLATFORM_HCC__")
diff --git a/cmake/tensorrt.cmake b/cmake/tensorrt.cmake
new file mode 100644
index 000000000..0c07d36be
--- /dev/null
+++ b/cmake/tensorrt.cmake
@@ -0,0 +1,33 @@
+if(NOT WITH_GPU)
+    return()
+endif()
+
+set(TENSORRT_ROOT "/usr" CACHE PATH "TENSORRT ROOT")
+find_path(TENSORRT_INCLUDE_DIR NvInfer.h
+    PATHS ${TENSORRT_ROOT} ${TENSORRT_ROOT}/include
+    $ENV{TENSORRT_ROOT} $ENV{TENSORRT_ROOT}/include
+    NO_DEFAULT_PATH
+)
+
+find_library(TENSORRT_LIBRARY NAMES libnvinfer.so libnvinfer.a
+    PATHS ${TENSORRT_ROOT} ${TENSORRT_ROOT}/lib
+    $ENV{TENSORRT_ROOT} $ENV{TENSORRT_ROOT}/lib
+    NO_DEFAULT_PATH
+    DOC "Path to TensorRT library.")
+
+if(TENSORRT_INCLUDE_DIR AND TENSORRT_LIBRARY)
+    set(TENSORRT_FOUND ON)
+else()
+    set(TENSORRT_FOUND OFF)
+endif()
+
+if(TENSORRT_FOUND)
+    file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
+    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+        "${TENSORRT_VERSION_FILE_CONTENTS}")
+    string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
+        TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
+
+    message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
+        "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
+endif()
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index 8494edee6..cc45bfe9b 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -21,7 +21,7 @@ endif()
 
 if(WITH_TESTING)
   add_subdirectory(tests/book)
-  if (WITH_TENSORRT)
+  if (TENSORRT_FOUND)
     add_subdirectory(tensorrt)
   endif()
 endif()
diff --git a/paddle/fluid/platform/dynload/CMakeLists.txt b/paddle/fluid/platform/dynload/CMakeLists.txt
index b93b925a7..364c4901b 100644
--- a/paddle/fluid/platform/dynload/CMakeLists.txt
+++ b/paddle/fluid/platform/dynload/CMakeLists.txt
@@ -1,7 +1,7 @@
 cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags enforce)
 
 list(APPEND CUDA_SRCS cublas.cc cudnn.cc curand.cc nccl.cc)
-if (WITH_TENSORRT)
+if (TENSORRT_FOUND)
   list(APPEND CUDA_SRCS tensorrt.cc)
 endif()
 
--
GitLab
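
Note: the new cmake/tensorrt.cmake module caches TENSORRT_ROOT and defines TENSORRT_FOUND, TENSORRT_INCLUDE_DIR, and TENSORRT_LIBRARY. As a rough illustration only (not part of this patch), a minimal standalone CMakeLists.txt could reuse the module as sketched below; the trt_probe project name and main.cc source are hypothetical placeholders.

    # Hypothetical consumer of cmake/tensorrt.cmake (illustration only, not from this patch).
    # Configure with e.g.:  cmake -DWITH_GPU=ON -DTENSORRT_ROOT=/usr/local/TensorRT <src-dir>
    cmake_minimum_required(VERSION 3.5)
    project(trt_probe CXX)

    option(WITH_GPU "Build with NVIDIA GPU support" ON)

    # Make cmake/tensorrt.cmake visible to include().
    list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
    include(tensorrt)   # sets TENSORRT_FOUND, TENSORRT_INCLUDE_DIR, TENSORRT_LIBRARY

    if(TENSORRT_FOUND)
        add_executable(trt_probe main.cc)   # main.cc is a placeholder source file
        target_include_directories(trt_probe PRIVATE ${TENSORRT_INCLUDE_DIR})
        target_link_libraries(trt_probe ${TENSORRT_LIBRARY})
    else()
        message(STATUS "TensorRT not found; building without TensorRT support")
    endif()

Inside PaddlePaddle itself the module is pulled in by include(tensorrt) in the top-level CMakeLists.txt, guarded by WITH_GPU, so the only step a user may need is setting TENSORRT_ROOT (cache variable or environment variable) when TensorRT is installed outside the default /usr prefix.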