PaddlePaddle / PaddleDetection

Commit 038dbb38 (unverified)
Authored Apr 17, 2018 by Tao Luo; committed via GitHub on Apr 17, 2018
Merge pull request #9958 from luotao1/find_tensorrt
auto find tensorrt library and install in user root
Parents: fc6f0be2, d4682247
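The change drops the manual WITH_TENSORRT switch: when WITH_GPU is on, the new cmake/tensorrt.cmake probe looks for NvInfer.h and libnvinfer under TENSORRT_ROOT (default "/usr") or the TENSORRT_ROOT environment variable and sets TENSORRT_FOUND accordingly. Below is a minimal sketch, not part of this commit, of how a build might point the probe at a non-default install; the path /opt/TensorRT is a hypothetical example.

    # Hypothetical configure invocation (path is illustrative only):
    #   cmake .. -DWITH_GPU=ON -DTENSORRT_ROOT=/opt/TensorRT
    #   # or: export TENSORRT_ROOT=/opt/TensorRT before running cmake
    #
    # Downstream CMake code then branches on the probe result instead of a user option:
    if(TENSORRT_FOUND)
        include_directories(${TENSORRT_INCLUDE_DIR})        # headers located by find_path
        message(STATUS "TensorRT library: ${TENSORRT_LIBRARY}")
    endif()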
Showing 6 changed files with 47 additions and 9 deletions (+47 −9)
CMakeLists.txt                                   +1  −6
Dockerfile                                       +1  −1
cmake/configure.cmake                            +10 −0
cmake/tensorrt.cmake                             +33 −0
paddle/fluid/inference/CMakeLists.txt            +1  −1
paddle/fluid/platform/dynload/CMakeLists.txt     +1  −1
CMakeLists.txt

@@ -39,7 +39,6 @@ option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_F
 option(WITH_AMD_GPU    "Compile PaddlePaddle with AMD GPU" OFF)
 option(WITH_AVX        "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
 option(WITH_MKL        "Compile PaddlePaddle with MKL support." ${AVX_FOUND})
-option(WITH_TENSORRT   "Compile PaddlePaddle with TensorRT support." OFF)
 option(WITH_DSO        "Compile PaddlePaddle with dynamic linked CUDA" ON)
 option(WITH_TESTING    "Compile PaddlePaddle with unit testing" OFF)
 option(WITH_SWIG_PY    "Compile PaddlePaddle with inference api" ON)

@@ -180,13 +179,9 @@ set(EXTERNAL_LIBS
 if(WITH_GPU)
     include(cuda)
+    include(tensorrt)
 endif(WITH_GPU)

-# TensorRT depends on GPU.
-if(NOT WITH_GPU)
-    set(WITH_TENSORRT OFF)
-endif()
-
 if(WITH_AMD_GPU)
     find_package(HIP)
     include(hip)
Dockerfile

@@ -46,7 +46,7 @@ ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
 RUN curl -s -q https://glide.sh/get | sh

 # Install TensorRT
-# The unnecessary files has been removed to make the library small.
+# The unnecessary files has been removed to make the library small. It only contains include and lib now.
 RUN wget -qO- http://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \
     tar -xz -C /usr/local && \
     cp -rf /usr/local/TensorRT/include /usr && \
cmake/configure.cmake

@@ -80,6 +80,16 @@ if(WITH_GPU)
     # Include cuda and cudnn
     include_directories(${CUDNN_INCLUDE_DIR})
     include_directories(${CUDA_TOOLKIT_INCLUDE})
+
+    if(TENSORRT_FOUND)
+        if(${CUDA_VERSION_MAJOR} VERSION_LESS 8)
+            message(FATAL_ERROR "TensorRT needs CUDA >= 8.0 to compile")
+        endif()
+        if(${CUDNN_MAJOR_VERSION} VERSION_LESS 7)
+            message(FATAL_ERROR "TensorRT needs CUDNN >= 7.0 to compile")
+        endif()
+        include_directories(${TENSORRT_INCLUDE_DIR})
+    endif()
 elseif(WITH_AMD_GPU)
     add_definitions(-DPADDLE_WITH_HIP)
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__HIP_PLATFORM_HCC__")
cmake/tensorrt.cmake (new file, 0 → 100644)

if(NOT WITH_GPU)
    return()
endif()

set(TENSORRT_ROOT "/usr" CACHE PATH "TENSORRT ROOT")

find_path(TENSORRT_INCLUDE_DIR NvInfer.h
    PATHS ${TENSORRT_ROOT} ${TENSORRT_ROOT}/include
    $ENV{TENSORRT_ROOT} $ENV{TENSORRT_ROOT}/include
    NO_DEFAULT_PATH
)

find_library(TENSORRT_LIBRARY NAMES libnvinfer.so libnvinfer.a
    PATHS ${TENSORRT_ROOT} ${TENSORRT_ROOT}/lib
    $ENV{TENSORRT_ROOT} $ENV{TENSORRT_ROOT}/lib
    NO_DEFAULT_PATH
    DOC "Path to TensorRT library.")

if(TENSORRT_INCLUDE_DIR AND TENSORRT_LIBRARY)
    set(TENSORRT_FOUND ON)
else()
    set(TENSORRT_FOUND OFF)
endif()

if(TENSORRT_FOUND)
    file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
        "${TENSORRT_VERSION_FILE_CONTENTS}")
    string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
        TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
    message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
        "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
endif()
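For clarity, here is a small standalone sketch of the two-step version extraction above, run against a fabricated header string (an assumption for illustration, not read from a real NvInfer.h): REGEX MATCH keeps the whole "define NV_TENSORRT_MAJOR <n>" fragment, and REGEX REPLACE then reduces it to the captured digits.

    # Fabricated stand-in for the contents of NvInfer.h:
    set(_fake_header "#define NV_TENSORRT_MAJOR 4\n#define NV_TENSORRT_MINOR 0\n")
    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" _ver "${_fake_header}")
    # _ver is now "define NV_TENSORRT_MAJOR 4"
    string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1" _ver "${_ver}")
    message(STATUS "Extracted TensorRT major version: ${_ver}")   # prints 4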
paddle/fluid/inference/CMakeLists.txt

@@ -21,7 +21,7 @@ endif()
 if(WITH_TESTING)
     add_subdirectory(tests/book)
-    if (WITH_TENSORRT)
+    if (TENSORRT_FOUND)
         add_subdirectory(tensorrt)
     endif()
 endif()
paddle/fluid/platform/dynload/CMakeLists.txt

 cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags enforce)

 list(APPEND CUDA_SRCS cublas.cc cudnn.cc curand.cc nccl.cc)

-if (WITH_TENSORRT)
+if (TENSORRT_FOUND)
     list(APPEND CUDA_SRCS tensorrt.cc)
 endif()