Unverified commit 3b02c71e, authored by cnn, committed by GitHub

[dev] fix paddle inference name problem, and update doc (#2430)

* fix paddle inference name problem, and update doc

* set default value

* fix inference lib name in static

* update deploy doc, fix build error of windows

* add annotation of PADDLE_LIB_NAME

* set min_subgraph_size=33 as default, fix bug of fcos_dcn_r50_fpn_1x
Parent 988574fe
@@ -4,10 +4,10 @@ project(PaddleObjectDetector CXX C)

option(WITH_MKL        "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU        "Compile demo with GPU/CPU, default use CPU."              ON)
option(WITH_TENSORRT   "Compile demo with TensorRT."                              OFF)
-option(USE_PADDLE_20RC1 "Compile demo with paddle_inference_lib 2.0rc1"           ON)

SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
+SET(PADDLE_LIB_NAME "" CACHE STRING "libpaddle_inference")
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
SET(CUDA_LIB "" CACHE PATH "Location of libraries")
SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
@@ -153,41 +153,23 @@ endif()

if (WIN32)
-  if (USE_PADDLE_20RC1)
-    # 2.0rc1 win32 shared lib name is paddle_fluid.dll and paddle_fluid.lib
-    if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}")
-      set(DEPS
-          ${PADDLE_DIR}/paddle/fluid/inference/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-    else()
-      set(DEPS
-          ${PADDLE_DIR}/paddle/lib/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-    endif()
-  else()
-    # before 2.0rc1 win32 shared lib name is libpaddle_fluid.dll and libpaddle_fluid.lib
-    if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}")
-      set(DEPS
-          ${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-    else()
-      set(DEPS
-          ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-    endif()
+  if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}")
+    set(DEPS
+        ${PADDLE_DIR}/paddle/fluid/inference/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
+  else()
+    set(DEPS
+        ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
  endif()
endif()

if (WIN32)
-  if (USE_PADDLE_20RC1)
-    # 2.0rc1 win32 shared lib name is paddle_fluid.dll and paddle_fluid.lib
-    set(DEPS ${PADDLE_DIR}/paddle/lib/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-  else()
-    # before 2.0rc1 win32 shared lib name is libpaddle_fluid.dll and libpaddle_fluid.lib
-    set(DEPS ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-  endif()
+  set(DEPS ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
-  # linux shared lib name is libpaddle_fluid.so
-  set(DEPS ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+  set(DEPS ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
+message("PADDLE_LIB_NAME:" ${PADDLE_LIB_NAME})

message("DEPS:" ${DEPS})

if (NOT WIN32)
@@ -248,12 +230,12 @@ if (WIN32 AND WITH_MKL)

    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
  )
endif()

-if (WIN32 AND USE_PADDLE_20RC1)
+if (WIN32)
  add_custom_command(TARGET main POST_BUILD
-    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/paddle_fluid.dll ./release/paddle_fluid.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
  )
endif()
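With `USE_PADDLE_20RC1` replaced by the `PADDLE_LIB_NAME` cache variable, a configure step for this CMakeLists might look like the following sketch (paths are placeholders; the name must match the library file shipped under `paddle/lib/` in your inference package):

```shell
mkdir -p build && cd build
cmake .. \
    -DWITH_GPU=ON \
    -DPADDLE_DIR=/path/to/paddle_inference \
    -DPADDLE_LIB_NAME=paddle_inference \
    -DOPENCV_DIR=/path/to/opencv
make
```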
# Jetson Platform Compilation Guide

## Overview

`NVIDIA Jetson` devices are embedded devices equipped with an `NVIDIA GPU`, and object detection models can be deployed on them. This document is a tutorial for deploying `PaddleDetection` models on `Jetson` hardware.

This document uses `Jetson TX2` hardware with `JetPack 4.3` as its example.

For the `Jetson` platform development guide, see the [NVIDIA Jetson Linux Developer Guide](https://docs.nvidia.com/jetson/l4t/index.html).
## Setting up the Jetson environment

For `Jetson` system software installation, refer to the [NVIDIA Jetson Linux Developer Guide](https://docs.nvidia.com/jetson/l4t/index.html).

* (1) Check the L4T version of the hardware:
```
cat /etc/nv_tegra_release
```
* (2) Based on the hardware, choose an installable `JetPack` version; for the hardware-to-`JetPack` mapping, see the [jetpack-archive](https://developer.nvidia.com/embedded/jetpack-archive).
* (3) Download `JetPack` and flash the system image following the `Preparing a Jetson Developer Kit for Use` chapter of the [NVIDIA Jetson Linux Developer Guide](https://docs.nvidia.com/jetson/l4t/index.html).
## Downloading or compiling the `Paddle` inference library

This document uses the `Paddle` inference library prebuilt for `JetPack 4.3`. Choose the `Paddle` inference library matching your hardware from [Install and Compile the Linux Inference Library](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/build_and_install_lib_cn.html).

Here we choose [nv_jetson_cuda10_cudnn7.6_trt6(jetpack4.3)](https://paddle-inference-lib.bj.bcebos.com/2.0.0-nv-jetson-jetpack4.3-all/paddle_inference.tgz): `Paddle` version `2.0.0-rc0`, `CUDA` version `10.0`, `CUDNN` version `7.6`, `TensorRT` version `6`.

If you need to build the `Paddle` library on the `Jetson` platform yourself, refer to the section on compiling the inference library from source for `NVIDIA Jetson` embedded hardware in [Install and Compile the Linux Inference Library](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html).
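As a concrete sketch of this download step (the extracted directory name is an assumption; this guide expects the library to end up at `/root/projects/fluid_inference`):

```shell
# fetch and unpack the prebuilt JetPack 4.3 inference library linked above
cd /root/projects
wget https://paddle-inference-lib.bj.bcebos.com/2.0.0-nv-jetson-jetpack4.3-all/paddle_inference.tgz
tar -xzf paddle_inference.tgz
```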
### Step1: Download the code

`git clone https://github.com/PaddlePaddle/PaddleDetection.git`

**Note**: The `C++` inference code lives in the `/root/projects/PaddleDetection/deploy/cpp` directory, which does not depend on any other directory under `PaddleDetection`.
### Step2: Download the PaddlePaddle C++ inference library fluid_inference

Extract the downloaded [nv_jetson_cuda10_cudnn7.6_trt6(jetpack4.3)](https://paddle-inference-lib.bj.bcebos.com/2.0.1-nv-jetson-jetpack4.3-all/paddle_inference.tgz).

After downloading and extracting, the `/root/projects/fluid_inference` directory contains:
```
fluid_inference
├── paddle            # Paddle core libraries and header files
|
├── third_party       # third-party dependency libraries and header files
|
└── version.txt       # version and build information
```
**Note:** The prebuilt library `nv-jetson-cuda10-cudnn7.6-trt6` is built with `GCC` `7.5.0`; all other prebuilt libraries are built with `GCC 4.8.5`. Using a higher GCC version may cause `ABI` compatibility problems; consider downgrading or [building the inference library yourself](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html).
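To catch ABI mismatches early, it can help to compare the local toolchain against the library's build metadata (a sketch; the `version.txt` path assumes the layout shown above):

```shell
gcc --version | head -n1                        # local compiler version
cat /root/projects/fluid_inference/version.txt  # compiler, CUDA, and TensorRT versions of the prebuilt library
```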
### Step4: Compile

The `cmake` compilation commands are in `scripts/build.sh`; adjust the main parameters to your actual setup. They are explained below.

Note that on the `TX2` platform, `CUDA` and `CUDNN` must be installed via `JetPack`.
```
# whether to use GPU (i.e. whether to use CUDA)
WITH_GPU=ON
# whether to use MKL or OpenBLAS; must be set to OFF on TX2
WITH_MKL=OFF
# whether to enable TensorRT (effective only when WITH_GPU=ON)
WITH_TENSORRT=ON
# TensorRT include path
TENSORRT_INC_DIR=/usr/include/aarch64-linux-gnu
# TensorRT lib path
TENSORRT_LIB_DIR=/usr/lib/aarch64-linux-gnu
# Paddle inference library path
PADDLE_DIR=/path/to/fluid_inference/
# Paddle inference library name
PADDLE_LIB_NAME=paddle_inference
# whether to link the Paddle inference library statically
# when using TensorRT, the Paddle inference library is usually a shared library
WITH_STATIC_LIB=OFF
# CUDA lib path
CUDA_LIB=/usr/local/cuda-10.0/lib64
# CUDNN lib path
CUDNN_LIB=/usr/lib/aarch64-linux-gnu
# OPENCV_DIR path
# on plain Linux, download https://bj.bcebos.com/paddleseg/deploy/opencv3.4.6gcc4.8ffmpeg.tar.gz2 and extract it into the deps folder
# on TX2, download https://paddlemodels.bj.bcebos.com/TX2_JetPack4.3_opencv_3.4.10_gcc7.5.0.zip and extract it into the deps folder
OPENCV_DIR=/path/to/opencv

# double-check that the paths above are correct
# nothing below needs to change
cmake .. \
    -DWITH_GPU=${WITH_GPU} \
    -DWITH_MKL=OFF \
    -DWITH_TENSORRT=${WITH_TENSORRT} \
    -DTENSORRT_LIB_DIR=${TENSORRT_LIB_DIR} \
    -DTENSORRT_INC_DIR=${TENSORRT_INC_DIR} \
    -DPADDLE_DIR=${PADDLE_DIR} \
    -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
    -DCUDA_LIB=${CUDA_LIB} \
    -DCUDNN_LIB=${CUDNN_LIB} \
    -DOPENCV_DIR=${OPENCV_DIR} \
    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
make
```
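Because the library file name differs across Paddle versions and platforms (for example `libpaddle_fluid`, `paddle_fluid`, or `paddle_inference`), one way to determine the right `PADDLE_LIB_NAME` is simply to list the downloaded package (a sketch, assuming the Step2 layout; the value is the library file name without its `.so`/`.lib` extension):

```shell
ls /root/projects/fluid_inference/paddle/lib/
```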
For example, one concrete configuration:
```
# whether to use GPU (i.e. whether to use CUDA)
WITH_GPU=ON
# whether to use MKL or OpenBLAS
WITH_MKL=OFF
# whether to enable TensorRT (effective only when WITH_GPU=ON)
WITH_TENSORRT=OFF
# TensorRT include path
TENSORRT_INC_DIR=/usr/include/aarch64-linux-gnu
# TensorRT lib path
TENSORRT_LIB_DIR=/usr/lib/aarch64-linux-gnu
# Paddle inference library path
PADDLE_DIR=/home/nvidia/PaddleDetection_infer/fluid_inference/
# Paddle inference library name
PADDLE_LIB_NAME=paddle_inference
# whether to link the Paddle inference library statically
# when using TensorRT, the Paddle inference library is usually a shared library
WITH_STATIC_LIB=OFF
# CUDA lib path
CUDA_LIB=/usr/local/cuda-10.0/lib64
# CUDNN lib path
CUDNN_LIB=/usr/lib/aarch64-linux-gnu/
```
After setting the main parameters in the script, run the `build` script:
```shell
sh ./scripts/build.sh
```
### Step5: Inference and visualization

After a successful build, the inference entry point is `build/main`; its main command-line parameters are:

| Parameter | Description |
| ---- | ---- |
| --model_dir | path of the exported inference model |
| --image_path | path of the image file to predict |
| --video_path | path of the video file to predict |
| --camera_id | (optional) camera ID used for prediction; defaults to -1, meaning no camera is used |
| --use_gpu | whether to run inference on GPU; 0 or 1 (default 0) |
| --gpu_id | GPU device id used for inference (default 0) |
| --run_mode | run mode when using GPU; default fluid, options: fluid/trt_fp32/trt_fp16 |
| --run_benchmark | whether to repeat inference to benchmark speed |
| --output_dir | folder for output images; default output |

**Note**: If both `video_path` and `image_path` are set, the program only runs prediction on `video_path`.
`Example 1`:
```shell
# predict the image /root/projects/images/test.jpeg without GPU
./main --model_dir=/root/projects/models/yolov3_darknet --image_path=/root/projects/images/test.jpeg
```
The `visualized prediction result` for an image file is saved to `output.jpg` in the current directory.
`Example 2`:
```shell
# predict the video /root/projects/videos/test.mp4 with GPU
./main --model_dir=/root/projects/models/yolov3_darknet --video_path=/root/projects/images/test.mp4 --use_gpu=1
```
Only `.mp4` video files are currently supported; the `visualized prediction result` is saved to `output.mp4` in the current directory.
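Analogously, the `--camera_id` parameter documented above suggests a live-camera run along these lines (the device id is an assumption):

```shell
# read frames from camera 0 and run inference on GPU
./main --model_dir=/root/projects/models/yolov3_darknet --camera_id=0 --use_gpu=1
```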
## Performance

Test environment: hardware TX2, JetPack 4.3, Paddle inference library 1.8.4, CUDA 10.0, CUDNN 7.5, TensorRT 5.0.

The first 100 warmup rounds are excluded and the average over 100 rounds is reported, in ms/image; only model execution time is counted, excluding data preprocessing and copies.

| Model | Input | AnalysisPredictor (ms) |
|---|----|---|
| yolov3_mobilenet_v1 | 608*608 | 56.243858 |
| faster_rcnn_r50_1x | 1333*1333 | 73.552460 |
| faster_rcnn_r50_vd_fpn_2x | 1344*1344 | 87.582146 |
| mask_rcnn_r50_fpn_1x | 1344*1344 | 107.317848 |
| mask_rcnn_r50_vd_fpn_2x | 1344*1344 | 87.98.708122 |
| ppyolo_r18vd | 320*320 | 22.876789 |
| ppyolo_2x | 608*608 | 68.562050 |
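Numbers like these can be gathered with the `--run_benchmark` flag documented in Step5 (a sketch; model and image paths are placeholders):

```shell
# repeat inference on one image and report average timing
./main --model_dir=/root/projects/models/yolov3_darknet \
       --image_path=/root/projects/images/test.jpeg \
       --use_gpu=1 --run_benchmark=1
```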
# Linux Platform Compilation Guide

## Overview

-This document has been tested on the `Linux` platform with `GCC 4.8.5` and `GCC 4.9.4`. To build with a newer G++ version, you must recompile the Paddle inference library; see [Compile the Paddle inference library from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html). The bundled OpenCV library was built on Ubuntu 16.04 with GCC 4.8; to build on a system other than Ubuntu 16.04, you must compile OpenCV yourself.
+This document has been tested on the `Linux` platform with `GCC 8.2`. To build with another G++ version, you must recompile the Paddle inference library; see [Compile the Paddle inference library from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html). The bundled OpenCV library was built on Ubuntu 16.04 with GCC 4.8; to build on a system other than Ubuntu 16.04, you must compile OpenCV yourself.

## Prerequisites

-* G++ 4.8.2 ~ 4.9.4
+* G++ 8.2
* CUDA 9.0 / CUDA 10.0, cudnn 7+ (required only when using the GPU version of the inference library)
* CMake 3.0+
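A quick check of these prerequisites on the build machine might look like:

```shell
g++ --version | head -n1     # expect the G++ version listed above
cmake --version | head -n1   # expect >= 3.0
nvcc --version | tail -n1    # CUDA toolkit, only if using the GPU library
```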
@@ -19,7 +19,7 @@

### Step2: Download the PaddlePaddle C++ inference library fluid_inference

-The PaddlePaddle C++ inference library provides prebuilt packages for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-rc1/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)
+The PaddlePaddle C++ inference library provides prebuilt packages for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)

After downloading and extracting, the `/root/projects/fluid_inference` directory contains:
@@ -58,6 +58,9 @@ TENSORRT_LIB_DIR=/path/to/TensorRT/lib

# Paddle inference library path
PADDLE_DIR=/path/to/fluid_inference
+# Paddle inference library name
+PADDLE_LIB_NAME=paddle_inference

# CUDA lib path
CUDA_LIB=/path/to/cuda/lib

@@ -76,7 +79,8 @@ cmake .. \
    -DPADDLE_DIR=${PADDLE_DIR} \
    -DCUDA_LIB=${CUDA_LIB} \
    -DCUDNN_LIB=${CUDNN_LIB} \
-    -DOPENCV_DIR=${OPENCV_DIR}
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
make
```
......
@@ -24,7 +24,7 @@ git clone https://github.com/PaddlePaddle/PaddleDetection.git

### Step2: Download the PaddlePaddle C++ inference library fluid_inference

-The PaddlePaddle C++ inference library provides prebuilt packages for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-rc1/guides/05_inference_deployment/inference/windows_cpp_inference.html)
+The PaddlePaddle C++ inference library provides prebuilt packages for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/windows_cpp_inference.html)

After extracting, the `D:\projects\fluid_inference` directory contains:
```
@@ -62,18 +62,18 @@ cd D:\projects\PaddleDetection\deploy\cpp

| *CUDNN_LIB | CUDNN library path |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | Paddle inference library path |
-| USE_PADDLE_20RC1 | whether to use the 2.0rc1 inference library; with 2.0rc1, the library name changes on Windows and only shared-library builds are supported |
+| PADDLE_LIB_NAME | Paddle inference library name |

**Note:** 1. If you use the `CPU` inference library, untick `WITH_GPU`. 2. If you use the `openblas` build, untick `WITH_MKL`.

Run the following command to generate the project files:
```
-cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DOPENCV_DIR=path_to_opencv
+cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=path_to_opencv
```
For example:
```
-cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=D:\projects\packages\cuda10_0\lib\x64 -DCUDNN_LIB=D:\projects\packages\cuda10_0\lib\x64 -DPADDLE_DIR=D:\projects\packages\fluid_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6
+cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=D:\projects\packages\cuda10_0\lib\x64 -DCUDNN_LIB=D:\projects\packages\cuda10_0\lib\x64 -DPADDLE_DIR=D:\projects\packages\fluid_inference -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6
```
3. Build
......
@@ -38,7 +38,7 @@ class ImageBlob {

  // Buffer for image data after preprocessing
  std::vector<float> im_data_;
  // in net data shape(after pad)
-  std::vector<int> in_net_shape_;
+  std::vector<float> in_net_shape_;
  // Evaluation image width and height
  //std::vector<float> eval_im_size_f_;
  // Scale factor for image size to origin image size
......
@@ -7,17 +7,17 @@ WITH_MKL=ON

# whether to enable TensorRT (effective only when WITH_GPU=ON)
WITH_TENSORRT=OFF

-# whether to use the 2.0rc1 inference library
-USE_PADDLE_20RC1=ON
+# Paddle inference library name; it differs across platforms and versions, so check the name of the lib under `paddle_inference/lib/` in the downloaded package
+PADDLE_LIB_NAME=libpaddle_inference

# TensorRT include path
-TENSORRT_INC_DIR=/path/to/tensorrt/lib
+TENSORRT_INC_DIR=/path/to/tensorrt/include
# TensorRT lib path
-TENSORRT_LIB_DIR=/path/to/tensorrt/include
+TENSORRT_LIB_DIR=/path/to/tensorrt/lib

# Paddle inference library path
-PADDLE_DIR=/path/to/fluid_inference/
+PADDLE_DIR=/path/to/paddle_inference

# CUDA lib path
CUDA_LIB=/path/to/cuda/lib
@@ -72,7 +72,8 @@ cmake .. \
    -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
    -DCUDA_LIB=${CUDA_LIB} \
    -DCUDNN_LIB=${CUDNN_LIB} \
-    -DOPENCV_DIR=${OPENCV_DIR}
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
make
echo "make finished!"
@@ -207,9 +207,6 @@ int main(int argc, char** argv) {
    return -1;
  }

  // Load model and create an object detector
-  const std::vector<int> trt_min_shape = {1, FLAGS_trt_min_shape, FLAGS_trt_min_shape};
-  const std::vector<int> trt_max_shape = {1, FLAGS_trt_max_shape, FLAGS_trt_max_shape};
-  const std::vector<int> trt_opt_shape = {1, FLAGS_trt_opt_shape, FLAGS_trt_opt_shape};
  PaddleDetection::ObjectDetector det(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_run_mode,
                                      FLAGS_gpu_id, FLAGS_use_dynamic_shape, FLAGS_trt_min_shape,
                                      FLAGS_trt_max_shape, FLAGS_trt_opt_shape);
@@ -68,9 +68,9 @@ void ObjectDetector::LoadModel(const std::string& model_dir,

  // set use dynamic shape
  if (use_dynamic_shape) {
    // set DynamicShape for image tensor
-    const std::vector<int> min_input_shape = {1, trt_min_shape, trt_min_shape};
-    const std::vector<int> max_input_shape = {1, trt_max_shape, trt_max_shape};
-    const std::vector<int> opt_input_shape = {1, trt_opt_shape, trt_opt_shape};
+    const std::vector<int> min_input_shape = {1, 3, trt_min_shape, trt_min_shape};
+    const std::vector<int> max_input_shape = {1, 3, trt_max_shape, trt_max_shape};
+    const std::vector<int> opt_input_shape = {1, 3, trt_opt_shape, trt_opt_shape};
    const std::map<std::string, std::vector<int>> map_min_input_shape = {{"image", min_input_shape}};
    const std::map<std::string, std::vector<int>> map_max_input_shape = {{"image", max_input_shape}};
    const std::map<std::string, std::vector<int>> map_opt_input_shape = {{"image", opt_input_shape}};
......
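With the channel dimension now included in the dynamic shapes, a TensorRT dynamic-shape run of the demo might look like the sketch below (flag names follow the `FLAGS_*` gflags passed to the detector above; the paths and shape values are assumptions):

```shell
./main --model_dir=/path/to/exported_model \
       --image_path=/path/to/test.jpg \
       --use_gpu=1 --run_mode=trt_fp16 \
       --use_dynamic_shape=1 \
       --trt_min_shape=320 --trt_opt_shape=608 --trt_max_shape=1280
```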
@@ -26,8 +26,8 @@ void InitInfo::Run(cv::Mat* im, ImageBlob* data) {
  };
  data->scale_factor_ = {1., 1.};
  data->in_net_shape_ = {
-    static_cast<int>(im->rows),
-    static_cast<int>(im->cols)
+    static_cast<float>(im->rows),
+    static_cast<float>(im->cols)
  };
}
@@ -63,12 +63,12 @@ void Permute::Run(cv::Mat* im, ImageBlob* data) {

void Resize::Run(cv::Mat* im, ImageBlob* data) {
  auto resize_scale = GenerateScale(*im);
  data->im_shape_ = {
-    static_cast<int>(im->cols * resize_scale.first),
-    static_cast<int>(im->rows * resize_scale.second)
+    static_cast<float>(im->cols * resize_scale.first),
+    static_cast<float>(im->rows * resize_scale.second)
  };
  data->in_net_shape_ = {
-    static_cast<int>(im->cols * resize_scale.first),
-    static_cast<int>(im->rows * resize_scale.second)
+    static_cast<float>(im->cols * resize_scale.first),
+    static_cast<float>(im->rows * resize_scale.second)
  };
  cv::resize(
      *im, *im, cv::Size(), resize_scale.first, resize_scale.second, interp_);
@@ -126,8 +126,8 @@ void PadStride::Run(cv::Mat* im, ImageBlob* data) {
      cv::BORDER_CONSTANT,
      cv::Scalar(0));
  data->in_net_shape_ = {
-    static_cast<int>(im->rows),
-    static_cast<int>(im->cols),
+    static_cast<float>(im->rows),
+    static_cast<float>(im->cols),
  };
}
......
@@ -3,10 +3,11 @@ project(PaddleObjectDetector CXX C)

option(WITH_MKL        "Compile demo with MKL/OpenBlas support, default use MKL."     ON)
option(WITH_GPU        "Compile demo with GPU/CPU, default use CPU."                  ON)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
option(WITH_TENSORRT   "Compile demo with TensorRT."                                  OFF)

SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
+SET(PADDLE_LIB_NAME "" CACHE STRING "libpaddle_inference")
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
SET(CUDA_LIB "" CACHE PATH "Location of libraries")
SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
@@ -36,6 +37,7 @@ endif()

if (NOT DEFINED PADDLE_DIR OR ${PADDLE_DIR} STREQUAL "")
  message(FATAL_ERROR "please set PADDLE_DIR with -DPADDLE_DIR=/path/paddle_inference_dir")
endif()
+message("PADDLE_DIR IS:"${PADDLE_DIR})
if (NOT DEFINED OPENCV_DIR OR ${OPENCV_DIR} STREQUAL "")
  message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/opencv")
@@ -70,6 +72,8 @@ link_directories("${PADDLE_DIR}/third_party/install/xxhash/lib")

link_directories("${PADDLE_DIR}/paddle/lib/")
link_directories("${CMAKE_CURRENT_BINARY_DIR}")

if (WIN32)
  include_directories("${PADDLE_DIR}/paddle/fluid/inference")
  include_directories("${PADDLE_DIR}/paddle/include")
@@ -89,10 +93,6 @@ if (WIN32)

  set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT")
  set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd")
  set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT")
-  if (WITH_STATIC_LIB)
-    safe_set_static_flag()
-    add_definitions(-DSTATIC_LIB)
-  endif()
else()
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -o2 -fopenmp -std=c++11")
  set(CMAKE_STATIC_LIBRARY_PREFIX "")
@@ -113,8 +113,8 @@ endif()

if (NOT WIN32)
  if (WITH_TENSORRT AND WITH_GPU)
-    include_directories("${TENSORRT_INC_DIR}")
-    link_directories("${TENSORRT_LIB_DIR}")
+    include_directories("${TENSORRT_INC_DIR}/")
+    link_directories("${TENSORRT_LIB_DIR}/")
  endif()
endif(NOT WIN32)
@@ -148,31 +148,30 @@ if(WITH_MKL)
    endif ()
  endif()
else()
+  if (WIN32)
+    set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
+  else()
    set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
+  endif()
endif()
if (WIN32)
-  if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}")
-    set(DEPS
-        ${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+  if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}")
+    set(DEPS
+        ${PADDLE_DIR}/paddle/fluid/inference/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
  else()
-    set(DEPS
-        ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+    set(DEPS
+        ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
  endif()
endif()

-if(WITH_STATIC_LIB)
-  set(DEPS
-      ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+if (WIN32)
+  set(DEPS ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
-  set(DEPS
-      ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+  set(DEPS ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
+message("PADDLE_LIB_NAME:" ${PADDLE_LIB_NAME})
+message("DEPS:" ${DEPS})
if (NOT WIN32)
  set(DEPS ${DEPS}
      ${MATH_LIB} ${MKLDNN_LIB}
@@ -220,6 +219,7 @@ endif()

set(DEPS ${DEPS} ${OpenCV_LIBS})
add_executable(main src/main.cc src/preprocess_op.cc src/object_detector.cc)
ADD_DEPENDENCIES(main ext-yaml-cpp)
+message("DEPS:" ${DEPS})
target_link_libraries(main ${DEPS})
if (WIN32 AND WITH_MKL)

@@ -230,5 +230,12 @@ if (WIN32 AND WITH_MKL)
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
  )
endif()

+if (WIN32)
+  add_custom_command(TARGET main POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
+  )
+endif()
@@ -34,7 +34,7 @@ cat /etc/nv_tegra_release

### Step2: Download the PaddlePaddle C++ inference library fluid_inference

-Extract the downloaded [nv_jetson_cuda10_cudnn7.6_trt6(jetpack4.3)](https://paddle-inference-lib.bj.bcebos.com/2.0.0-nv-jetson-jetpack4.3-all/paddle_inference.tgz)
+Extract the downloaded [nv_jetson_cuda10_cudnn7.6_trt6(jetpack4.3)](https://paddle-inference-lib.bj.bcebos.com/2.0.1-nv-jetson-jetpack4.3-all/paddle_inference.tgz)

After downloading and extracting, the `/root/projects/fluid_inference` directory contains:
```
@@ -74,6 +74,9 @@ TENSORRT_LIB_DIR=/usr/lib/aarch64-linux-gnu

# Paddle inference library path
PADDLE_DIR=/path/to/fluid_inference/
+# Paddle inference library name
+PADDLE_LIB_NAME=paddle_inference

# whether to link the Paddle inference library statically
# when using TensorRT, the Paddle inference library is usually a shared library
WITH_STATIC_LIB=OFF
@@ -101,7 +104,8 @@ cmake .. \
    -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
    -DCUDA_LIB=${CUDA_LIB} \
    -DCUDNN_LIB=${CUDNN_LIB} \
-    -DOPENCV_DIR=${OPENCV_DIR}
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
make
```
......
# Linux Platform Compilation Guide

## Overview

-This document has been tested on the `Linux` platform with `GCC 4.8.5` and `GCC 4.9.4`. To build with a newer G++ version, you must recompile the Paddle inference library; see [Compile the Paddle inference library from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#zhijiexiazaianzhuang). The bundled OpenCV library was built on Ubuntu 16.04 with GCC 4.8; to build on a system other than Ubuntu 16.04, you must compile OpenCV yourself.
+This document has been tested on the `Linux` platform with `GCC 8.2`. To build with another G++ version, you must recompile the Paddle inference library; see [Compile the Paddle inference library from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html). The bundled OpenCV library was built on Ubuntu 16.04 with GCC 4.8; to build on a system other than Ubuntu 16.04, you must compile OpenCV yourself.

## Prerequisites

-* G++ 4.8.2 ~ 4.9.4
+* G++ 8.2
* CUDA 9.0 / CUDA 10.0, cudnn 7+ (required only when using the GPU version of the inference library)
* CMake 3.0+
@@ -19,7 +19,7 @@

### Step2: Download the PaddlePaddle C++ inference library fluid_inference

-The PaddlePaddle C++ inference library provides prebuilt packages for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#linux)
+The PaddlePaddle C++ inference library provides prebuilt packages for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)

After downloading and extracting, the `/root/projects/fluid_inference` directory contains:
@@ -58,9 +58,8 @@ TENSORRT_LIB_DIR=/path/to/TensorRT/lib

# Paddle inference library path
PADDLE_DIR=/path/to/fluid_inference

-# whether to link the Paddle inference library statically
-# when using TensorRT, the Paddle inference library is usually a shared library
-WITH_STATIC_LIB=OFF
+# Paddle inference library name
+PADDLE_LIB_NAME=paddle_inference

# CUDA lib path
CUDA_LIB=/path/to/cuda/lib

@@ -68,10 +67,6 @@ CUDA_LIB=/path/to/cuda/lib

# CUDNN lib path
CUDNN_LIB=/path/to/cudnn/lib

-# after setting the main parameters in the script, run the `build` script:
-sh ./scripts/build.sh

# double-check that the paths above are correct
# nothing below needs to change
@@ -82,10 +77,10 @@ cmake .. \
    -DTENSORRT_LIB_DIR=${TENSORRT_LIB_DIR} \
    -DTENSORRT_INC_DIR=${TENSORRT_INC_DIR} \
    -DPADDLE_DIR=${PADDLE_DIR} \
-    -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
    -DCUDA_LIB=${CUDA_LIB} \
    -DCUDNN_LIB=${CUDNN_LIB} \
-    -DOPENCV_DIR=${OPENCV_DIR}
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
make
```
@@ -94,6 +89,7 @@ make

```shell
sh ./scripts/build.sh
```

**Note**: OpenCV depends on OpenBLAS; Ubuntu users should check whether `libopenblas.so` already exists on the system and, if not, install it with apt-get install libopenblas-dev.
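The check described in this note can be scripted, for example:

```shell
# verify libopenblas is present; install it on Ubuntu if missing
ldconfig -p | grep -q libopenblas || sudo apt-get install -y libopenblas-dev
```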
### Step5: Inference and visualization
......
@@ -24,7 +24,7 @@ git clone https://github.com/PaddlePaddle/PaddleDetection.git

### Step2: Download the PaddlePaddle C++ inference library fluid_inference

-The PaddlePaddle C++ inference library provides prebuilt packages for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/windows_cpp_inference.html#windows)
+The PaddlePaddle C++ inference library provides prebuilt packages for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/windows_cpp_inference.html)

After extracting, the `D:\projects\fluid_inference` directory contains:
```
@@ -62,18 +62,23 @@ cd D:\projects\PaddleDetection\deploy\cpp

| *CUDNN_LIB | CUDNN library path |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | Paddle inference library path |
+| PADDLE_LIB_NAME | Paddle inference library name |

+**Note:** 1. If you use the `CPU` inference library, untick `WITH_GPU`. 2. If you use the `openblas` build, untick `WITH_MKL`.

+Run the following command to generate the project files:

```
-cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DOPENCV_DIR=path_to_opencv
+cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=path_to_opencv
```
For example:
```
-cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=D:\projects\packages\cuda10_0\lib\x64 -DCUDNN_LIB=D:\projects\packages\cuda10_0\lib\x64 -DPADDLE_DIR=D:\projects\packages\fluid_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6
+cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=D:\projects\packages\cuda10_0\lib\x64 -DCUDNN_LIB=D:\projects\packages\cuda10_0\lib\x64 -DPADDLE_DIR=D:\projects\packages\fluid_inference -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6
```
3. Build

-Open `PaddleObjectDetector.sln` in the `cpp` folder with `Visual Studio 16 2019` and click `Build` -> `Build All`
+Open `PaddleObjectDetector.sln` in the `cpp` folder with `Visual Studio 16 2019`, set the build mode to `Release`, and click `Build` -> `Build All`

### Step5: Inference and visualization
......
@@ -7,21 +7,17 @@ WITH_MKL=ON

# whether to enable TensorRT (effective only when WITH_GPU=ON)
WITH_TENSORRT=OFF

-# whether to use the 2.0rc1 inference library
-USE_PADDLE_20RC1=OFF
+# Paddle inference library name; it differs across platforms and versions, so check the name of the lib under `paddle_inference/lib/` in the downloaded package
+PADDLE_LIB_NAME=libpaddle_inference

# TensorRT include path
-TENSORRT_INC_DIR=/path/to/tensorrt/lib
+TENSORRT_INC_DIR=/path/to/tensorrt/include
# TensorRT lib path
-TENSORRT_LIB_DIR=/path/to/tensorrt/include
+TENSORRT_LIB_DIR=/path/to/tensorrt/lib

# Paddle inference library path
-PADDLE_DIR=/path/to/fluid_inference/
+PADDLE_DIR=/path/to/paddle_inference

-# whether to link the Paddle inference library statically
-# when using TensorRT, the Paddle inference library is usually a shared library
-WITH_STATIC_LIB=OFF

# CUDA lib path
CUDA_LIB=/path/to/cuda/lib
@@ -39,11 +35,11 @@ then

  echo "set OPENCV_DIR for x86_64"
  # on Linux, download the prebuilt OpenCV with the following commands
  mkdir -p $(pwd)/deps && cd $(pwd)/deps
-  wget -c https://bj.bcebos.com/paddleseg/deploy/opencv3.4.6gcc4.8ffmpeg.tar.gz2
-  tar xvfj opencv3.4.6gcc4.8ffmpeg.tar.gz2 && cd ..
+  wget -c https://paddledet.bj.bcebos.com/data/opencv3.4.6gcc8.2ffmpeg.zip
+  unzip opencv3.4.6gcc8.2ffmpeg.zip && cd ..
  # set OPENCV_DIR
-  OPENCV_DIR=$(pwd)/deps/opencv3.4.6gcc4.8ffmpeg/
+  OPENCV_DIR=$(pwd)/deps/opencv3.4.6gcc8.2ffmpeg

elif [ "$MACHINE_TYPE" = "aarch64" ]
then
@@ -76,7 +72,8 @@ cmake .. \
    -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
    -DCUDA_LIB=${CUDA_LIB} \
    -DCUDNN_LIB=${CUDNN_LIB} \
-    -DOPENCV_DIR=${OPENCV_DIR}
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
make
echo "make finished!"
@@ -37,7 +37,7 @@ TRT_MIN_SUBGRAPH = {

    'EfficientDet': 40,
    'Face': 3,
    'TTFNet': 3,
-    'FCOS': 3,
+    'FCOS': 33,
    'SOLOv2': 60,
}

RESIZE_SCALE_SET = {
......
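Given the new default `min_subgraph_size` of 33 for `FCOS`, a TensorRT run of the commit's example model might look like this sketch (the model path and precision are assumptions; the flags are documented in Step5 of the Jetson guide above):

```shell
./main --model_dir=/path/to/fcos_dcn_r50_fpn_1x \
       --image_path=/path/to/test.jpg \
       --use_gpu=1 --run_mode=trt_fp16
```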