diff --git a/deploy/cpp/CMakeLists.txt b/deploy/cpp/CMakeLists.txt
index 453be9bd742d2f4a6c10ebf3c2908a29d51c397f..0bc0be9aa949dfb89f726555bac16066127502fb 100644
--- a/deploy/cpp/CMakeLists.txt
+++ b/deploy/cpp/CMakeLists.txt
@@ -4,10 +4,10 @@ project(PaddleObjectDetector CXX C)
 option(WITH_MKL        "Compile demo with MKL/OpenBlas support, default use MKL." ON)
 option(WITH_GPU        "Compile demo with GPU/CPU, default use CPU." ON)
 option(WITH_TENSORRT   "Compile demo with TensorRT." OFF)
-option(USE_PADDLE_20RC1 "Compile demo with paddle_inference_lib 2.0rc1" ON)
 
 SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
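+# Name of the inference library file under paddle/lib, without extension.
+# It differs across platforms and Paddle versions (this repo's docs use
+# paddle_inference on Windows and libpaddle_inference on Linux), so check
+# the actual file name in your downloaded package.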
+SET(PADDLE_LIB_NAME "" CACHE STRING "libpaddle_inference")
 SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
 SET(CUDA_LIB "" CACHE PATH "Location of libraries")
 SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
@@ -153,41 +153,23 @@ endif()
 
 if (WIN32)
-  if (USE_PADDLE_20RC1)
-    # 2.0rc1 win32 shared lib name is paddle_fluid.dll and paddle_fluid.lib
-    if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}")
-      set(DEPS
-          ${PADDLE_DIR}/paddle/fluid/inference/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-    else()
-      set(DEPS
-          ${PADDLE_DIR}/paddle/lib/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-    endif()
+  if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}")
+    set(DEPS
+        ${PADDLE_DIR}/paddle/fluid/inference/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
   else()
-    # before 2.0rc1 win32 shared lib name is libpaddle_fluid.dll and libpaddle_fluid.lib
-    if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}")
-      set(DEPS
-          ${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-    else()
-      set(DEPS
-          ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-    endif()
+    set(DEPS
+        ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
   endif()
 endif()
 
 if (WIN32)
-  if (USE_PADDLE_20RC1)
-    # 2.0rc1 win32 shared lib name is paddle_fluid.dll and paddle_fluid.lib
-    set(DEPS ${PADDLE_DIR}/paddle/lib/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-  else()
-    # before 2.0rc1 win32 shared lib name is libpaddle_fluid.dll and libpaddle_fluid.lib
-    set(DEPS ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-  endif()
+  set(DEPS ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
 else()
-  # linux shared lib name is libpaddle_fluid.so
-  set(DEPS ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+  set(DEPS ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX})
 endif()
 
+message("PADDLE_LIB_NAME:" ${PADDLE_LIB_NAME})
 message("DEPS:" ${DEPS})
 
 if (NOT WIN32)
@@ -248,12 +230,12 @@ if (WIN32 AND WITH_MKL)
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
   )
 endif()
-
-if (WIN32 AND USE_PADDLE_20RC1)
+if (WIN32)
   add_custom_command(TARGET main POST_BUILD
-    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/paddle_fluid.dll ./release/paddle_fluid.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
   )
 endif()
diff --git a/deploy/cpp/docs/Jetson_build.md b/deploy/cpp/docs/Jetson_build.md
new file mode 100644
index 0000000000000000000000000000000000000000..27130f58020fb08b69d8f8662986cfd45b8aa44e
--- /dev/null
+++ b/deploy/cpp/docs/Jetson_build.md
@@ -0,0 +1,199 @@
+# Jetson Platform Build Guide
+
+## Overview
+`NVIDIA Jetson` devices are embedded devices with an `NVIDIA GPU`, so object detection models can be deployed to them. This document is a tutorial for deploying `PaddleDetection` models on `Jetson` hardware.
+
+It uses `Jetson TX2` hardware and `JetPack 4.3` as the running example.
+
+For the `Jetson` platform development guide, see the [NVIDIA Jetson Linux Developer Guide](https://docs.nvidia.com/jetson/l4t/index.html).
+
+## Setting up the Jetson environment
+For installing the `Jetson` system software, see the [NVIDIA Jetson Linux Developer Guide](https://docs.nvidia.com/jetson/l4t/index.html).
+
+* (1) Check the L4T version of the board:
+```
+cat /etc/nv_tegra_release
+```
+* (2) Pick the `JetPack` version your hardware supports; for the hardware-to-`JetPack` mapping, see [jetpack-archive](https://developer.nvidia.com/embedded/jetpack-archive).
+
+* (3) Download `JetPack` and flash the system image as described in the `Preparing a Jetson Developer Kit for Use` chapter of the [NVIDIA Jetson Linux Developer Guide](https://docs.nvidia.com/jetson/l4t/index.html).
+
+## Download or build the `Paddle` inference library
+This document uses the `Paddle` inference library prebuilt for `JetPack4.3`; pick the version matching your hardware from [Install and compile the Linux inference library](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/build_and_install_lib_cn.html).
+
+Here we pick [nv_jetson_cuda10_cudnn7.6_trt6(jetpack4.3)](https://paddle-inference-lib.bj.bcebos.com/2.0.0-nv-jetson-jetpack4.3-all/paddle_inference.tgz): `Paddle` version `2.0.0-rc0`, `CUDA` version `10.0`, `CUDNN` version `7.6`, `TensorRT` version `6`.
+
+To build the `Paddle` library on the `Jetson` platform yourself, see the `NVIDIA Jetson嵌入式硬件预测库源码编译` section of [Install and compile the Linux inference library](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html).
+
+### Step1: Download the code
+
+ `git clone https://github.com/PaddlePaddle/PaddleDetection.git`
+
+**Note**: the `C++` inference code lives in the `/root/projects/PaddleDetection/deploy/cpp` directory, which does not depend on any other directory of `PaddleDetection`.
+
+
+### Step2: Download the PaddlePaddle C++ inference library fluid_inference
+
+Extract the downloaded [nv_jetson_cuda10_cudnn7.6_trt6(jetpack4.3)](https://paddle-inference-lib.bj.bcebos.com/2.0.1-nv-jetson-jetpack4.3-all/paddle_inference.tgz).
+
+After download and extraction, `/root/projects/fluid_inference` contains:
+```
+fluid_inference
+├── paddle # paddle core library and headers
+|
+├── third_party # third-party dependencies and headers
+|
+└── version.txt # version and build info
+```
+
+**Note:** the prebuilt `nv-jetson-cuda10-cudnn7.6-trt6` package is built with `GCC` `7.5.0`; all the others are built with `GCC 4.8.5`. Using a newer GCC may cause `ABI` compatibility issues; consider downgrading or [building the inference library yourself](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html).
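+
+To confirm which value to pass later as `PADDLE_LIB_NAME`, you can inspect the package before building; a quick check (the path below is illustrative):
+```shell
+# list the library files shipped with the package
+ls /root/projects/fluid_inference/paddle/lib/
+# e.g. libpaddle_inference.so -> PADDLE_LIB_NAME=libpaddle_inference
+# also worth recording: the exact build configuration of the package
+cat /root/projects/fluid_inference/version.txt
+```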
+
+
+### Step3: Build
+
+The `cmake` commands for the build live in `scripts/build.sh`; adjust the main parameters to your setup. They are explained below.
+
+Note: on the `TX2` platform, `CUDA` and `CUDNN` must be installed via `JetPack`.
+
+```
+# Whether to use GPU (i.e., CUDA)
+WITH_GPU=ON
+
+# Whether to use MKL or openblas; must be OFF on TX2
+WITH_MKL=OFF
+
+# Whether to integrate TensorRT (effective only when WITH_GPU=ON)
+WITH_TENSORRT=ON
+
+# TensorRT include path
+TENSORRT_INC_DIR=/usr/include/aarch64-linux-gnu
+
+# TensorRT lib path
+TENSORRT_LIB_DIR=/usr/lib/aarch64-linux-gnu
+
+# Path to the Paddle inference library
+PADDLE_DIR=/path/to/fluid_inference/
+
+# Name of the Paddle inference library
+PADDLE_LIB_NAME=paddle_inference
+
+# Whether to link the Paddle inference library statically
+# When TensorRT is used, the Paddle inference library is usually a shared library
+WITH_STATIC_LIB=OFF
+
+# CUDA lib path
+CUDA_LIB=/usr/local/cuda-10.0/lib64
+
+# CUDNN lib path
+CUDNN_LIB=/usr/lib/aarch64-linux-gnu
+
+# OPENCV_DIR path
+# On plain Linux, download https://bj.bcebos.com/paddleseg/deploy/opencv3.4.6gcc4.8ffmpeg.tar.gz2 and extract it into the deps folder
+# On TX2, download https://paddlemodels.bj.bcebos.com/TX2_JetPack4.3_opencv_3.4.10_gcc7.5.0.zip and extract it into the deps folder
+OPENCV_DIR=/path/to/opencv
+
+# Double-check all the paths above
+
+# No changes needed below
+cmake .. \
+  -DWITH_GPU=${WITH_GPU} \
+  -DWITH_MKL=OFF \
+  -DWITH_TENSORRT=${WITH_TENSORRT} \
+  -DTENSORRT_LIB_DIR=${TENSORRT_LIB_DIR} \
+  -DTENSORRT_INC_DIR=${TENSORRT_INC_DIR} \
+  -DPADDLE_DIR=${PADDLE_DIR} \
+  -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
+  -DCUDA_LIB=${CUDA_LIB} \
+  -DCUDNN_LIB=${CUDNN_LIB} \
+  -DOPENCV_DIR=${OPENCV_DIR} \
+  -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
+make
+```
+
+An example configuration:
+```
+# Whether to use GPU (i.e., CUDA)
+WITH_GPU=ON
+
+# Whether to use MKL or openblas
+WITH_MKL=OFF
+
+# Whether to integrate TensorRT (effective only when WITH_GPU=ON)
+WITH_TENSORRT=OFF
+
+# TensorRT include path
+TENSORRT_INC_DIR=/usr/include/aarch64-linux-gnu
+
+# TensorRT lib path
+TENSORRT_LIB_DIR=/usr/lib/aarch64-linux-gnu
+
+# Path to the Paddle inference library
+PADDLE_DIR=/home/nvidia/PaddleDetection_infer/fluid_inference/
+
+# Name of the Paddle inference library
+PADDLE_LIB_NAME=paddle_inference
+
+# Whether to link the Paddle inference library statically
+# When TensorRT is used, the Paddle inference library is usually a shared library
+WITH_STATIC_LIB=OFF
+
+# CUDA lib path
+CUDA_LIB=/usr/local/cuda-10.0/lib64
+
+# CUDNN lib path
+CUDNN_LIB=/usr/lib/aarch64-linux-gnu/
+```
+
+After setting the main parameters in the script, run the `build` script:
+ ```shell
+ sh ./scripts/build.sh
+ ```
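+
+If the build succeeds, it can be worth confirming that the binary resolved the expected Paddle library before running anything; a quick sanity check (binary path as produced by the build above):
+```shell
+# list the shared libraries build/main resolves, filtering for paddle
+ldd build/main | grep -i paddle
+```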
+
+### Step4: Run inference and visualize
+After a successful build, the inference entry point is `build/main`; its main command-line flags are:
+| Flag | Description |
+| ---- | ---- |
+| --model_dir | Path to the exported inference model |
+| --image_path | Path to the image file to predict |
+| --video_path | Path to the video file to predict |
+| --camera_id | ID of the camera to predict from; defaults to -1 (camera disabled) |
+| --use_gpu | Whether to run inference on GPU; 0 or 1 (default 0) |
+| --gpu_id | GPU device id to use for inference (default 0) |
+| --run_mode | On GPU, defaults to fluid; one of fluid/trt_fp32/trt_fp16 |
+| --run_benchmark | Whether to run prediction repeatedly to benchmark speed |
+| --output_dir | Folder for output images; defaults to output |
+
+**Note**: if both `video_path` and `image_path` are set, the program only predicts `video_path`.
+
+
+`Example 1`:
+```shell
+# predict the image /root/projects/images/test.jpeg without GPU
+./main --model_dir=/root/projects/models/yolov3_darknet --image_path=/root/projects/images/test.jpeg
+```
+
+The `visualized prediction result` for the image is saved as `output.jpg` in the current directory.
+
+
+`Example 2`:
+```shell
+# predict the video /root/projects/videos/test.mp4 with GPU
+./main --model_dir=/root/projects/models/yolov3_darknet --video_path=/root/projects/videos/test.mp4 --use_gpu=1
+```
+Only `.mp4` videos are supported for now; the `visualized prediction result` is saved as `output.mp4` in the current directory.
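+
+`Example 3` (a sketch combining the flags above; camera index 0 is an assumption, check your device):
+```shell
+# predict from camera 0 with GPU; --camera_id=-1 would disable the camera
+./main --model_dir=/root/projects/models/yolov3_darknet --camera_id=0 --use_gpu=1
+```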
+
+
+## Performance tests
+Test environment: hardware TX2, JetPack 4.3, Paddle inference library 1.8.4, CUDA 10.0, CUDNN 7.5, TensorRT 5.0.
+
+The first 100 warmup rounds are excluded; the figure is the average over the next 100 rounds, in ms/image, and covers model execution only (no data preprocessing or copying).
+
+
+| Model | Input | AnalysisPredictor(ms) |
+|---|----|---|
+| yolov3_mobilenet_v1 | 608*608 | 56.243858 |
+| faster_rcnn_r50_1x | 1333*1333 | 73.552460 |
+| faster_rcnn_r50_vd_fpn_2x | 1344*1344 | 87.582146 |
+| mask_rcnn_r50_fpn_1x | 1344*1344 | 107.317848 |
+| mask_rcnn_r50_vd_fpn_2x | 1344*1344 | 87.98.708122 |
+| ppyolo_r18vd | 320*320 | 22.876789 |
+| ppyolo_2x | 608*608 | 68.562050 |
diff --git a/deploy/cpp/docs/linux_build.md b/deploy/cpp/docs/linux_build.md
index 1ddb7a150e11e4ec6c5fa1b62d6606fc60719a83..60c609663002c05e75f811ae1431e58cf4a333da 100644
--- a/deploy/cpp/docs/linux_build.md
+++ b/deploy/cpp/docs/linux_build.md
@@ -1,10 +1,10 @@
 # Linux Platform Build Guide
 
 ## Overview
-This document has been tested on `Linux` with `GCC 4.8.5` and `GCC 4.9.4`. To build with a newer G++ version, first rebuild the Paddle inference library; see [Build the Paddle inference library from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html). The prebuilt opencv library used here was built with gcc4.8 on ubuntu 16.04; to build on any system other than ubuntu 16.04, build opencv yourself.
+This document has been tested on `Linux` with `GCC 8.2`. To build with another G++ version, first rebuild the Paddle inference library; see [Build the Paddle inference library from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html). The prebuilt opencv library used here was built with gcc4.8 on ubuntu 16.04; to build on any system other than ubuntu 16.04, build opencv yourself.
 
 ## Prerequisites
-* G++ 4.8.2 ~ 4.9.4
+* G++ 8.2
 * CUDA 9.0 / CUDA 10.0, cudnn 7+ (required only for the GPU inference library)
 * CMake 3.0+
 
@@ -19,7 +19,7 @@
 
 ### Step2: Download the PaddlePaddle C++ inference library fluid_inference
 
-PaddlePaddle C++ inference libraries are prebuilt for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-rc1/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)
+PaddlePaddle C++ inference libraries are prebuilt for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)
 
 After download and extraction, `/root/projects/fluid_inference` contains:
 
@@ -58,6 +58,9 @@ TENSORRT_LIB_DIR=/path/to/TensorRT/lib
 # Path to the Paddle inference library
 PADDLE_DIR=/path/to/fluid_inference
 
+# Name of the Paddle inference library
+PADDLE_LIB_NAME=paddle_inference
+
 # CUDA lib path
 CUDA_LIB=/path/to/cuda/lib
 
@@ -76,7 +79,8 @@ cmake .. \
     -DPADDLE_DIR=${PADDLE_DIR} \
     -DCUDA_LIB=${CUDA_LIB} \
     -DCUDNN_LIB=${CUDNN_LIB} \
-    -DOPENCV_DIR=${OPENCV_DIR}
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
 make
 ```
diff --git a/deploy/cpp/docs/windows_vs2019_build.md b/deploy/cpp/docs/windows_vs2019_build.md
index 7cfb63a62757225e7db17f7435d4847584c02825..aca5d3ab970cb4aa9aac9a6ea26b2a7c621acac2 100644
--- a/deploy/cpp/docs/windows_vs2019_build.md
+++ b/deploy/cpp/docs/windows_vs2019_build.md
@@ -24,7 +24,7 @@ git clone https://github.com/PaddlePaddle/PaddleDetection.git
 
 ### Step2: Download the PaddlePaddle C++ inference library fluid_inference
 
-PaddlePaddle C++ inference libraries are prebuilt for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-rc1/guides/05_inference_deployment/inference/windows_cpp_inference.html)
+PaddlePaddle C++ inference libraries are prebuilt for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/windows_cpp_inference.html)
 
 After extraction, `D:\projects\fluid_inference` contains:
 ```
@@ -62,18 +62,18 @@ cd D:\projects\PaddleDetection\deploy\cpp
 | *CUDNN_LIB | Path to CUDNN libraries |
 | OPENCV_DIR | OpenCV installation path |
 | PADDLE_DIR | Path to the Paddle inference library |
-| USE_PADDLE_20RC1 | Whether to use the 2.0rc1 inference library; with 2.0rc1 the library name changes on Windows and only shared-library builds are supported |
+| PADDLE_LIB_NAME | Name of the Paddle inference library |
 
 **Note:**
 1. For the `CPU` inference library, untick `WITH_GPU`
 2. For the `openblas` build, untick `WITH_MKL`
 
 Run the following command to generate the project files:
 ```
-cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DOPENCV_DIR=path_to_opencv
+cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=path_to_opencv
 ```
 For example:
 ```
-cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=D:\projects\packages\cuda10_0\lib\x64 -DCUDNN_LIB=D:\projects\packages\cuda10_0\lib\x64 -DPADDLE_DIR=D:\projects\packages\fluid_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6
+cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=D:\projects\packages\cuda10_0\lib\x64 -DCUDNN_LIB=D:\projects\packages\cuda10_0\lib\x64 -DPADDLE_DIR=D:\projects\packages\fluid_inference -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6
 ```
 
 3. Build
diff --git a/deploy/cpp/include/preprocess_op.h b/deploy/cpp/include/preprocess_op.h
index e48639b553867dff859cef4816558df7654edcfb..26a91cc9eb74008919cbac12e736afff6bb9ad72 100644
--- a/deploy/cpp/include/preprocess_op.h
+++ b/deploy/cpp/include/preprocess_op.h
@@ -38,7 +38,7 @@ class ImageBlob {
   // Buffer for image data after preprocessing
   std::vector<float> im_data_;
   // in net data shape(after pad)
-  std::vector<int> in_net_shape_;
+  std::vector<float> in_net_shape_;
   // Evaluation image width and height
   //std::vector<float> eval_im_size_f_;
   // Scale factor for image size to origin image size
diff --git a/deploy/cpp/scripts/build.sh b/deploy/cpp/scripts/build.sh
index a32b1d383256f0d775dba41d16b99468558e9135..803fbab7d7fe7cfbc7ce1bcc7543692a24d27cb4 100644
--- a/deploy/cpp/scripts/build.sh
+++ b/deploy/cpp/scripts/build.sh
@@ -7,17 +7,17 @@ WITH_MKL=ON
 
 # Whether to integrate TensorRT (effective only when WITH_GPU=ON)
 WITH_TENSORRT=OFF
 
-# Whether to use the 2.0rc1 inference library
-USE_PADDLE_20RC1=ON
+# Name of the Paddle inference lib; it differs across platforms and versions, so check the lib name under the `paddle_inference/lib/` folder of the downloaded package
+PADDLE_LIB_NAME=libpaddle_inference
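+# (for example, libpaddle_inference.so on Linux means libpaddle_inference here,
+# while paddle_inference.lib/.dll on Windows means paddle_inference)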
 
 # TensorRT include path
-TENSORRT_INC_DIR=/path/to/tensorrt/lib
+TENSORRT_INC_DIR=/path/to/tensorrt/include
 
 # TensorRT lib path
-TENSORRT_LIB_DIR=/path/to/tensorrt/include
+TENSORRT_LIB_DIR=/path/to/tensorrt/lib
 
 # Path to the Paddle inference library
-PADDLE_DIR=/path/to/fluid_inference/
+PADDLE_DIR=/path/to/paddle_inference
 
 # CUDA lib path
 CUDA_LIB=/path/to/cuda/lib
@@ -72,7 +72,8 @@ cmake .. \
     -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
     -DCUDA_LIB=${CUDA_LIB} \
     -DCUDNN_LIB=${CUDNN_LIB} \
-    -DOPENCV_DIR=${OPENCV_DIR}
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
 make
 echo "make finished!"
diff --git a/deploy/cpp/src/main.cc b/deploy/cpp/src/main.cc
index 6fb49769288f12f7c71345f03f689d6a67757f19..00dd325680b39df3900689e98fae6f32b58ec99e 100644
--- a/deploy/cpp/src/main.cc
+++ b/deploy/cpp/src/main.cc
@@ -207,9 +207,6 @@ int main(int argc, char** argv) {
     return -1;
   }
   // Load model and create a object detector
-  const std::vector<int> trt_min_shape = {1, FLAGS_trt_min_shape, FLAGS_trt_min_shape};
-  const std::vector<int> trt_max_shape = {1, FLAGS_trt_max_shape, FLAGS_trt_max_shape};
-  const std::vector<int> trt_opt_shape = {1, FLAGS_trt_opt_shape, FLAGS_trt_opt_shape};
   PaddleDetection::ObjectDetector det(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_run_mode,
       FLAGS_gpu_id, FLAGS_use_dynamic_shape, FLAGS_trt_min_shape,
       FLAGS_trt_max_shape, FLAGS_trt_opt_shape);
diff --git a/deploy/cpp/src/object_detector.cc b/deploy/cpp/src/object_detector.cc
index 969425dcedc1ec532951d7919aff215026a423ff..95b8dbb2217cd9dd28761f82724214a68a1a0ebe 100644
--- a/deploy/cpp/src/object_detector.cc
+++ b/deploy/cpp/src/object_detector.cc
@@ -68,9 +68,9 @@ void ObjectDetector::LoadModel(const std::string& model_dir,
     // set use dynamic shape
     if (use_dynamic_shape) {
       // set DynamicShsape for image tensor
-      const std::vector<int> min_input_shape = {1, trt_min_shape, trt_min_shape};
-      const std::vector<int> max_input_shape = {1, trt_max_shape, trt_max_shape};
-      const std::vector<int> opt_input_shape = {1, trt_opt_shape, trt_opt_shape};
+      const std::vector<int> min_input_shape = {1, 3, trt_min_shape, trt_min_shape};
+      const std::vector<int> max_input_shape = {1, 3, trt_max_shape, trt_max_shape};
+      const std::vector<int> opt_input_shape = {1, 3, trt_opt_shape, trt_opt_shape};
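+      // NCHW shapes for the TensorRT dynamic-shape profile: batch 1, 3 channels,
+      // height/width ranging from trt_min_shape to trt_max_shape (trt_opt_shape
+      // is the size TensorRT optimizes kernels for)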
       const std::map<std::string, std::vector<int>> map_min_input_shape = {{"image", min_input_shape}};
       const std::map<std::string, std::vector<int>> map_max_input_shape = {{"image", max_input_shape}};
       const std::map<std::string, std::vector<int>> map_opt_input_shape = {{"image", opt_input_shape}};
diff --git a/deploy/cpp/src/preprocess_op.cc b/deploy/cpp/src/preprocess_op.cc
index 8edd3eb1f2b7957649f7075bbcd20ba582c841a7..6a2be41799620ec681aed18db05ab5df4b281052 100644
--- a/deploy/cpp/src/preprocess_op.cc
+++ b/deploy/cpp/src/preprocess_op.cc
@@ -26,8 +26,8 @@ void InitInfo::Run(cv::Mat* im, ImageBlob* data) {
   };
   data->scale_factor_ = {1., 1.};
   data->in_net_shape_ = {
-      static_cast<int>(im->rows),
-      static_cast<int>(im->cols)
+      static_cast<float>(im->rows),
+      static_cast<float>(im->cols)
   };
 }
 
@@ -63,12 +63,12 @@ void Permute::Run(cv::Mat* im, ImageBlob* data) {
 void Resize::Run(cv::Mat* im, ImageBlob* data) {
   auto resize_scale = GenerateScale(*im);
   data->im_shape_ = {
-      static_cast<int>(im->cols * resize_scale.first),
-      static_cast<int>(im->rows * resize_scale.second)
+      static_cast<float>(im->cols * resize_scale.first),
+      static_cast<float>(im->rows * resize_scale.second)
   };
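+  // in_net_shape_ is likewise kept as float: these values are fed into the
+  // model's float-typed shape inputs (e.g. im_shape/scale_factor) downstream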
   data->in_net_shape_ = {
-      static_cast<int>(im->cols * resize_scale.first),
-      static_cast<int>(im->rows * resize_scale.second)
+      static_cast<float>(im->cols * resize_scale.first),
+      static_cast<float>(im->rows * resize_scale.second)
   };
   cv::resize(
       *im, *im, cv::Size(), resize_scale.first, resize_scale.second, interp_);
@@ -126,8 +126,8 @@ void PadStride::Run(cv::Mat* im, ImageBlob* data) {
       cv::BORDER_CONSTANT,
       cv::Scalar(0));
   data->in_net_shape_ = {
-      static_cast<int>(im->rows),
-      static_cast<int>(im->cols),
+      static_cast<float>(im->rows),
+      static_cast<float>(im->cols),
   };
 }
diff --git a/static/deploy/cpp/CMakeLists.txt b/static/deploy/cpp/CMakeLists.txt
index 0517825795dce9ae4c550e174eb642bd2273d6bd..0bc0be9aa949dfb89f726555bac16066127502fb 100644
--- a/static/deploy/cpp/CMakeLists.txt
+++ b/static/deploy/cpp/CMakeLists.txt
@@ -3,10 +3,11 @@ project(PaddleObjectDetector CXX C)
 option(WITH_MKL        "Compile demo with MKL/OpenBlas support, default use MKL." ON)
 option(WITH_GPU        "Compile demo with GPU/CPU, default use CPU." ON)
-option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
-option(WITH_TENSORRT "Compile demo with TensorRT." OFF)
+option(WITH_TENSORRT "Compile demo with TensorRT." OFF)
+
 SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
+SET(PADDLE_LIB_NAME "" CACHE STRING "libpaddle_inference")
 SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
 SET(CUDA_LIB "" CACHE PATH "Location of libraries")
 SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
@@ -36,6 +37,7 @@ endif()
 if (NOT DEFINED PADDLE_DIR OR ${PADDLE_DIR} STREQUAL "")
     message(FATAL_ERROR "please set PADDLE_DIR with -DPADDLE_DIR=/path/paddle_influence_dir")
 endif()
+message("PADDLE_DIR IS:" ${PADDLE_DIR})
 
 if (NOT DEFINED OPENCV_DIR OR ${OPENCV_DIR} STREQUAL "")
     message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/opencv")
@@ -70,6 +72,8 @@ link_directories("${PADDLE_DIR}/third_party/install/xxhash/lib")
 link_directories("${PADDLE_DIR}/paddle/lib/")
 link_directories("${CMAKE_CURRENT_BINARY_DIR}")
 
+
+
 if (WIN32)
   include_directories("${PADDLE_DIR}/paddle/fluid/inference")
   include_directories("${PADDLE_DIR}/paddle/include")
@@ -89,10 +93,6 @@ if (WIN32)
     set(CMAKE_C_FLAGS_RELEASE  "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT")
     set(CMAKE_CXX_FLAGS_DEBUG  "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd")
     set(CMAKE_CXX_FLAGS_RELEASE  "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT")
-    if (WITH_STATIC_LIB)
-        safe_set_static_flag()
-        add_definitions(-DSTATIC_LIB)
-    endif()
 else()
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -o2 -fopenmp -std=c++11")
     set(CMAKE_STATIC_LIBRARY_PREFIX "")
@@ -113,8 +113,8 @@ endif()
 
 if (NOT WIN32)
   if (WITH_TENSORRT AND WITH_GPU)
-      include_directories("${TENSORRT_INC_DIR}")
-      link_directories("${TENSORRT_LIB_DIR}")
+      include_directories("${TENSORRT_INC_DIR}/")
+      link_directories("${TENSORRT_LIB_DIR}/")
   endif()
 endif(NOT WIN32)
@@ -148,31 +148,30 @@ if(WITH_MKL)
     endif ()
   endif()
 else()
-  if (WIN32)
-    set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
-  else()
-    set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
-  endif()
+  set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
 endif()
 
+
 if (WIN32)
-  if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}")
+  if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}")
     set(DEPS
-        ${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+        ${PADDLE_DIR}/paddle/fluid/inference/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
   else()
     set(DEPS
-        ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+        ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
   endif()
 endif()
 
-if(WITH_STATIC_LIB)
-    set(DEPS
-        ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+
+if (WIN32)
+  set(DEPS ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
 else()
-    set(DEPS
-        ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+  set(DEPS ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX})
 endif()
 
+message("PADDLE_LIB_NAME:" ${PADDLE_LIB_NAME})
+message("DEPS:" ${DEPS})
+
 if (NOT WIN32)
     set(DEPS ${DEPS}
         ${MATH_LIB} ${MKLDNN_LIB}
@@ -220,6 +219,7 @@ endif()
 set(DEPS ${DEPS} ${OpenCV_LIBS})
 add_executable(main src/main.cc src/preprocess_op.cc src/object_detector.cc)
 ADD_DEPENDENCIES(main ext-yaml-cpp)
+message("DEPS:" ${DEPS})
 target_link_libraries(main ${DEPS})
 
 if (WIN32 AND WITH_MKL)
@@ -230,5 +230,12 @@ if (WIN32 AND WITH_MKL)
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
     )
 endif()
+
+if (WIN32)
+    add_custom_command(TARGET main POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
+    )
+endif()
diff --git a/static/deploy/cpp/docs/Jetson_build.md b/static/deploy/cpp/docs/Jetson_build.md
index 0262ffbd1bf8bea2d62fcac7fda5080c6d472697..14850700e71cb8f3219b5ff2d4d01b5a30a9b250 100644
--- a/static/deploy/cpp/docs/Jetson_build.md
+++ b/static/deploy/cpp/docs/Jetson_build.md
@@ -34,7 +34,7 @@ cat /etc/nv_tegra_release
 
 ### Step2: Download the PaddlePaddle C++ inference library fluid_inference
 
-Extract the downloaded [nv_jetson_cuda10_cudnn7.6_trt6(jetpack4.3)](https://paddle-inference-lib.bj.bcebos.com/2.0.0-nv-jetson-jetpack4.3-all/paddle_inference.tgz).
+Extract the downloaded [nv_jetson_cuda10_cudnn7.6_trt6(jetpack4.3)](https://paddle-inference-lib.bj.bcebos.com/2.0.1-nv-jetson-jetpack4.3-all/paddle_inference.tgz).
 
 After download and extraction, `/root/projects/fluid_inference` contains:
 ```
@@ -74,6 +74,9 @@ TENSORRT_LIB_DIR=/usr/lib/aarch64-linux-gnu
 # Path to the Paddle inference library
 PADDLE_DIR=/path/to/fluid_inference/
 
+# Name of the Paddle inference library
+PADDLE_LIB_NAME=paddle_inference
+
 # Whether to link the Paddle inference library statically
 # When TensorRT is used, the Paddle inference library is usually a shared library
 WITH_STATIC_LIB=OFF
@@ -101,7 +104,8 @@ cmake .. \
     -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
     -DCUDA_LIB=${CUDA_LIB} \
     -DCUDNN_LIB=${CUDNN_LIB} \
-    -DOPENCV_DIR=${OPENCV_DIR}
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
 make
 ```
diff --git a/static/deploy/cpp/docs/linux_build.md b/static/deploy/cpp/docs/linux_build.md
index ab95dc786ba766e88d3ec069ef79124b506e9900..60c609663002c05e75f811ae1431e58cf4a333da 100644
--- a/static/deploy/cpp/docs/linux_build.md
+++ b/static/deploy/cpp/docs/linux_build.md
@@ -1,10 +1,10 @@
 # Linux Platform Build Guide
 
 ## Overview
-This document has been tested on `Linux` with `GCC 4.8.5` and `GCC 4.9.4`. To build with a newer G++ version, first rebuild the Paddle inference library; see [Build the Paddle inference library from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#zhijiexiazaianzhuang). The prebuilt opencv library used here was built with gcc4.8 on ubuntu 16.04; to build on any system other than ubuntu 16.04, build opencv yourself.
+This document has been tested on `Linux` with `GCC 8.2`. To build with another G++ version, first rebuild the Paddle inference library; see [Build the Paddle inference library from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html). The prebuilt opencv library used here was built with gcc4.8 on ubuntu 16.04; to build on any system other than ubuntu 16.04, build opencv yourself.
 
 ## Prerequisites
-* G++ 4.8.2 ~ 4.9.4
+* G++ 8.2
 * CUDA 9.0 / CUDA 10.0, cudnn 7+ (required only for the GPU inference library)
 * CMake 3.0+
 
@@ -19,7 +19,7 @@
 
 ### Step2: Download the PaddlePaddle C++ inference library fluid_inference
 
-PaddlePaddle C++ inference libraries are prebuilt for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#linux)
+PaddlePaddle C++ inference libraries are prebuilt for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)
 
 After download and extraction, `/root/projects/fluid_inference` contains:
 
@@ -58,9 +58,8 @@ TENSORRT_LIB_DIR=/path/to/TensorRT/lib
 
 # Path to the Paddle inference library
 PADDLE_DIR=/path/to/fluid_inference
 
-# Whether to link the Paddle inference library statically
-# When TensorRT is used, the Paddle inference library is usually a shared library
-WITH_STATIC_LIB=OFF
+# Name of the Paddle inference library
+PADDLE_LIB_NAME=paddle_inference
 
 # CUDA lib path
 CUDA_LIB=/path/to/cuda/lib
@@ -68,10 +67,6 @@ CUDA_LIB=/path/to/cuda/lib
 # CUDNN lib path
 CUDNN_LIB=/path/to/cudnn/lib
 
-# After setting the main parameters in the script, run the `build` script:
-sh ./scripts/build.sh
-
-
 # Double-check all the paths above
 
 # No changes needed below
@@ -82,10 +77,10 @@ cmake .. \
     -DTENSORRT_LIB_DIR=${TENSORRT_LIB_DIR} \
     -DTENSORRT_INC_DIR=${TENSORRT_INC_DIR} \
     -DPADDLE_DIR=${PADDLE_DIR} \
-    -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
     -DCUDA_LIB=${CUDA_LIB} \
     -DCUDNN_LIB=${CUDNN_LIB} \
-    -DOPENCV_DIR=${OPENCV_DIR}
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
 make
 ```
@@ -94,6 +89,7 @@ make
 ```shell
 sh ./scripts/build.sh
 ```
+
 **Note**: OPENCV depends on OPENBLAS; on Ubuntu, check whether `libopenblas.so` is already present and, if not, install it with apt-get install libopenblas-dev.
 
 ### Step5: Run inference and visualize
diff --git a/static/deploy/cpp/docs/windows_vs2019_build.md b/static/deploy/cpp/docs/windows_vs2019_build.md
index 7b8dcff078281d39773d862c48df2f88d645105b..aca5d3ab970cb4aa9aac9a6ea26b2a7c621acac2 100644
--- a/static/deploy/cpp/docs/windows_vs2019_build.md
+++ b/static/deploy/cpp/docs/windows_vs2019_build.md
@@ -24,7 +24,7 @@ git clone https://github.com/PaddlePaddle/PaddleDetection.git
 
 ### Step2: Download the PaddlePaddle C++ inference library fluid_inference
 
-PaddlePaddle C++ inference libraries are prebuilt for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/windows_cpp_inference.html#windows)
+PaddlePaddle C++ inference libraries are prebuilt for different `CPU` and `CUDA` versions; download the one matching your setup: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/windows_cpp_inference.html)
 
 After extraction, `D:\projects\fluid_inference` contains:
 ```
@@ -62,18 +62,23 @@ cd D:\projects\PaddleDetection\deploy\cpp
 | *CUDNN_LIB | Path to CUDNN libraries |
 | OPENCV_DIR | OpenCV installation path |
 | PADDLE_DIR | Path to the Paddle inference library |
+| PADDLE_LIB_NAME | Name of the Paddle inference library |
+
+**Note:**
+1. For the `CPU` inference library, untick `WITH_GPU`
+2. For the `openblas` build, untick `WITH_MKL`
+
+Run the following command to generate the project files:
 ```
-cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DOPENCV_DIR=path_to_opencv
+cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=path_to_opencv
 ```
 For example:
 ```
-cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=D:\projects\packages\cuda10_0\lib\x64 -DCUDNN_LIB=D:\projects\packages\cuda10_0\lib\x64 -DPADDLE_DIR=D:\projects\packages\fluid_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6
+cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=D:\projects\packages\cuda10_0\lib\x64 -DCUDNN_LIB=D:\projects\packages\cuda10_0\lib\x64 -DPADDLE_DIR=D:\projects\packages\fluid_inference -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6
 ```
 
 3. Build
-Open `PaddleObjectDetector.sln` under the `cpp` folder with `Visual Studio 16 2019` and click `Build` -> `Build All`
+Open `PaddleObjectDetector.sln` under the `cpp` folder with `Visual Studio 16 2019`, set the configuration to `Release`, and click `Build` -> `Build All`
+
 
 ### Step5: Run inference and visualize
diff --git a/static/deploy/cpp/scripts/build.sh b/static/deploy/cpp/scripts/build.sh
index fb6ca625e9bcc1a84dbf6728e809b08e6432d075..803fbab7d7fe7cfbc7ce1bcc7543692a24d27cb4 100644
--- a/static/deploy/cpp/scripts/build.sh
+++ b/static/deploy/cpp/scripts/build.sh
@@ -7,21 +7,17 @@ WITH_MKL=ON
 
 # Whether to integrate TensorRT (effective only when WITH_GPU=ON)
 WITH_TENSORRT=OFF
 
-# Whether to use the 2.0rc1 inference library
-USE_PADDLE_20RC1=OFF
+# Name of the Paddle inference lib; it differs across platforms and versions, so check the lib name under the `paddle_inference/lib/` folder of the downloaded package
+PADDLE_LIB_NAME=libpaddle_inference
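+# for example (illustrative):
+#   ls ${PADDLE_DIR}/paddle/lib  ->  libpaddle_inference.so  =>  PADDLE_LIB_NAME=libpaddle_inference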
 
 # TensorRT include path
-TENSORRT_INC_DIR=/path/to/tensorrt/lib
+TENSORRT_INC_DIR=/path/to/tensorrt/include
 
 # TensorRT lib path
-TENSORRT_LIB_DIR=/path/to/tensorrt/include
+TENSORRT_LIB_DIR=/path/to/tensorrt/lib
 
 # Path to the Paddle inference library
-PADDLE_DIR=/path/to/fluid_inference/
-
-# Whether to link the Paddle inference library statically
-# When TensorRT is used, the Paddle inference library is usually a shared library
-WITH_STATIC_LIB=OFF
+PADDLE_DIR=/path/to/paddle_inference
 
 # CUDA lib path
 CUDA_LIB=/path/to/cuda/lib
@@ -39,11 +35,11 @@ then
     echo "set OPENCV_DIR for x86_64"
     # on linux, download the prebuilt opencv with the following commands
     mkdir -p $(pwd)/deps && cd $(pwd)/deps
-    wget -c https://bj.bcebos.com/paddleseg/deploy/opencv3.4.6gcc4.8ffmpeg.tar.gz2
-    tar xvfj opencv3.4.6gcc4.8ffmpeg.tar.gz2 && cd ..
+    wget -c https://paddledet.bj.bcebos.com/data/opencv3.4.6gcc8.2ffmpeg.zip
+    unzip opencv3.4.6gcc8.2ffmpeg.zip && cd ..
 
     # set OPENCV_DIR
-    OPENCV_DIR=$(pwd)/deps/opencv3.4.6gcc4.8ffmpeg/
+    OPENCV_DIR=$(pwd)/deps/opencv3.4.6gcc8.2ffmpeg
 
 elif [ "$MACHINE_TYPE" = "aarch64" ]
 then
@@ -76,7 +72,8 @@ cmake .. \
     -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
     -DCUDA_LIB=${CUDA_LIB} \
    -DCUDNN_LIB=${CUDNN_LIB} \
-    -DOPENCV_DIR=${OPENCV_DIR}
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
 make
 echo "make finished!"
diff --git a/static/ppdet/utils/export_utils.py b/static/ppdet/utils/export_utils.py
index 1904e7cfd9ba7497c2aa86139f2cdbc599c70799..3579ddb495f8e6055a512f9d8fe32897ca7abe22 100644
--- a/static/ppdet/utils/export_utils.py
+++ b/static/ppdet/utils/export_utils.py
@@ -37,7 +37,7 @@ TRT_MIN_SUBGRAPH = {
     'EfficientDet': 40,
     'Face': 3,
     'TTFNet': 3,
-    'FCOS': 3,
+    'FCOS': 33,
     'SOLOv2': 60,
 }
 RESIZE_SCALE_SET = {