Commit 75d4c0f6 authored by Jack Zhou, committed by qingqing01

Update CMakeLists.txt to adapt to the directory structure of win v1.6 inference lib (#20)

2. change code style
3. add error messages: display details about input errors and config file errors
Parent commit: df6cfd8d
cmake_minimum_required(VERSION 3.0)
project(cpp_inference_demo CXX C)
message("cmake module path: ${CMAKE_MODULE_PATH}")
message("cmake root path: ${CMAKE_ROOT}")
option(WITH_MKL "Compile demo with MKL/OpenBlas support,defaultuseMKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
@@ -70,6 +69,7 @@ link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")
link_directories("${CMAKE_CURRENT_BINARY_DIR}")
if (WIN32)
    include_directories("${PADDLE_DIR}/paddle/fluid/inference")
+   include_directories("${PADDLE_DIR}/paddle/include")
    link_directories("${PADDLE_DIR}/paddle/fluid/inference")
    include_directories("${OPENCV_DIR}/build/include")
    include_directories("${OPENCV_DIR}/opencv/build/include")
@@ -134,6 +134,7 @@ if(WITH_MKL)
    else ()
        set(MATH_LIB ${PADDLE_DIR}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
                     ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
+       execute_process(COMMAND cp -r ${PADDLE_DIR}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib)
    endif ()
    set(MKLDNN_PATH "${PADDLE_DIR}/third_party/install/mkldnn")
    if(EXISTS ${MKLDNN_PATH})
@@ -148,22 +149,22 @@ else()
    set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
-if(WITH_STATIC_LIB)
-   if (WIN32)
+if(WIN32)
+   if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}")
        set(DEPS
            ${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-   else ()
+   else()
        set(DEPS
            ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
    endif()
-else()
+endif()
-   if (WIN32)
+if(WITH_STATIC_LIB)
        set(DEPS
-           ${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+           ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
-   else ()
+   else()
        set(DEPS
            ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
-   endif()
endif()
if (NOT WIN32)
@@ -242,8 +243,6 @@ if (NOT WIN32)
        set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libippicv${CMAKE_STATIC_LIBRARY_SUFFIX})
    endif()
endif()
-# message(${CMAKE_CXX_FLAGS})
-# set(CMAKE_CXX_FLAGS "-g ${CMAKE_CXX_FLAGS}")
SET(PADDLESEG_INFERENCE_SRCS preprocessor/preprocessor.cpp
    preprocessor/preprocessor_detection.cpp predictor/detection_predictor.cpp
@@ -265,7 +264,7 @@ if (WIN32)
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
-   COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
+   COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
)
endif()
......
@@ -65,7 +65,7 @@ deploy
After the build completes, the required executables and libraries have been generated. Using our `faster rcnn` based detection model as an example, the sections below walk through the general workflow for deploying an image detection model.
-### 1. Download the model files
+### 4.1. Download the model files
We provide faster rcnn and faster rcnn+fpn models for prediction on the coco17 dataset; they can be downloaded here: [faster rcnn sample model download](https://paddleseg.bj.bcebos.com/inference/faster_rcnn_pp50.zip)
[faster rcnn + fpn sample model download](https://paddleseg.bj.bcebos.com/inference/faster_rcnn_pp50_fpn.zip)
@@ -83,7 +83,7 @@ faster_rcnn_pp50/
**Assume** the corresponding path on `Linux` is `/root/projects/models/faster_rcnn_pp50/`
-### 2. Modify the configuration
+### 4.2. Modify the configuration
The `conf` directory of the `inference` source code (this directory) provides `detection_rcnn.yaml`, a sample configuration file based on faster rcnn. The relevant fields are described below:
@@ -127,7 +127,9 @@ DEPLOY:
```
Set the `MODEL_PATH` field to the directory where the model files downloaded and extracted in the **previous step** are placed. For more configuration fields, see the document [deployment configuration file reference](./docs/configuration.md). A minimal sketch of the `DEPLOY` section is shown after the note below.
+**Note**: when using the CPU version of the inference library, `USE_GPU` must be set to 0, otherwise prediction will fail.
-### 3. Run prediction
+### 4.3. Run prediction
In a terminal, change the current directory to the directory that contains the generated executable (use `cmd` on Windows). An example invocation is sketched below.
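A sketch of the invocation, following the usage string printed by the demo; the binary name and the paths are placeholders, so substitute the executable produced by your build and your actual config file and image directory:

```shell
./predictor --conf=conf/detection_rcnn.yaml --input_dir=images/detection_rcnn
```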
@@ -168,3 +170,4 @@ python vis.py --img_path=../build/images/detection_rcnn/000000087038.jpg --img_r
```Detection result image:```
![Detection result](./demo_images/000000087038.jpg.png)
@@ -13,6 +13,6 @@ DEPLOY:
CHANNELS : 3
PRE_PROCESSOR: "DetectionPreProcessor"
PREDICTOR_MODE: "ANALYSIS"
-BATCH_SIZE : 3
+BATCH_SIZE : 1
RESIZE_MAX_SIZE: 1333
FEEDS_SIZE: 3
@@ -23,18 +23,24 @@ int main(int argc, char** argv) {
    // 0. parse args
    google::ParseCommandLineFlags(&argc, &argv, true);
    if (FLAGS_conf.empty() || FLAGS_input_dir.empty()) {
-       std::cout << "Usage: ./predictor --conf=/config/path/to/your/model --input_dir=/directory/of/your/input/images";
+       std::cout << "Usage: ./predictor --conf=/config/path/to/your/model "
+                 << "--input_dir=/directory/of/your/input/images" << std::endl;
        return -1;
    }
    // 1. create a predictor and init it with conf
    PaddleSolution::DetectionPredictor predictor;
    if (predictor.init(FLAGS_conf) != 0) {
+#ifdef _WIN32
+       std::cerr << "Fail to init predictor" << std::endl;
+#else
        LOG(FATAL) << "Fail to init predictor";
+#endif
        return -1;
    }
    // 2. get all the images with extension '.jpeg' at input_dir
-   auto imgs = PaddleSolution::utils::get_directory_images(FLAGS_input_dir, ".jpeg|.jpg|.JPEG|.JPG|.bmp|.BMP|.png|.PNG");
+   auto imgs = PaddleSolution::utils::get_directory_images(FLAGS_input_dir,
+       ".jpeg|.jpg|.JPEG|.JPG|.bmp|.BMP|.png|.PNG");
    // 3. predict
    predictor.predict(imgs);
......
@@ -70,6 +70,6 @@ DEPLOY:
# Meaning: number of input tensors. Most models do not need to set this. Default is 1.
FEEDS_SIZE: 2
# Type: optional int
-# Meaning: pad the image sides up to an integer multiple of this value. Default is 1.
+# Meaning: pad the image sides up to an integer multiple of this value. Must be set to 32 when using an fpn model. Default is 1.
COARSEST_STRIDE: 32
```
\ No newline at end of file
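A minimal sketch of how these two fields would look for a faster rcnn + fpn model, based on the descriptions above (the other DEPLOY fields are omitted):

```yaml
DEPLOY:
    # faster rcnn feeds three input tensors (see FEEDS_SIZE above)
    FEEDS_SIZE: 3
    # fpn models require the image sides to be padded to a multiple of 32
    COARSEST_STRIDE: 32
```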
@@ -5,22 +5,30 @@
## Prerequisites
* G++ 4.8.2 ~ 4.9.4
-* CUDA 8.0 / CUDA 9.0
+* CUDA 9.0 / CUDA 10.0, cudnn 7+ (only required when using the GPU version of the inference library)
* CMake 3.0+
Make sure the basic software above is installed. **All examples below use `/root/projects/` as the working directory.**
### Step1: Download the code
-1. `mkdir -p /root/projects/paddle_models && cd /root/projects/paddle_models`
+1. `git clone https://github.com/PaddlePaddle/PaddleDetection.git`
-2. `git clone https://github.com/PaddlePaddle/models.git`
-The `C++` inference code is in the `/root/projects/paddle_models/models/PaddleCV/PaddleDetection/inference` directory, which does not depend on any other directory under `PaddleDetection`.
+The `C++` inference code is in the `/root/projects/PaddleDetection/inference` directory, which does not depend on any other directory under `PaddleDetection`.
### Step2: Download the PaddlePaddle C++ inference library fluid_inference
-Currently only `CUDA 8` and `CUDA 9` are supported. Download the corresponding version (develop build) from the [PaddlePaddle inference library download page](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html).
+The PaddlePaddle C++ inference library comes in a CPU version and a GPU version, and the GPU version is further split by CUDA version into CUDA 9.0 and CUDA 10.0 builds. Download links for each C++ inference library version:
+| Version | Link |
+| ---- | ---- |
+| CPU version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cpu_1.6.1.tgz) |
+| CUDA 9.0 version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda97_1.6.1.tgz) |
+| CUDA 10.0 version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda10_1.6.1.tgz) |
+More prebuilt inference library variants are available for different CPU types and instruction sets; version 1.6 of the inference library has been released. For the other variants see: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html)
After downloading and extracting, the `/root/projects/fluid_inference` directory contains:
@@ -53,25 +61,33 @@ make install
### Step4: Build
-When building with `CMake`, four parameters specify the paths of the core dependencies. They are defined as follows:
+When building with `CMake`, four parameters specify the paths of the core dependencies. They are defined as follows (parameters marked with * are only required when using the **GPU version** of the inference library; keep the CUDA library version consistent and **use the 9.0 or 10.0 CUDA libraries, not 9.2 or 10.1**):
| Parameter | Meaning |
| ---- | ---- |
-| CUDA_LIB | CUDA library path |
+| * CUDA_LIB | CUDA library path |
-| CUDNN_LIB | cuDNN library path |
+| * CUDNN_LIB | cudnn library path |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | Path of the Paddle inference library |
-When running the commands below, **note**: replace the parameters with the actual paths of the dependencies above:
+When building against the **GPU version** of the inference library, run the commands below. **Note**: replace the parameters with the actual paths of your dependencies:
```shell
-cd /root/projects/paddle_models/models/PaddleCV/PaddleDetection/inference
+cd /root/projects/PaddleDetection/inference
mkdir build && cd build
-cmake .. -DWITH_GPU=ON -DPADDLE_DIR=/root/projects/fluid_inference -DCUDA_LIB=/usr/local/cuda/lib64/ -DOPENCV_DIR=/root/projects/opencv3/ -DCUDNN_LIB=/usr/local/cuda/lib64/
+cmake .. -DWITH_GPU=ON -DPADDLE_DIR=/root/projects/fluid_inference -DCUDA_LIB=/usr/local/cuda/lib64/ -DOPENCV_DIR=/root/projects/opencv3/ -DCUDNN_LIB=/usr/local/cuda/lib64/ -DWITH_STATIC_LIB=OFF
make
```
+When building against the **CPU version** of the inference library, run:
+```shell
+cd /root/projects/PaddleDetection/inference
+mkdir build && cd build
+cmake .. -DWITH_GPU=OFF -DPADDLE_DIR=/root/projects/fluid_inference -DOPENCV_DIR=/root/projects/opencv3/ -DWITH_STATIC_LIB=OFF
+make
+```
### Step5: Prediction and visualization
......
@@ -5,27 +5,28 @@
## Prerequisites
* Visual Studio 2015
-* CUDA 8.0 / CUDA 9.0
+* CUDA 9.0 / CUDA 10.0, cudnn 7+ (only required when using the GPU version of the inference library)
* CMake 3.0+
Make sure the basic software above is installed. **All examples below use `D:\projects` as the working directory.**
### Step1: Download the code
-1. Open `cmd` and run `cd D:\projects\paddle_models`
-2. `git clone https://github.com/PaddlePaddle/models.git`
+1. Open `cmd` and run `cd D:\projects`
+2. `git clone https://github.com/PaddlePaddle/PaddleDetection.git`
-The `C++` inference code is in the `D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference` directory, which does not depend on any other directory under `PaddleDetection`.
+The `C++` inference code is in the `D:\projects\PaddleDetection\inference` directory, which does not depend on any other directory under `PaddleDetection`.
### Step2: Download the PaddlePaddle C++ inference library fluid_inference
-Download the PaddlePaddle inference library version matching your Windows environment and extract it to the `D:\projects\` directory.
+The PaddlePaddle C++ inference library comes in two major flavors: a CPU version and a GPU version, and the GPU version is further split by CUDA version into CUDA 9.0 and CUDA 10.0 builds. Download the version matching your Windows environment and extract it to the `D:\projects\` directory. Download links for each C++ inference library version:
-| CUDA | GPU | Download link |
-|------|------|--------|
-| 8.0 | Yes | [fluid_inference.zip](https://bj.bcebos.com/v1/paddleseg/fluid_inference_win.zip) |
-| 9.0 | Yes | [fluid_inference_cuda90.zip](https://paddleseg.bj.bcebos.com/fluid_inference_cuda9_cudnn7.zip) |
+| Version | Link |
+| ---- | ---- |
+| CPU version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_install_dir_win_cpu_1.6.zip) |
+| CUDA 9.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda9_1.6.1.zip) |
+| CUDA 10.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda10_1.6.1.zip) |
After extraction, the `D:\projects\fluid_inference` directory contains:
```
@@ -57,34 +58,51 @@ fluid_inference
call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
```
-* Generate the project with CMAKE
-  * PADDLE_DIR: path of the fluid_inference inference library
-  * CUDA_LIB: CUDA dynamic library directory; adjust to your actual installation
-  * OPENCV_DIR: OpenCV extraction directory
+The three build parameters are described below (parameters marked with * are only required when using the **GPU version** of the inference library; keep the CUDA library version consistent and **use the 9.0 or 10.0 CUDA libraries, not 9.2, 10.1, or similar**):
+| Parameter | Meaning |
+| ---- | ---- |
+| *CUDA_LIB | CUDA library path |
+| OPENCV_DIR | OpenCV installation path |
+| PADDLE_DIR | Path of the Paddle inference library |
+When building against the **GPU version** of the inference library, run the commands below. **Note**: replace the parameters with the actual paths of your dependencies:
+```bash
+# switch to the inference code directory
+cd /d D:\projects\PaddleDetection\inference
+# create the build directory; to rebuild from scratch, simply delete it
+mkdir build
+cd build
+# generate the VS project with cmake
+D:\projects\PaddleDetection\inference\build> cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DPADDLE_DIR=D:\projects\fluid_inference -DCUDA_LIB=D:\projects\cudalib\v9.0\lib\x64 -DOPENCV_DIR=D:\projects\opencv -T host=x64
```
+When building against the **CPU version** of the inference library, run:
+```bash
# switch to the inference code directory
-cd /d D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference
+cd /d D:\projects\PaddleDetection\inference
# create the build directory; to rebuild from scratch, simply delete it
mkdir build
cd build
# generate the VS project with cmake
-D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference\build> cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DPADDLE_DIR=D:\projects\fluid_inference -DCUDA_LIB=D:\projects\cudalib\v9.0\lib\x64 -DOPENCV_DIR=D:\projects\opencv -T host=x64
+D:\projects\PaddleDetection\inference\build> cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=OFF -DPADDLE_DIR=D:\projects\fluid_inference -DOPENCV_DIR=D:\projects\opencv -T host=x64
```
The `cmake` parameter `-G` specifies the Visual Studio version of the generated project; adjust it to your `VS` version. See the [cmake documentation](https://cmake.org/cmake/help/v3.15/manual/cmake-generators.7.html) for details.
* Build the executable
```
-D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference\build> msbuild /m /p:Configuration=Release cpp_inference_demo.sln
+D:\projects\PaddleDetection\inference\build> msbuild /m /p:Configuration=Release cpp_inference_demo.sln
```
### Step5: Prediction and visualization
The executable produced by the `Visual Studio 2015` build above is in the `build\release` directory. Switch to that directory:
```
-cd /d D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference\build\release
+cd /d D:\projects\PaddleDetection\inference\build\release
```
Then run:
......
@@ -6,7 +6,7 @@ On Windows, we use `Visual Studio 2015` and `Visual Studio 2019 Com
## Prerequisites
* Visual Studio 2019
-* CUDA 8.0 / CUDA 9.0
+* CUDA 9.0 / CUDA 10.0, cudnn 7+ (only required when using the GPU version of the inference library)
* CMake 3.0+
Make sure the basic software above is installed; we use the Community edition of `VS2019`.
@@ -15,20 +15,21 @@ On Windows, we use `Visual Studio 2015` and `Visual Studio 2019 Com
### Step1: Download the code
-1. Download the source code: [download link](https://github.com/PaddlePaddle/models/archive/develop.zip)
-2. Extract it and rename the extracted directory to `paddle_models`
+1. Download the source code: [download link](https://github.com/PaddlePaddle/PaddleDetection/archive/master.zip)
+2. Extract it and rename the extracted directory to `PaddleDetection`
-The examples below use `D:\projects\paddle_models` as the code directory path.
+The examples below use `D:\projects\PaddleDetection` as the code directory path.
### Step2: Download the PaddlePaddle C++ inference library fluid_inference
-Download the PaddlePaddle inference library version matching your Windows environment and extract it to the `D:\projects\` directory.
+The PaddlePaddle C++ inference library comes in two major flavors: a CPU version and a GPU version, and the GPU version is further split by CUDA version into CUDA 9.0 and CUDA 10.0 builds. Download the version matching your Windows environment and extract it to the `D:\projects\` directory. Download links for each C++ inference library version:
-| CUDA | GPU | Download link |
-|------|------|--------|
-| 8.0 | Yes | [fluid_inference.zip](https://bj.bcebos.com/v1/paddleseg/fluid_inference_win.zip) |
-| 9.0 | Yes | [fluid_inference_cuda90.zip](https://paddleseg.bj.bcebos.com/fluid_inference_cuda9_cudnn7.zip) |
+| Version | Link |
+| ---- | ---- |
+| CPU version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_install_dir_win_cpu_1.6.zip) |
+| CUDA 9.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda9_1.6.1.zip) |
+| CUDA 10.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda10_1.6.1.zip) |
After extraction, the `D:\projects\fluid_inference` directory contains:
```
@@ -39,7 +40,6 @@ fluid_inference
|
└── version.txt # version and build information
```
-**Note:** the `CUDA90` package extracts to a directory named `fluid_inference_cuda90`.
### Step3: Install and configure OpenCV
@@ -67,16 +67,17 @@ fluid_inference
4. Click `Browse` and set the build options to the paths of `CUDA`, `OpenCV`, and the `Paddle inference library`
-![step4](https://paddleseg.bj.bcebos.com/inference/vs2019_step5.png)
-The three build parameters are described below:
+The three build parameters are described below (parameters marked with * are only required when using the **GPU version** of the inference library; keep the CUDA library version consistent and **use the 9.0 or 10.0 CUDA libraries, not 9.2, 10.1, or similar**):
| Parameter | Meaning |
| ---- | ---- |
-| CUDA_LIB | CUDA library path |
+| *CUDA_LIB | CUDA library path |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | Path of the Paddle inference library |
+**Note**: when using the CPU version of the inference library, clear (uncheck) the CUDA_LIB entry.
+![step4](https://paddleseg.bj.bcebos.com/inference/vs2019_step5.png)
**Once the options are set**, click `Save and generate CMake cache to load variables` shown in the figure above.
5. Click `Build` -> `Build All`
@@ -89,7 +90,7 @@ fluid_inference
The executable produced by the `Visual Studio 2019` build above is in the `out\build\x64-Release` directory. Open `cmd` and switch to that directory:
```
-cd D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference\build\x64-Release
+cd D:\projects\PaddleDetection\inference\out\build\x64-Release
```
Then run:
......
@@ -14,23 +14,24 @@
#pragma once
+#include <glog/logging.h>
+#include <yaml-cpp/yaml.h>
+#include <paddle_inference_api.h>
#include <memory>
#include <string>
#include <vector>
#include <thread>
#include <chrono>
#include <algorithm>
-#include <glog/logging.h>
-#include <yaml-cpp/yaml.h>
#include <opencv2/opencv.hpp>
-#include <paddle_inference_api.h>
-#include <utils/conf_parser.h>
-#include <utils/utils.h>
-#include <preprocessor/preprocessor.h>
+#include "utils/conf_parser.h"
+#include "utils/utils.h"
+#include "preprocessor/preprocessor.h"
namespace PaddleSolution {
class DetectionPredictor {
 public:
    // init a predictor with a yaml config file
    int init(const std::string& conf);
@@ -48,5 +49,5 @@ namespace PaddleSolution {
    PaddleSolution::PaddleModelConfigPaser _model_config;
    std::shared_ptr<PaddleSolution::ImagePreProcessor> _preprocessor;
    std::unique_ptr<paddle::PaddlePredictor> _main_predictor;
};
-}
+}  // namespace PaddleSolution
@@ -16,14 +16,18 @@
#include "preprocessor.h"
#include "preprocessor_detection.h"
+#include <iostream>
namespace PaddleSolution {
    std::shared_ptr<ImagePreProcessor> create_processor(const std::string& conf_file) {
        auto config = std::make_shared<PaddleSolution::PaddleModelConfigPaser>();
        if (!config->load_config(conf_file)) {
-           LOG(FATAL) << "fail to laod conf file [" << conf_file << "]";
+#ifdef _WIN32
+           std::cerr << "fail to load conf file [" << conf_file << "]" << std::endl;
+#else
+           LOG(FATAL) << "fail to load conf file [" << conf_file << "]";
+#endif
            return nullptr;
        }
@@ -34,10 +38,13 @@ namespace PaddleSolution {
            }
            return p;
        }
+#ifdef _WIN32
+       std::cerr << "unknown processor_name [" << config->_pre_processor << "],"
+                 << "please check whether PRE_PROCESSOR is set correctly" << std::endl;
+#else
-       LOG(FATAL) << "unknown processor_name [" << config->_pre_processor << "]";
+       LOG(FATAL) << "unknown processor_name [" << config->_pre_processor << "],"
+                  << "please check whether PRE_PROCESSOR is set correctly";
+#endif
        return nullptr;
    }
-}
+}  // namespace PaddleSolution
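A minimal usage sketch of the `create_processor` factory above; the config path is illustrative. The factory now returns `nullptr` (after printing a message) when the config cannot be loaded or when `PRE_PROCESSOR` names an unknown preprocessor:

```cpp
#include "preprocessor/preprocessor.h"
#include <iostream>

int main() {
    // Returns a DetectionPreProcessor when PRE_PROCESSOR is set accordingly,
    // or nullptr on a bad config file / unknown processor name.
    auto preprocessor = PaddleSolution::create_processor("conf/detection_rcnn.yaml");
    if (!preprocessor) {
        std::cerr << "failed to create preprocessor" << std::endl;
        return -1;
    }
    return 0;
}
```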
@@ -26,17 +26,23 @@
namespace PaddleSolution {
class ImagePreProcessor {
 protected:
-   ImagePreProcessor() {};
+   ImagePreProcessor() {}
 public:
    virtual ~ImagePreProcessor() {}
-   virtual bool single_process(const std::string& fname, float* data, int* ori_w, int* ori_h) {
+   virtual bool single_process(const std::string& fname,
+                               float* data,
+                               int* ori_w,
+                               int* ori_h) {
        return true;
    }
-   virtual bool batch_process(const std::vector<std::string>& imgs, float* data, int* ori_w, int* ori_h) {
+   virtual bool batch_process(const std::vector<std::string>& imgs,
+                              float* data,
+                              int* ori_w,
+                              int* ori_h) {
        return true;
    }
@@ -44,21 +50,28 @@ public:
        return true;
    }
-   virtual bool batch_process(const std::vector<std::string>& imgs, float* data) {
+   virtual bool batch_process(const std::vector<std::string>& imgs,
+                              float* data) {
        return true;
    }
-   virtual bool single_process(const std::string& fname, std::vector<float> &data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio) {
+   virtual bool single_process(const std::string& fname,
+                               std::vector<float> &data,
+                               int* ori_w, int* ori_h,
+                               int* resize_w, int* resize_h,
+                               float* scale_ratio) {
        return true;
    }
-   virtual bool batch_process(const std::vector<std::string>& imgs, std::vector<std::vector<float>> &data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio) {
+   virtual bool batch_process(const std::vector<std::string>& imgs,
+                              std::vector<std::vector<float>> &data,
+                              int* ori_w, int* ori_h, int* resize_w,
+                              int* resize_h, float* scale_ratio) {
        return true;
    }
};  // end of class ImagePreProcessor
-std::shared_ptr<ImagePreProcessor> create_processor(const std::string &config_file);
+std::shared_ptr<ImagePreProcessor>
+create_processor(const std::string &config_file);
-}  // end of namespace paddle_solution
+}  // namespace PaddleSolution
@@ -12,59 +12,70 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <glog/logging.h>
#include <thread>
#include <mutex>
-#include <glog/logging.h>
#include "preprocessor_detection.h"
#include "utils/utils.h"
namespace PaddleSolution {
-   bool DetectionPreProcessor::single_process(const std::string& fname, std::vector<float> &vec_data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio) {
+   bool DetectionPreProcessor::single_process(const std::string& fname,
+                                              std::vector<float> &vec_data,
+                                              int* ori_w, int* ori_h,
+                                              int* resize_w, int* resize_h,
+                                              float* scale_ratio) {
        cv::Mat im1 = cv::imread(fname, -1);
        cv::Mat im;
-       if(_config->_feeds_size == 3) { // faster rcnn
+       if (_config->_feeds_size == 3) {  // faster rcnn
            im1.convertTo(im, CV_32FC3, 1/255.0);
-       }
-       else if(_config->_feeds_size == 2){ //yolo v3
+       } else if (_config->_feeds_size == 2) {  // yolo v3
            im = im1;
        }
        if (im.data == nullptr || im.empty()) {
+#ifdef _WIN32
+           std::cerr << "Failed to open image: " << fname << std::endl;
+#else
            LOG(ERROR) << "Failed to open image: " << fname;
+#endif
            return false;
        }
        int channels = im.channels();
        if (channels == 1) {
            cv::cvtColor(im, im, cv::COLOR_GRAY2BGR);
        }
        channels = im.channels();
        if (channels != 3 && channels != 4) {
+#ifdef _WIN32
+           std::cerr << "Only support rgb(gray) and rgba image." << std::endl;
+#else
            LOG(ERROR) << "Only support rgb(gray) and rgba image.";
+#endif
            return false;
        }
        *ori_w = im.cols;
        *ori_h = im.rows;
        cv::cvtColor(im, im, cv::COLOR_BGR2RGB);
-       //channels = im.channels();
-       //resize
+       // channels = im.channels();
+       // resize
        int rw = im.cols;
        int rh = im.rows;
        float im_scale_ratio;
-       utils::scaling(_config->_resize_type, rw, rh, _config->_resize[0], _config->_resize[1], _config->_target_short_size, _config->_resize_max_size, im_scale_ratio);
+       utils::scaling(_config->_resize_type, rw, rh, _config->_resize[0],
+                      _config->_resize[1], _config->_target_short_size,
+                      _config->_resize_max_size, im_scale_ratio);
        cv::Size resize_size(rw, rh);
        *resize_w = rw;
        *resize_h = rh;
        *scale_ratio = im_scale_ratio;
        if (*ori_h != rh || *ori_w != rw) {
            cv::Mat im_temp;
-           if(_config->_resize_type == utils::SCALE_TYPE::UNPADDING) {
+           if (_config->_resize_type == utils::SCALE_TYPE::UNPADDING) {
                cv::resize(im, im_temp, resize_size, 0, 0, cv::INTER_LINEAR);
-           }
-           else if(_config->_resize_type == utils::SCALE_TYPE::RANGE_SCALING) {
-               cv::resize(im, im_temp, cv::Size(), im_scale_ratio, im_scale_ratio, cv::INTER_LINEAR);
+           } else if (_config->_resize_type == utils::SCALE_TYPE::RANGE_SCALING) {
+               cv::resize(im, im_temp, cv::Size(), im_scale_ratio,
+                          im_scale_ratio, cv::INTER_LINEAR);
            }
            im = im_temp;
        }
@@ -81,11 +92,10 @@ namespace PaddleSolution {
        for (int w = 0; w < rw; ++w) {
            for (int c = 0; c < channels; ++c) {
                int top_index = (c * rh + h) * rw + w;
-               float pixel;// = static_cast<float>(fptr[im_index]);// / 255.0;
-               if(_config->_feeds_size == 2){ //yolo v3
+               float pixel;
+               if (_config->_feeds_size == 2) {  // yolo v3
                    pixel = static_cast<float>(uptr[im_index++]) / 255.0;
-               }
-               else if(_config->_feeds_size == 3){
+               } else if (_config->_feeds_size == 3) {
                    pixel = fptr[im_index++];
                }
                pixel = (pixel - pmean[c]) / pscale[c];
@@ -94,9 +104,12 @@ namespace PaddleSolution {
            }
        }
        return true;
    }
-   bool DetectionPreProcessor::batch_process(const std::vector<std::string>& imgs, std::vector<std::vector<float>> &data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio) {
+   bool DetectionPreProcessor::batch_process(const std::vector<std::string>& imgs,
+                                             std::vector<std::vector<float>> &data,
+                                             int* ori_w, int* ori_h, int* resize_w,
+                                             int* resize_h, float* scale_ratio) {
        auto ic = _config->_channels;
        auto iw = _config->_resize[0];
        auto ih = _config->_resize[1];
@@ -108,9 +121,11 @@ namespace PaddleSolution {
        int* resize_width = &resize_w[i];
        int* resize_height = &resize_h[i];
        float* sr = &scale_ratio[i];
-       threads.emplace_back([this, &data, i, path, width, height, resize_width, resize_height, sr] {
+       threads.emplace_back([this, &data, i, path, width, height,
+                             resize_width, resize_height, sr] {
            std::vector<float> buffer;
-           single_process(path, buffer, width, height, resize_width, resize_height, sr);
+           single_process(path, buffer, width, height, resize_width,
+                          resize_height, sr);
            data[i] = buffer;
        });
    }
@@ -120,11 +135,10 @@ namespace PaddleSolution {
        }
    }
    return true;
}
bool DetectionPreProcessor::init(std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> config) {
    _config = config;
    return true;
-}
}
+}  // namespace PaddleSolution
@@ -18,19 +18,23 @@
namespace PaddleSolution {
class DetectionPreProcessor : public ImagePreProcessor {
 public:
    DetectionPreProcessor() : _config(nullptr) {
-   };
+   }
    bool init(std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> config);
-   bool single_process(const std::string& fname, std::vector<float> &data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio);
+   bool single_process(const std::string& fname, std::vector<float> &data,
+                       int* ori_w, int* ori_h, int* resize_w,
+                       int* resize_h, float* scale_ratio);
-   bool batch_process(const std::vector<std::string>& imgs, std::vector<std::vector<float>> &data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio);
+   bool batch_process(const std::vector<std::string>& imgs,
+                      std::vector<std::vector<float>> &data,
+                      int* ori_w, int* ori_h, int* resize_w,
+                      int* resize_h, float* scale_ratio);
 private:
    std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> _config;
};
-}
+}  // namespace PaddleSolution
@@ -134,7 +134,8 @@ _DETECTIONBOX = _descriptor.Descriptor(
    extension_ranges=[],
    oneofs=[],
    serialized_start=43,
-   serialized_end=175, )
+   serialized_end=175,
+)
_DETECTIONRESULT = _descriptor.Descriptor(
    name='DetectionResult',
@@ -185,7 +186,8 @@ _DETECTIONRESULT = _descriptor.Descriptor(
    extension_ranges=[],
    oneofs=[],
    serialized_start=177,
-   serialized_end=267, )
+   serialized_end=267,
+)
_DETECTIONRESULT.fields_by_name['detection_boxes'].message_type = _DETECTIONBOX
DESCRIPTOR.message_types_by_name['DetectionBox'] = _DETECTIONBOX
@@ -193,9 +195,8 @@ DESCRIPTOR.message_types_by_name['DetectionResult'] = _DETECTIONRESULT
DetectionBox = _reflection.GeneratedProtocolMessageType(
    'DetectionBox',
-   (_message.Message, ),
-   dict(
-       DESCRIPTOR=_DETECTIONBOX,
+   (_message.Message,),
+   dict(DESCRIPTOR=_DETECTIONBOX,
        __module__='detection_result_pb2'
        # @@protoc_insertion_point(class_scope:PaddleSolution.DetectionBox)
    ))
@@ -203,9 +204,8 @@ _sym_db.RegisterMessage(DetectionBox)
DetectionResult = _reflection.GeneratedProtocolMessageType(
    'DetectionResult',
-   (_message.Message, ),
-   dict(
-       DESCRIPTOR=_DETECTIONRESULT,
+   (_message.Message,),
+   dict(DESCRIPTOR=_DETECTIONRESULT,
        __module__='detection_result_pb2'
        # @@protoc_insertion_point(class_scope:PaddleSolution.DetectionResult)
    ))
......
@@ -85,8 +85,8 @@ if __name__ == "__main__":
    for box in detection_result.detection_boxes:
        if box.score >= Flags.threshold:
            box_class = getattr(box, 'class')
-           text_class_score_str = "%s %.2f" % (
-               class2LabelMap.get(str(box_class)), box.score)
+           text_class_score_str = "%s %.2f" % (class2LabelMap.get(
+               str(box_class)), box.score)
            text_point = (int(box.left_top_x), int(box.left_top_y))
            ptLeftTop = (int(box.left_top_x), int(box.left_top_y))
@@ -106,8 +106,8 @@ if __name__ == "__main__":
            text_box_left_top = (text_point[0],
                                 text_point[1] - text_size[0][1])
-           text_box_right_bottom = (
-               text_point[0] + text_size[0][0], text_point[1])
+           text_box_right_bottom = (text_point[0] +
+                                    text_size[0][0], text_point[1])
            cv2.rectangle(img, text_box_left_top,
                          text_box_right_bottom, color, -1, 8)
......
@@ -13,16 +13,17 @@
// limitations under the License.
#pragma once
+#include <yaml-cpp/yaml.h>
#include <iostream>
#include <vector>
#include <string>
#include <map>
-#include <yaml-cpp/yaml.h>
namespace PaddleSolution {
class PaddleModelConfigPaser {
    std::map<std::string, int> _scaling_map;
 public:
    PaddleModelConfigPaser()
        :_class_num(0),
@@ -33,13 +34,11 @@ namespace PaddleSolution {
        _model_file_name("__model__"),
        _param_file_name("__params__"),
        _scaling_map{{"UNPADDING", 0},
-                    {"RANGE_SCALING",1}},
+                    {"RANGE_SCALING", 1}},
        _feeds_size(1),
-       _coarsest_stride(1)
-   {
-   }
-   ~PaddleModelConfigPaser() {
-   }
+       _coarsest_stride(1) {}
+   ~PaddleModelConfigPaser() {}
    void reset() {
        _crop_size.clear();
@@ -54,7 +53,7 @@ namespace PaddleSolution {
        _batch_size = 1;
        _model_file_name = "__model__";
        _model_path = "./";
-       _param_file_name="__params__";
+       _param_file_name = "__params__";
        _resize_type = 0;
        _resize_max_size = 0;
        _feeds_size = 1;
@@ -84,83 +83,143 @@ namespace PaddleSolution {
    }
    bool load_config(const std::string& conf_file) {
        reset();
-       YAML::Node config = YAML::LoadFile(conf_file);
+       YAML::Node config;
+       try {
+           config = YAML::LoadFile(conf_file);
+       } catch(...) {
+           return false;
+       }
        // 1. get resize
+       if (config["DEPLOY"]["EVAL_CROP_SIZE"].IsDefined()) {
            auto str = config["DEPLOY"]["EVAL_CROP_SIZE"].as<std::string>();
            _resize = parse_str_to_vec<int>(process_parenthesis(str));
+       } else {
+           std::cerr << "Please set EVAL_CROP_SIZE: (xx, xx)" << std::endl;
+           return false;
+       }
        // 0. get crop_size
-       if(config["DEPLOY"]["CROP_SIZE"].IsDefined()) {
+       if (config["DEPLOY"]["CROP_SIZE"].IsDefined()) {
            auto crop_str = config["DEPLOY"]["CROP_SIZE"].as<std::string>();
            _crop_size = parse_str_to_vec<int>(process_parenthesis(crop_str));
-       }
-       else {
+       } else {
            _crop_size = _resize;
        }
        // 2. get mean
+       if (config["DEPLOY"]["MEAN"].IsDefined()) {
            for (const auto& item : config["DEPLOY"]["MEAN"]) {
                _mean.push_back(item.as<float>());
            }
+       } else {
+           std::cerr << "Please set MEAN: [xx, xx, xx]" << std::endl;
+           return false;
+       }
        // 3. get std
+       if(config["DEPLOY"]["STD"].IsDefined()) {
            for (const auto& item : config["DEPLOY"]["STD"]) {
                _std.push_back(item.as<float>());
            }
+       } else {
+           std::cerr << "Please set STD: [xx, xx, xx]" << std::endl;
+           return false;
+       }
        // 4. get image type
+       if (config["DEPLOY"]["IMAGE_TYPE"].IsDefined()) {
            _img_type = config["DEPLOY"]["IMAGE_TYPE"].as<std::string>();
+       } else {
+           std::cerr << "Please set IMAGE_TYPE: \"rgb\" or \"rgba\"" << std::endl;
+           return false;
+       }
        // 5. get class number
+       if (config["DEPLOY"]["NUM_CLASSES"].IsDefined()) {
            _class_num = config["DEPLOY"]["NUM_CLASSES"].as<int>();
+       } else {
+           std::cerr << "Please set NUM_CLASSES: x" << std::endl;
+           return false;
+       }
        // 7. set model path
+       if (config["DEPLOY"]["MODEL_PATH"].IsDefined()) {
            _model_path = config["DEPLOY"]["MODEL_PATH"].as<std::string>();
+       } else {
+           std::cerr << "Please set MODEL_PATH: \"/path/to/model_dir\"" << std::endl;
+           return false;
+       }
        // 8. get model file_name
+       if (config["DEPLOY"]["MODEL_FILENAME"].IsDefined()) {
            _model_file_name = config["DEPLOY"]["MODEL_FILENAME"].as<std::string>();
+       } else {
+           _model_file_name = "__model__";
+       }
        // 9. get model param file name
-       _param_file_name = config["DEPLOY"]["PARAMS_FILENAME"].as<std::string>();
+       if (config["DEPLOY"]["PARAMS_FILENAME"].IsDefined()) {
+           _param_file_name
+               = config["DEPLOY"]["PARAMS_FILENAME"].as<std::string>();
+       } else {
+           _param_file_name = "__params__";
+       }
        // 10. get pre_processor
+       if (config["DEPLOY"]["PRE_PROCESSOR"].IsDefined()) {
            _pre_processor = config["DEPLOY"]["PRE_PROCESSOR"].as<std::string>();
+       } else {
+           std::cerr << "Please set PRE_PROCESSOR: \"DetectionPreProcessor\"" << std::endl;
+           return false;
+       }
        // 11. use_gpu
+       if (config["DEPLOY"]["USE_GPU"].IsDefined()) {
            _use_gpu = config["DEPLOY"]["USE_GPU"].as<int>();
+       } else {
+           _use_gpu = 0;
+       }
        // 12. predictor_mode
+       if (config["DEPLOY"]["PREDICTOR_MODE"].IsDefined()) {
            _predictor_mode = config["DEPLOY"]["PREDICTOR_MODE"].as<std::string>();
+       } else {
+           std::cerr << "Please set PREDICTOR_MODE: \"NATIVE\" or \"ANALYSIS\"" << std::endl;
+           return false;
+       }
        // 13. batch_size
+       if (config["DEPLOY"]["BATCH_SIZE"].IsDefined()) {
            _batch_size = config["DEPLOY"]["BATCH_SIZE"].as<int>();
+       } else {
+           _batch_size = 1;
+       }
        // 14. channels
+       if (config["DEPLOY"]["CHANNELS"].IsDefined()) {
            _channels = config["DEPLOY"]["CHANNELS"].as<int>();
+       } else {
+           std::cerr << "Please set CHANNELS: x" << std::endl;
+           return false;
+       }
        // 15. target_short_size
-       if(config["DEPLOY"]["TARGET_SHORT_SIZE"].IsDefined()) {
+       if (config["DEPLOY"]["TARGET_SHORT_SIZE"].IsDefined()) {
            _target_short_size = config["DEPLOY"]["TARGET_SHORT_SIZE"].as<int>();
        }
        // 16.resize_type
-       if(config["DEPLOY"]["RESIZE_TYPE"].IsDefined() &&
+       if (config["DEPLOY"]["RESIZE_TYPE"].IsDefined() &&
            _scaling_map.find(config["DEPLOY"]["RESIZE_TYPE"].as<std::string>()) != _scaling_map.end()) {
            _resize_type = _scaling_map[config["DEPLOY"]["RESIZE_TYPE"].as<std::string>()];
-       }
-       else{
+       } else {
            _resize_type = 0;
        }
        // 17.resize_max_size
-       if(config["DEPLOY"]["RESIZE_MAX_SIZE"].IsDefined()) {
+       if (config["DEPLOY"]["RESIZE_MAX_SIZE"].IsDefined()) {
            _resize_max_size = config["DEPLOY"]["RESIZE_MAX_SIZE"].as<int>();
        }
        // 18.feeds_size
-       if(config["DEPLOY"]["FEEDS_SIZE"].IsDefined()){
+       if (config["DEPLOY"]["FEEDS_SIZE"].IsDefined()) {
            _feeds_size = config["DEPLOY"]["FEEDS_SIZE"].as<int>();
        }
        // 19. coarsest_stride
-       if(config["DEPLOY"]["COARSEST_STRIDE"].IsDefined()) {
+       if (config["DEPLOY"]["COARSEST_STRIDE"].IsDefined()) {
            _coarsest_stride = config["DEPLOY"]["COARSEST_STRIDE"].as<int>();
        }
        return true;
    }
    void debug() const {
-       std::cout << "SCALE_RESIZE: (" << _resize[0] << ", " << _resize[1] << ")" << std::endl;
+       std::cout << "SCALE_RESIZE: (" << _resize[0] << ", "
+                 << _resize[1] << ")" << std::endl;
        std::cout << "MEAN: [";
        for (int i = 0; i < _mean.size(); ++i) {
@@ -176,25 +235,27 @@ namespace PaddleSolution {
        for (int i = 0; i < _std.size(); ++i) {
            if (i != _std.size() - 1) {
                std::cout << _std[i] << ", ";
-           }
-           else {
+           } else {
                std::cout << _std[i];
            }
        }
        std::cout << "]" << std::endl;
-       std::cout << "DEPLOY.TARGET_SHORT_SIZE: " << _target_short_size << std::endl;
+       std::cout << "DEPLOY.TARGET_SHORT_SIZE: " << _target_short_size
+                 << std::endl;
        std::cout << "DEPLOY.IMAGE_TYPE: " << _img_type << std::endl;
        std::cout << "DEPLOY.NUM_CLASSES: " << _class_num << std::endl;
        std::cout << "DEPLOY.CHANNELS: " << _channels << std::endl;
        std::cout << "DEPLOY.MODEL_PATH: " << _model_path << std::endl;
-       std::cout << "DEPLOY.MODEL_FILENAME: " << _model_file_name << std::endl;
-       std::cout << "DEPLOY.PARAMS_FILENAME: " << _param_file_name << std::endl;
+       std::cout << "DEPLOY.MODEL_FILENAME: " << _model_file_name
+                 << std::endl;
+       std::cout << "DEPLOY.PARAMS_FILENAME: " << _param_file_name
+                 << std::endl;
        std::cout << "DEPLOY.PRE_PROCESSOR: " << _pre_processor << std::endl;
        std::cout << "DEPLOY.USE_GPU: " << _use_gpu << std::endl;
        std::cout << "DEPLOY.PREDICTOR_MODE: " << _predictor_mode << std::endl;
        std::cout << "DEPLOY.BATCH_SIZE: " << _batch_size << std::endl;
    }
-   //DEPLOY.COARSEST_STRIDE
+   // DEPLOY.COARSEST_STRIDE
    int _coarsest_stride;
    // DEPLOY.FEEDS_SIZE
    int _feeds_size;
@@ -232,6 +293,5 @@ namespace PaddleSolution {
    std::string _predictor_mode;
    // DEPLOY.BATCH_SIZE
    int _batch_size;
};
-}
+}  // namespace PaddleSolution
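A minimal usage sketch of the parser above; the config path is illustrative, and `load_config` now returns false (after printing which field is missing) instead of aborting:

```cpp
#include "utils/conf_parser.h"
#include <iostream>

int main() {
    PaddleSolution::PaddleModelConfigPaser config;
    // load_config validates the required DEPLOY fields (EVAL_CROP_SIZE, MEAN,
    // STD, IMAGE_TYPE, NUM_CLASSES, MODEL_PATH, PRE_PROCESSOR, ...) and falls
    // back to defaults for the optional ones.
    if (!config.load_config("conf/detection_rcnn.yaml")) {
        std::cerr << "invalid or incomplete config file" << std::endl;
        return -1;
    }
    config.debug();  // print the parsed DEPLOY settings
    return 0;
}
```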
@@ -28,22 +28,24 @@
#endif
namespace PaddleSolution {
namespace utils {
enum SCALE_TYPE{
    UNPADDING,
    RANGE_SCALING
};
inline std::string path_join(const std::string& dir, const std::string& path) {
    std::string seperator = "/";
#ifdef _WIN32
    seperator = "\\";
#endif
    return dir + seperator + path;
}
#ifndef _WIN32
// scan a directory and get all files with input extensions
-inline std::vector<std::string> get_directory_images(const std::string& path, const std::string& exts)
-{
+inline std::vector<std::string> get_directory_images(const std::string& path,
+                                                     const std::string& exts) {
    std::vector<std::string> imgs;
    struct dirent *entry;
    DIR *dir = opendir(path.c_str());
@@ -51,7 +53,6 @@ namespace PaddleSolution {
        closedir(dir);
        return imgs;
    }
    while ((entry = readdir(dir)) != NULL) {
        std::string item = entry->d_name;
        auto ext = strrchr(entry->d_name, '.');
@@ -64,13 +65,14 @@ namespace PaddleSolution {
    }
    sort(imgs.begin(), imgs.end());
    return imgs;
}
#else
// scan a directory and get all files with input extensions
-inline std::vector<std::string> get_directory_images(const std::string& path, const std::string& exts)
-{
+inline std::vector<std::string> get_directory_images(const std::string& path,
+                                                     const std::string& exts) {
    std::vector<std::string> imgs;
-   for (const auto& item : std::experimental::filesystem::directory_iterator(path)) {
+   for (const auto& item :
+        std::experimental::filesystem::directory_iterator(path)) {
        auto suffix = item.path().extension().string();
        if (exts.find(suffix) != std::string::npos && suffix.size() > 0) {
            auto fullname = path_join(path, item.path().filename().string());
@@ -79,30 +81,32 @@ namespace PaddleSolution {
    }
    sort(imgs.begin(), imgs.end());
    return imgs;
}
#endif
-inline int scaling(int resize_type, int &w, int &h, int new_w, int new_h, int target_size, int max_size, float &im_scale_ratio)
-{
-   if(w <= 0 || h <= 0 || new_w <= 0 || new_h <= 0){
+inline int scaling(int resize_type, int &w, int &h, int new_w, int new_h,
+                   int target_size, int max_size, float &im_scale_ratio) {
+   if (w <= 0 || h <= 0 || new_w <= 0 || new_h <= 0) {
        return -1;
    }
-   switch(resize_type) {
+   switch (resize_type) {
    case SCALE_TYPE::UNPADDING:
    {
        w = new_w;
        h = new_h;
-       im_scale_ratio=0;
+       im_scale_ratio = 0;
    }
    break;
    case SCALE_TYPE::RANGE_SCALING:
    {
        int im_max_size = std::max(w, h);
        int im_min_size = std::min(w, h);
-       float scale_ratio= static_cast<float>(target_size) / static_cast<float>(im_min_size);
-       if(max_size > 0) {
-           if(round(scale_ratio * im_max_size) > max_size) {
-               scale_ratio = static_cast<float>(max_size) / static_cast<float>(im_max_size);
+       float scale_ratio = static_cast<float>(target_size)
+                         / static_cast<float>(im_min_size);
+       if (max_size > 0) {
+           if (round(scale_ratio * im_max_size) > max_size) {
+               scale_ratio = static_cast<float>(max_size)
+                           / static_cast<float>(im_max_size);
            }
        }
        w = round(scale_ratio * static_cast<float>(w));
@@ -112,13 +116,15 @@ namespace PaddleSolution {
    break;
    default :
    {
-       std::cout << "Can't support this type of scaling strategy." << std::endl;
-       std::cout << "Throw exception at file " << __FILE__ << " on line " << __LINE__ << std::endl;
+       std::cout << "Can't support this type of scaling strategy."
+                 << std::endl;
+       std::cout << "Throw exception at file " << __FILE__
+                 << " on line " << __LINE__ << std::endl;
        throw 0;
    }
    break;
    }
    return 0;
-}
-}
}
+}  // namespace utils
+}  // namespace PaddleSolution
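A small usage sketch of `utils::scaling` in RANGE_SCALING mode; the numbers are illustrative (a short-side target of 800 with the 1333 cap used as `RESIZE_MAX_SIZE` in the rcnn config):

```cpp
#include "utils/utils.h"
#include <iostream>

int main() {
    // An illustrative 2000x1000 image: the short side is scaled towards 800,
    // then the long side is capped at 1333.
    int w = 2000, h = 1000;
    float ratio = 0.0f;
    // new_w/new_h (3rd and 4th size arguments) are only used by UNPADDING,
    // but must be positive to pass the argument check at the top of scaling().
    PaddleSolution::utils::scaling(PaddleSolution::utils::SCALE_TYPE::RANGE_SCALING,
                                   w, h, 1333, 800, 800, 1333, ratio);
    // The long side hits the 1333 cap, so ratio ends up around 0.67.
    std::cout << "resized to " << w << "x" << h << ", ratio=" << ratio << std::endl;
    return 0;
}
```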