Commit 75d4c0f6 authored by Jack Zhou, committed by qingqing01

Update CMakeLists.txt to adapt to the directory structure of win v1.6 inference lib (#20)

2. change code style
3. add error messages: display details about input errors and config file errors
Parent df6cfd8d
cmake_minimum_required(VERSION 3.0)
project(cpp_inference_demo CXX C)
message("cmake module path: ${CMAKE_MODULE_PATH}")
message("cmake root path: ${CMAKE_ROOT}")
option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
......@@ -70,6 +69,7 @@ link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")
link_directories("${CMAKE_CURRENT_BINARY_DIR}")
if (WIN32)
include_directories("${PADDLE_DIR}/paddle/fluid/inference")
include_directories("${PADDLE_DIR}/paddle/include")
link_directories("${PADDLE_DIR}/paddle/fluid/inference")
include_directories("${OPENCV_DIR}/build/include")
include_directories("${OPENCV_DIR}/opencv/build/include")
......@@ -134,6 +134,7 @@ if(WITH_MKL)
else ()
set(MATH_LIB ${PADDLE_DIR}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
execute_process(COMMAND cp -r ${PADDLE_DIR}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib)
endif ()
set(MKLDNN_PATH "${PADDLE_DIR}/third_party/install/mkldnn")
if(EXISTS ${MKLDNN_PATH})
......@@ -148,22 +149,22 @@ else()
set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
if(WIN32)
if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}")
set(DEPS
${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
set(DEPS
${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
endif()
if(WITH_STATIC_LIB)
if (WIN32)
set(DEPS
${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
else ()
set(DEPS
${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
else()
if (WIN32)
set(DEPS
${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
else ()
set(DEPS
${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
endif()
if (NOT WIN32)
......@@ -242,8 +243,6 @@ if (NOT WIN32)
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libippicv${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
endif()
# message(${CMAKE_CXX_FLAGS})
# set(CMAKE_CXX_FLAGS "-g ${CMAKE_CXX_FLAGS}")
SET(PADDLESEG_INFERENCE_SRCS preprocessor/preprocessor.cpp
preprocessor/preprocessor_detection.cpp predictor/detection_predictor.cpp
......@@ -265,7 +264,7 @@ if (WIN32)
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
)
endif()
......
......@@ -65,7 +65,7 @@ deploy
After compilation, the required executable and linked libraries have been generated. Using our `faster rcnn` based detection model as an example, this section walks through the general workflow for deploying an image detection model.
### 1. Download the model files
### 4.1. Download the model files
We provide faster rcnn and faster rcnn+fpn models trained for prediction on the coco17 dataset, available at the following links: [faster rcnn sample model download](https://paddleseg.bj.bcebos.com/inference/faster_rcnn_pp50.zip)
[faster rcnn + fpn sample model download](https://paddleseg.bj.bcebos.com/inference/faster_rcnn_pp50_fpn.zip)
......@@ -83,7 +83,7 @@ faster_rcnn_pp50/
**Assume** the corresponding path on `Linux` is `/root/projects/models/faster_rcnn_pp50/`.
### 2. Modify the configuration
### 4.2. Modify the configuration
The `conf` directory of the `inference` source code (this directory) provides `detection_rcnn.yaml`, a sample configuration file based on faster rcnn. The relevant fields and their meanings are described below:
......@@ -118,7 +118,7 @@ DEPLOY:
# Prediction mode; NATIVE and ANALYSIS are supported
PREDICTOR_MODE: "ANALYSIS"
# batch_size used for each prediction
BATCH_SIZE : 3
# Maximum length of the long side after scaling; -1 means no limit.
RESIZE_MAX_SIZE: 1333
# Number of input tensors.
......@@ -127,7 +127,9 @@ DEPLOY:
```
Set the `MODEL_PATH` field to the directory where you placed the model files downloaded and extracted in the **previous step**. For more configuration fields, see the document [deployment configuration file reference](./docs/configuration.md).
### 3. Run the prediction
**Note**: when using the CPU version of the inference library, `USE_GPU` must be set to 0, otherwise prediction will not work.
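For example, a minimal sketch of the two fields discussed above when deploying with the CPU-only library (the model directory is the example path from the download step; treat it as illustrative):

```yaml
DEPLOY:
  # directory containing the downloaded __model__/__params__ files
  MODEL_PATH: "/root/projects/models/faster_rcnn_pp50"
  # must be 0 when the CPU version of the inference library is used
  USE_GPU: 0
```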
### 4.3. Run the prediction
In a terminal (on Windows, use `cmd`), change the current directory to the directory containing the generated executable.
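For illustration, a minimal invocation sketch (an assumption rather than a literal command from this repo: the executable name `detection_demo` is taken from the Windows example later in this document, and the config/image paths from this README):

```bash
# predict every image under input_dir with the model described by the yaml config
./detection_demo --conf=conf/detection_rcnn.yaml --input_dir=images/detection_rcnn/
```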
......@@ -155,7 +157,7 @@ DEPLOY:
To run the visualization script, pass the image path, the detection-result pb file path, the box threshold, and the class-to-label mapping file path as command-line arguments to obtain the visualized image `X.png` (the tools directory provides coco17.json, the class-label mapping file for coco17).
```bash
python vis.py --img_path=../build/images/detection_rcnn/000000087038.jpg --img_result_path=../build/images/detection_rcnn/000000087038.jpg.pb --threshold=0.1 --c2l_path=coco17.json
```
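For reference, a hypothetical excerpt of what such a class-to-label mapping file looks like (keys are class ids as strings, values are label names; the actual coco17.json shipped under tools may differ):

```json
{
  "1": "person",
  "2": "bicycle",
  "3": "car"
}
```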
......@@ -168,3 +170,4 @@ python vis.py --img_path=../build/images/detection_rcnn/000000087038.jpg --img_r
```Detection result image:```
![Detection result](./demo_images/000000087038.jpg.png)
......@@ -13,6 +13,6 @@ DEPLOY:
CHANNELS : 3
PRE_PROCESSOR: "DetectionPreProcessor"
PREDICTOR_MODE: "ANALYSIS"
BATCH_SIZE : 3
BATCH_SIZE : 1
RESIZE_MAX_SIZE: 1333
FEEDS_SIZE: 3
......@@ -23,18 +23,24 @@ int main(int argc, char** argv) {
// 0. parse args
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_conf.empty() || FLAGS_input_dir.empty()) {
std::cout << "Usage: ./predictor --conf=/config/path/to/your/model --input_dir=/directory/of/your/input/images";
std::cout << "Usage: ./predictor --conf=/config/path/to/your/model "
<< "--input_dir=/directory/of/your/input/images" << std::endl;
return -1;
}
// 1. create a predictor and init it with conf
PaddleSolution::DetectionPredictor predictor;
if (predictor.init(FLAGS_conf) != 0) {
#ifdef _WIN32
std::cerr << "Fail to init predictor" << std::endl;
#else
LOG(FATAL) << "Fail to init predictor";
#endif
return -1;
}
// 2. get all the images with extension '.jpeg' at input_dir
auto imgs = PaddleSolution::utils::get_directory_images(FLAGS_input_dir, ".jpeg|.jpg|.JPEG|.JPG|.bmp|.BMP|.png|.PNG");
auto imgs = PaddleSolution::utils::get_directory_images(FLAGS_input_dir,
".jpeg|.jpg|.JPEG|.JPG|.bmp|.BMP|.png|.PNG");
// 3. predict
predictor.predict(imgs);
......
......@@ -16,7 +16,7 @@
```yaml
# All deployment configuration fields must be placed under the DEPLOY field
DEPLOY:
# Type: required int
# Meaning: whether to use the GPU for prediction. 0: no, 1: yes
USE_GPU: 1
......@@ -70,6 +70,6 @@ DEPLOY:
# Meaning: number of input tensors. Most models do not need to set this. Default is 1.
FEEDS_SIZE: 2
# Type: optional int
# Meaning: make the image sides an integer multiple of this value. Default is 1.
COARSEST_STRIDE: 32
```
# Meaning: make the image sides an integer multiple of this value. Must be set to 32 when using an fpn model. Default is 1.
COARSEST_STRIDE: 32
```
\ No newline at end of file
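As a concrete sketch, the detection-specific fields for a faster rcnn + fpn style model might look like the following (values mirror the sample detection_rcnn.yaml above; the comments are illustrative assumptions, not requirements):

```yaml
DEPLOY:
  # faster rcnn style models feed three tensors (typically image, im_info, im_shape)
  FEEDS_SIZE: 3
  # fpn models require image sides padded to a multiple of 32
  COARSEST_STRIDE: 32
  # cap the long side at 1333 pixels after range scaling
  RESIZE_MAX_SIZE: 1333
```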
......@@ -5,22 +5,30 @@
## Prerequisites
* G++ 4.8.2 ~ 4.9.4
* CUDA 8.0/ CUDA 9.0
* CUDA 9.0 / CUDA 10.0, cudnn 7+ (only required when using the GPU version of the inference library)
* CMake 3.0+
Please make sure the basic software above is installed. **All examples below assume the working directory is `/root/projects/`.**
### Step1: Download the code
1. `mkdir -p /root/projects/paddle_models && cd /root/projects/paddle_models`
2. `git clone https://github.com/PaddlePaddle/models.git`
1. `git clone https://github.com/PaddlePaddle/PaddleDetection.git`
The `C++` inference code is in the `/root/projects/paddle_models/models/PaddleCV/PaddleDetection/inference` directory, which does not depend on any other directory under `PaddleDetection`.
The `C++` inference code is in the `/root/projects/PaddleDetection/inference` directory, which does not depend on any other directory under `PaddleDetection`.
### Step2: Download the PaddlePaddle C++ inference library fluid_inference
Currently only `CUDA 8` and `CUDA 9` are supported; please download the corresponding (develop) version from the [PaddlePaddle inference library download page](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html).
The PaddlePaddle C++ inference library comes in a CPU version and a GPU version. The GPU version is further split by CUDA version into two builds: CUDA 9.0 and CUDA 10.0. Download links for each C++ inference library version are listed below:
| Version | Link |
| ---- | ---- |
| CPU version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cpu_1.6.1.tgz) |
| CUDA 9.0 version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda97_1.6.1.tgz) |
| CUDA 10.0 version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda10_1.6.1.tgz) |
For different CPU types and instruction sets, more prebuilt inference library versions are officially available; the 1.6 release of the inference library is already out. For the other versions, see: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html)
After downloading and extracting, the `/root/projects/fluid_inference` directory contains:
......@@ -53,25 +61,33 @@ make install
### Step4: Build
When building with `CMake`, four build parameters are used to specify the paths of the core dependencies; they are defined as follows:
When building with `CMake`, four build parameters are used to specify the paths of the core dependencies; they are defined as follows (entries marked with * are only needed when using the **GPU version** of the inference library; keep CUDA library versions aligned: **use the 9.0 or 10.0 CUDA libraries, not 9.2 or 10.1**):
| Parameter | Meaning |
| ---- | ---- |
| CUDA_LIB | path to the CUDA libraries |
| CUDNN_LIB | path to the cuDNN libraries |
| OPENCV_DIR | OpenCV installation path |
| * CUDA_LIB | path to the CUDA libraries |
| * CUDNN_LIB | path to the cuDNN libraries |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | path to the Paddle inference library |
When running the steps below, **make sure** to change the corresponding parameters to the actual paths of the dependencies above:
When building against the **GPU version** of the inference library, run the following. **Make sure** to change the corresponding parameters to the actual paths of the dependencies above:
```shell
cd /root/projects/paddle_models/models/PaddleCV/PaddleDetection/inference
cd /root/projects/PaddleDetection/inference
mkdir build && cd build
cmake .. -DWITH_GPU=ON -DPADDLE_DIR=/root/projects/fluid_inference -DCUDA_LIB=/usr/local/cuda/lib64/ -DOPENCV_DIR=/root/projects/opencv3/ -DCUDNN_LIB=/usr/local/cuda/lib64/
cmake .. -DWITH_GPU=ON -DPADDLE_DIR=/root/projects/fluid_inference -DCUDA_LIB=/usr/local/cuda/lib64/ -DOPENCV_DIR=/root/projects/opencv3/ -DCUDNN_LIB=/usr/local/cuda/lib64/ -DWITH_STATIC_LIB=OFF
make
```
When building against the **CPU version** of the inference library, run the following:
```shell
cd /root/projects/PaddleDetection/inference
mkdir build && cd build
cmake .. -DWITH_GPU=OFF -DPADDLE_DIR=/root/projects/fluid_inference -DOPENCV_DIR=/root/projects/opencv3/ -DWITH_STATIC_LIB=OFF
make
```
### Step5: Prediction and visualization
......
......@@ -5,27 +5,28 @@
## Prerequisites
* Visual Studio 2015
* CUDA 8.0/ CUDA 9.0
* CUDA 9.0 / CUDA 10.0, cudnn 7+ (only required when using the GPU version of the inference library)
* CMake 3.0+
Please make sure the basic software above is installed. **All examples below assume the working directory is `D:\projects`.**
### Step1: Download the code
1. Open `cmd` and run `cd D:\projects\paddle_models`
2. `git clone https://github.com/PaddlePaddle/models.git`
1. Open `cmd` and run `cd D:\projects`
2. `git clone https://github.com/PaddlePaddle/PaddleDetection.git`
The `C++` inference code is in the `D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference` directory, which does not depend on any other directory under `PaddleDetection`.
The `C++` inference code is in the `D:\projects\PaddleDetection\inference` directory, which does not depend on any other directory under `PaddleDetection`.
### Step2: Download the PaddlePaddle C++ inference library fluid_inference
Download the PaddlePaddle inference library version matching your Windows environment and extract it into the `D:\projects\` directory.
The PaddlePaddle C++ inference library comes in two major flavors: a CPU version and a GPU version. The GPU version is further split by CUDA version into CUDA 9.0 and CUDA 10.0 builds. Download the version matching your Windows environment and extract it into the `D:\projects\` directory. Download links for each C++ inference library version are listed below:
| CUDA | GPU | Download |
|------|------|--------|
| 8.0 | Yes | [fluid_inference.zip](https://bj.bcebos.com/v1/paddleseg/fluid_inference_win.zip) |
| 9.0 | Yes | [fluid_inference_cuda90.zip](https://paddleseg.bj.bcebos.com/fluid_inference_cuda9_cudnn7.zip) |
| Version | Link |
| ---- | ---- |
| CPU version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_install_dir_win_cpu_1.6.zip) |
| CUDA 9.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda9_1.6.1.zip) |
| CUDA 10.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda10_1.6.1.zip) |
After extraction, the `D:\projects\fluid_inference` directory contains:
```
......@@ -42,9 +43,9 @@ fluid_inference
1. Download the 3.4.6 release for the Windows platform from the OpenCV site: [download link](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download)
2. Run the downloaded executable and extract OpenCV into a directory of your choice, e.g. `D:\projects\opencv`
3. Configure the environment variables as follows
- My Computer -> Properties -> Advanced system settings -> Environment Variables
- In the system variables, find Path (create it if it does not exist) and double-click to edit it
- Click New, fill in the opencv path, e.g. `D:\projects\opencv\build\x64\vc14\bin`, and save (a command-line alternative is sketched below)
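As an alternative to the GUI steps above, the same PATH entry can be appended from a `cmd` prompt; a sketch (assuming OpenCV was extracted to `D:\projects\opencv`; `setx` writes the user PATH and only affects newly opened terminals):

```bash
setx PATH "%PATH%;D:\projects\opencv\build\x64\vc14\bin"
```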
### Step4: Build the code, using VS2015 as an example
......@@ -56,35 +57,52 @@ fluid_inference
```
call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
```
The meanings of the three build parameters are described below (entries marked with * are only needed when using the **GPU version** of the inference library; keep CUDA library versions aligned: **use the 9.0 or 10.0 CUDA libraries, not 9.2, 10.1, or similar**):
* Build the project with CMake
* PADDLE_DIR: path to the fluid_inference inference library
* CUDA_LIB: CUDA dynamic library directory; adjust according to your actual installation
* OPENCV_DIR: OpenCV extraction directory
| Parameter | Meaning |
| ---- | ---- |
| *CUDA_LIB | path to the CUDA libraries |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | path to the Paddle inference library |
When building against the **GPU version** of the inference library, run the following. **Make sure** to change the corresponding parameters to the actual paths of the dependencies above:
```bash
# change to the inference code directory
cd /d D:\projects\PaddleDetection\inference
# create the build directory; to rebuild from scratch, simply delete this directory first
mkdir build
cd build
# generate the VS project with cmake
D:\projects\PaddleDetection\inference\build> cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DPADDLE_DIR=D:\projects\fluid_inference -DCUDA_LIB=D:\projects\cudalib\v9.0\lib\x64 -DOPENCV_DIR=D:\projects\opencv -T host=x64
```
When building against the **CPU version** of the inference library, run the following:
```bash
# change to the inference code directory
cd /d D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference
cd /d D:\projects\PaddleDetection\inference
# create the build directory; to rebuild from scratch, simply delete this directory first
mkdir build
cd build
# generate the VS project with cmake
D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference\build> cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DPADDLE_DIR=D:\projects\fluid_inference -DCUDA_LIB=D:\projects\cudalib\v9.0\lib\x64 -DOPENCV_DIR=D:\projects\opencv -T host=x64
D:\projects\PaddleDetection\inference\build> cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=OFF -DPADDLE_DIR=D:\projects\fluid_inference -DOPENCV_DIR=D:\projects\opencv -T host=x64
```
The `cmake` option `-G` selects the generator for the corresponding VS version; adjust it to match your own `VS` release. See the [cmake documentation](https://cmake.org/cmake/help/v3.15/manual/cmake-generators.7.html) for details.
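For example, with `Visual Studio 2019` the generator name changes and the target architecture moves to the `-A` option; a sketch (an assumption for illustration, the `-D` options stay the same as above):

```bash
cmake .. -G "Visual Studio 16 2019" -A x64 -DWITH_GPU=OFF -DPADDLE_DIR=D:\projects\fluid_inference -DOPENCV_DIR=D:\projects\opencv
```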
* Build the executable
```
D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference\build> msbuild /m /p:Configuration=Release cpp_inference_demo.sln
D:\projects\PaddleDetection\inference\build> msbuild /m /p:Configuration=Release cpp_inference_demo.sln
```
### Step5: Prediction and visualization
The executable produced by the `Visual Studio 2015` build above is in the `build\release` directory; change to that directory:
```
cd /d D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference\build\release
cd /d D:\projects\PaddleDetection\inference\build\release
```
Then run:
......@@ -93,4 +111,4 @@ cd /d D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference\build\
detection_demo.exe --conf=/path/to/your/conf --input_dir=/path/to/your/input/data/directory
```
For more detailed instructions, see the ReadMe document: [prediction and visualization section](../README.md)
\ No newline at end of file
......@@ -6,7 +6,7 @@ Windows 平台下,我们使用`Visual Studio 2015` 和 `Visual Studio 2019 Com
## Prerequisites
* Visual Studio 2019
* CUDA 8.0/ CUDA 9.0
* CUDA 9.0 / CUDA 10.0, cudnn 7+ (only required when using the GPU version of the inference library)
* CMake 3.0+
Please make sure the basic software above is installed; we use the Community edition of `VS2019`.
......@@ -15,20 +15,21 @@ Windows 平台下,我们使用`Visual Studio 2015` 和 `Visual Studio 2019 Com
### Step1: Download the code
1. Download the source code: [download link](https://github.com/PaddlePaddle/models/archive/develop.zip)
2. Extract it and rename the extracted directory to `paddle_models`
1. Download the source code: [download link](https://github.com/PaddlePaddle/PaddleDetection/archive/master.zip)
2. Extract it and rename the extracted directory to `PaddleDetection`
The following uses the code directory path `D:\projects\paddle_models` as an example.
The following uses the code directory path `D:\projects\PaddleDetection` as an example.
### Step2: Download the PaddlePaddle C++ inference library fluid_inference
Download the PaddlePaddle inference library version matching your Windows environment and extract it into the `D:\projects\` directory.
The PaddlePaddle C++ inference library comes in two major flavors: a CPU version and a GPU version. The GPU version is further split by CUDA version into two builds: CUDA 9.0 and CUDA 10.0. Download the version matching your Windows environment and extract it into the `D:\projects\` directory. Download links for each C++ inference library version are listed below:
| CUDA | GPU | Download |
|------|------|--------|
| 8.0 | Yes | [fluid_inference.zip](https://bj.bcebos.com/v1/paddleseg/fluid_inference_win.zip) |
| 9.0 | Yes | [fluid_inference_cuda90.zip](https://paddleseg.bj.bcebos.com/fluid_inference_cuda9_cudnn7.zip) |
| Version | Link |
| ---- | ---- |
| CPU version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_install_dir_win_cpu_1.6.zip) |
| CUDA 9.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda9_1.6.1.zip) |
| CUDA 10.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda10_1.6.1.zip) |
After extraction, the `D:\projects\fluid_inference` directory contains:
```
......@@ -39,16 +40,15 @@ fluid_inference
|
└── version.txt # version and build information
```
**Note:** the `CUDA90` version extracts to a directory named `fluid_inference_cuda90`.
### Step3: Install and configure OpenCV
1. Download the 3.4.6 release for the Windows platform from the OpenCV site: [download link](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download)
2. Run the downloaded executable and extract OpenCV into a directory of your choice, e.g. `D:\projects\opencv`
3. Configure the environment variables as follows
- My Computer -> Properties -> Advanced system settings -> Environment Variables
- In the system variables, find Path (create it if it does not exist) and double-click to edit it
- Click New, fill in the opencv path, e.g. `D:\projects\opencv\build\x64\vc14\bin`, and save
### Step4: Build the CMake project directly with Visual Studio 2019
......@@ -67,16 +67,17 @@ fluid_inference
4. Click `Browse` and set the build options to the paths of `CUDA`, `OpenCV`, and the `Paddle inference library` respectively
![step4](https://paddleseg.bj.bcebos.com/inference/vs2019_step5.png)
The meanings of the three build parameters are described below:
The meanings of the three build parameters are described below (entries marked with * are only needed when using the **GPU version** of the inference library; keep CUDA library versions aligned: **use the 9.0 or 10.0 CUDA libraries, not 9.2, 10.1, or similar**):
| Parameter | Meaning |
| ---- | ---- |
| CUDA_LIB | path to the CUDA libraries |
| *CUDA_LIB | path to the CUDA libraries |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | path to the Paddle inference library |
**Note**: when using the CPU version of the inference library, uncheck CUDA_LIB.
![step4](https://paddleseg.bj.bcebos.com/inference/vs2019_step5.png)
**After the settings are complete**, click `Save and generate CMake cache to load variables` shown in the figure above.
5. Click `Build` -> `Build All`
......@@ -89,7 +90,7 @@ fluid_inference
The executable produced by the `Visual Studio 2019` build above is in the `out\build\x64-Release` directory; open `cmd` and change to that directory:
```
cd D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference\build\x64-Release
cd D:\projects\PaddleDetection\inference\out\build\x64-Release
```
Then run:
......@@ -98,4 +99,4 @@ cd D:\projects\paddle_models\models\PaddleCV\PaddleDetection\inference\build\x64
detection_demo.exe --conf=/path/to/your/conf --input_dir=/path/to/your/input/data/directory
```
For more detailed instructions, see the ReadMe document: [prediction and visualization section](../README.md)
\ No newline at end of file
......@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
......@@ -14,39 +14,40 @@
#pragma once
#include <glog/logging.h>
#include <yaml-cpp/yaml.h>
#include <paddle_inference_api.h>
#include <memory>
#include <string>
#include <vector>
#include <thread>
#include <chrono>
#include <algorithm>
#include <glog/logging.h>
#include <yaml-cpp/yaml.h>
#include <opencv2/opencv.hpp>
#include <paddle_inference_api.h>
#include <utils/conf_parser.h>
#include <utils/utils.h>
#include <preprocessor/preprocessor.h>
#include "utils/conf_parser.h"
#include "utils/utils.h"
#include "preprocessor/preprocessor.h"
namespace PaddleSolution {
class DetectionPredictor {
public:
// init a predictor with a yaml config file
int init(const std::string& conf);
// predict api
int predict(const std::vector<std::string>& imgs);
class DetectionPredictor {
public:
// init a predictor with a yaml config file
int init(const std::string& conf);
// predict api
int predict(const std::vector<std::string>& imgs);
private:
int native_predict(const std::vector<std::string>& imgs);
int analysis_predict(const std::vector<std::string>& imgs);
private:
std::vector<float> _buffer;
std::vector<std::string> _imgs_batch;
std::vector<paddle::PaddleTensor> _outputs;
private:
int native_predict(const std::vector<std::string>& imgs);
int analysis_predict(const std::vector<std::string>& imgs);
private:
std::vector<float> _buffer;
std::vector<std::string> _imgs_batch;
std::vector<paddle::PaddleTensor> _outputs;
PaddleSolution::PaddleModelConfigPaser _model_config;
std::shared_ptr<PaddleSolution::ImagePreProcessor> _preprocessor;
std::unique_ptr<paddle::PaddlePredictor> _main_predictor;
};
}
PaddleSolution::PaddleModelConfigPaser _model_config;
std::shared_ptr<PaddleSolution::ImagePreProcessor> _preprocessor;
std::unique_ptr<paddle::PaddlePredictor> _main_predictor;
};
} // namespace PaddleSolution
......@@ -16,14 +16,18 @@
#include "preprocessor.h"
#include "preprocessor_detection.h"
#include <iostream>
namespace PaddleSolution {
std::shared_ptr<ImagePreProcessor> create_processor(const std::string& conf_file) {
auto config = std::make_shared<PaddleSolution::PaddleModelConfigPaser>();
if (!config->load_config(conf_file)) {
LOG(FATAL) << "fail to laod conf file [" << conf_file << "]";
#ifdef _WIN32
std::cerr << "fail to load conf file [" << conf_file << "]" << std::endl;
#else
LOG(FATAL) << "fail to load conf file [" << conf_file << "]";
#endif
return nullptr;
}
......@@ -34,10 +38,13 @@ namespace PaddleSolution {
}
return p;
}
LOG(FATAL) << "unknown processor_name [" << config->_pre_processor << "]";
#ifdef _WIN32
std::cerr << "unknown processor_name [" << config->_pre_processor << "],"
<< "please check whether PRE_PROCESSOR is set correctly" << std::endl;
#else
LOG(FATAL) << "unknown processor_name [" << config->_pre_processor << "],"
<< "please check whether PRE_PROCESSOR is set correctly";
#endif
return nullptr;
}
}
} // namespace PaddleSolution
......@@ -26,39 +26,52 @@
namespace PaddleSolution {
class ImagePreProcessor {
protected:
ImagePreProcessor() {};
public:
protected:
ImagePreProcessor() {}
public:
virtual ~ImagePreProcessor() {}
virtual bool single_process(const std::string& fname, float* data, int* ori_w, int* ori_h) {
virtual bool single_process(const std::string& fname,
float* data,
int* ori_w,
int* ori_h) {
return true;
}
virtual bool batch_process(const std::vector<std::string>& imgs, float* data, int* ori_w, int* ori_h) {
virtual bool batch_process(const std::vector<std::string>& imgs,
float* data,
int* ori_w,
int* ori_h) {
return true;
}
virtual bool single_process(const std::string& fname, float* data) {
return true;
}
virtual bool batch_process(const std::vector<std::string>& imgs, float* data) {
virtual bool batch_process(const std::vector<std::string>& imgs,
float* data) {
return true;
}
virtual bool single_process(const std::string& fname, std::vector<float> &data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio) {
return true;
}
virtual bool batch_process(const std::vector<std::string>& imgs, std::vector<std::vector<float>> &data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio) {
return true;
virtual bool single_process(const std::string& fname,
std::vector<float> &data,
int* ori_w, int* ori_h,
int* resize_w, int* resize_h,
float* scale_ratio) {
return true;
}
}; // end of class ImagePreProcessor
std::shared_ptr<ImagePreProcessor> create_processor(const std::string &config_file);
virtual bool batch_process(const std::vector<std::string>& imgs,
std::vector<std::vector<float>> &data,
int* ori_w, int* ori_h, int* resize_w,
int* resize_h, float* scale_ratio) {
return true;
}
}; // end of class ImagePreProcessor
} // end of namespace paddle_solution
std::shared_ptr<ImagePreProcessor>
create_processor(const std::string &config_file);
} // namespace PaddleSolution
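For reference, a minimal usage sketch of the factory declared above (the config path is hypothetical; `create_processor` returns `nullptr` when the config cannot be loaded or `PRE_PROCESSOR` is unknown, as implemented in preprocessor.cpp):

```cpp
#include <iostream>
#include "preprocessor/preprocessor.h"

int main() {
    // build the preprocessor named by the PRE_PROCESSOR field of the yaml config
    auto preprocessor = PaddleSolution::create_processor("conf/detection_rcnn.yaml");
    if (!preprocessor) {
        std::cerr << "failed to create preprocessor" << std::endl;
        return -1;
    }
    // the returned object is then used via single_process()/batch_process()
    return 0;
}
```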
......@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
......@@ -12,119 +12,133 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <thread>
#include <mutex>
#include <glog/logging.h>
#include "preprocessor_detection.h"
#include "utils/utils.h"
namespace PaddleSolution {
bool DetectionPreProcessor::single_process(const std::string& fname, std::vector<float> &vec_data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio) {
cv::Mat im1 = cv::imread(fname, -1);
cv::Mat im;
if(_config->_feeds_size == 3) { // faster rcnn
im1.convertTo(im, CV_32FC3, 1/255.0);
}
else if(_config->_feeds_size == 2){ //yolo v3
im = im1;
}
if (im.data == nullptr || im.empty()) {
LOG(ERROR) << "Failed to open image: " << fname;
return false;
}
int channels = im.channels();
if (channels == 1) {
cv::cvtColor(im, im, cv::COLOR_GRAY2BGR);
}
channels = im.channels();
if (channels != 3 && channels != 4) {
LOG(ERROR) << "Only support rgb(gray) and rgba image.";
return false;
}
*ori_w = im.cols;
*ori_h = im.rows;
cv::cvtColor(im, im, cv::COLOR_BGR2RGB);
//channels = im.channels();
bool DetectionPreProcessor::single_process(const std::string& fname,
std::vector<float> &vec_data,
int* ori_w, int* ori_h,
int* resize_w, int* resize_h,
float* scale_ratio) {
cv::Mat im1 = cv::imread(fname, -1);
cv::Mat im;
if (_config->_feeds_size == 3) { // faster rcnn
im1.convertTo(im, CV_32FC3, 1/255.0);
} else if (_config->_feeds_size == 2) { // yolo v3
im = im1;
}
if (im.data == nullptr || im.empty()) {
#ifdef _WIN32
std::cerr << "Failed to open image: " << fname << std::endl;
#else
LOG(ERROR) << "Failed to open image: " << fname;
#endif
return false;
}
int channels = im.channels();
if (channels == 1) {
cv::cvtColor(im, im, cv::COLOR_GRAY2BGR);
}
channels = im.channels();
if (channels != 3 && channels != 4) {
#ifdef _WIN32
std::cerr << "Only support rgb(gray) and rgba image." << std::endl;
#else
LOG(ERROR) << "Only support rgb(gray) and rgba image.";
#endif
return false;
}
*ori_w = im.cols;
*ori_h = im.rows;
cv::cvtColor(im, im, cv::COLOR_BGR2RGB);
// channels = im.channels();
//resize
int rw = im.cols;
int rh = im.rows;
float im_scale_ratio;
utils::scaling(_config->_resize_type, rw, rh, _config->_resize[0], _config->_resize[1], _config->_target_short_size, _config->_resize_max_size, im_scale_ratio);
cv::Size resize_size(rw, rh);
*resize_w = rw;
*resize_h = rh;
*scale_ratio = im_scale_ratio;
if (*ori_h != rh || *ori_w != rw) {
cv::Mat im_temp;
if(_config->_resize_type == utils::SCALE_TYPE::UNPADDING) {
cv::resize(im, im_temp, resize_size, 0, 0, cv::INTER_LINEAR);
}
else if(_config->_resize_type == utils::SCALE_TYPE::RANGE_SCALING) {
cv::resize(im, im_temp, cv::Size(), im_scale_ratio, im_scale_ratio, cv::INTER_LINEAR);
}
im = im_temp;
// resize
int rw = im.cols;
int rh = im.rows;
float im_scale_ratio;
utils::scaling(_config->_resize_type, rw, rh, _config->_resize[0],
_config->_resize[1], _config->_target_short_size,
_config->_resize_max_size, im_scale_ratio);
cv::Size resize_size(rw, rh);
*resize_w = rw;
*resize_h = rh;
*scale_ratio = im_scale_ratio;
if (*ori_h != rh || *ori_w != rw) {
cv::Mat im_temp;
if (_config->_resize_type == utils::SCALE_TYPE::UNPADDING) {
cv::resize(im, im_temp, resize_size, 0, 0, cv::INTER_LINEAR);
} else if (_config->_resize_type == utils::SCALE_TYPE::RANGE_SCALING) {
cv::resize(im, im_temp, cv::Size(), im_scale_ratio,
im_scale_ratio, cv::INTER_LINEAR);
}
im = im_temp;
}
vec_data.resize(channels * rw * rh);
float *data = vec_data.data();
vec_data.resize(channels * rw * rh);
float *data = vec_data.data();
float* pmean = _config->_mean.data();
float* pscale = _config->_std.data();
for (int h = 0; h < rh; ++h) {
const uchar* uptr = im.ptr<uchar>(h);
const float* fptr = im.ptr<float>(h);
int im_index = 0;
for (int w = 0; w < rw; ++w) {
for (int c = 0; c < channels; ++c) {
int top_index = (c * rh + h) * rw + w;
float pixel;// = static_cast<float>(fptr[im_index]);// / 255.0;
if(_config->_feeds_size == 2){ //yolo v3
pixel = static_cast<float>(uptr[im_index++]) / 255.0;
}
else if(_config->_feeds_size == 3){
pixel = fptr[im_index++];
}
pixel = (pixel - pmean[c]) / pscale[c];
data[top_index] = pixel;
float* pmean = _config->_mean.data();
float* pscale = _config->_std.data();
for (int h = 0; h < rh; ++h) {
const uchar* uptr = im.ptr<uchar>(h);
const float* fptr = im.ptr<float>(h);
int im_index = 0;
for (int w = 0; w < rw; ++w) {
for (int c = 0; c < channels; ++c) {
int top_index = (c * rh + h) * rw + w;
float pixel;
if (_config->_feeds_size == 2) { // yolo v3
pixel = static_cast<float>(uptr[im_index++]) / 255.0;
} else if (_config->_feeds_size == 3) {
pixel = fptr[im_index++];
}
pixel = (pixel - pmean[c]) / pscale[c];
data[top_index] = pixel;
}
}
return true;
}
return true;
}
bool DetectionPreProcessor::batch_process(const std::vector<std::string>& imgs, std::vector<std::vector<float>> &data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio) {
auto ic = _config->_channels;
auto iw = _config->_resize[0];
auto ih = _config->_resize[1];
std::vector<std::thread> threads;
for (int i = 0; i < imgs.size(); ++i) {
std::string path = imgs[i];
int* width = &ori_w[i];
int* height = &ori_h[i];
int* resize_width = &resize_w[i];
int* resize_height = &resize_h[i];
float* sr = &scale_ratio[i];
threads.emplace_back([this, &data, i, path, width, height, resize_width, resize_height, sr] {
std::vector<float> buffer;
single_process(path, buffer, width, height, resize_width, resize_height, sr);
data[i] = buffer;
});
}
for (auto& t : threads) {
if (t.joinable()) {
t.join();
}
}
return true;
bool DetectionPreProcessor::batch_process(const std::vector<std::string>& imgs,
std::vector<std::vector<float>> &data,
int* ori_w, int* ori_h, int* resize_w,
int* resize_h, float* scale_ratio) {
auto ic = _config->_channels;
auto iw = _config->_resize[0];
auto ih = _config->_resize[1];
std::vector<std::thread> threads;
for (int i = 0; i < imgs.size(); ++i) {
std::string path = imgs[i];
int* width = &ori_w[i];
int* height = &ori_h[i];
int* resize_width = &resize_w[i];
int* resize_height = &resize_h[i];
float* sr = &scale_ratio[i];
threads.emplace_back([this, &data, i, path, width, height,
resize_width, resize_height, sr] {
std::vector<float> buffer;
single_process(path, buffer, width, height, resize_width,
resize_height, sr);
data[i] = buffer;
});
}
bool DetectionPreProcessor::init(std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> config) {
_config = config;
return true;
for (auto& t : threads) {
if (t.joinable()) {
t.join();
}
}
return true;
}
bool DetectionPreProcessor::init(std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> config) {
_config = config;
return true;
}
} // namespace PaddleSolution
......@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
......@@ -18,19 +18,23 @@
namespace PaddleSolution {
class DetectionPreProcessor : public ImagePreProcessor {
class DetectionPreProcessor : public ImagePreProcessor {
public:
DetectionPreProcessor() : _config(nullptr) {
}
public:
DetectionPreProcessor() : _config(nullptr) {
};
bool init(std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> config);
bool init(std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> config);
bool single_process(const std::string& fname, std::vector<float> &data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio);
bool single_process(const std::string& fname, std::vector<float> &data,
int* ori_w, int* ori_h, int* resize_w,
int* resize_h, float* scale_ratio);
bool batch_process(const std::vector<std::string>& imgs, std::vector<std::vector<float>> &data, int* ori_w, int* ori_h, int* resize_w, int* resize_h, float* scale_ratio);
private:
std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> _config;
};
bool batch_process(const std::vector<std::string>& imgs,
std::vector<std::vector<float>> &data,
int* ori_w, int* ori_h, int* resize_w,
int* resize_h, float* scale_ratio);
private:
std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> _config;
};
}
} // namespace PaddleSolution
......@@ -134,7 +134,8 @@ _DETECTIONBOX = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[],
serialized_start=43,
serialized_end=175, )
serialized_end=175,
)
_DETECTIONRESULT = _descriptor.Descriptor(
name='DetectionResult',
......@@ -185,7 +186,8 @@ _DETECTIONRESULT = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[],
serialized_start=177,
serialized_end=267, )
serialized_end=267,
)
_DETECTIONRESULT.fields_by_name['detection_boxes'].message_type = _DETECTIONBOX
DESCRIPTOR.message_types_by_name['DetectionBox'] = _DETECTIONBOX
......@@ -193,22 +195,20 @@ DESCRIPTOR.message_types_by_name['DetectionResult'] = _DETECTIONRESULT
DetectionBox = _reflection.GeneratedProtocolMessageType(
'DetectionBox',
(_message.Message, ),
dict(
DESCRIPTOR=_DETECTIONBOX,
__module__='detection_result_pb2'
# @@protoc_insertion_point(class_scope:PaddleSolution.DetectionBox)
))
(_message.Message,),
dict(DESCRIPTOR=_DETECTIONBOX,
__module__='detection_result_pb2'
# @@protoc_insertion_point(class_scope:PaddleSolution.DetectionBox)
))
_sym_db.RegisterMessage(DetectionBox)
DetectionResult = _reflection.GeneratedProtocolMessageType(
'DetectionResult',
(_message.Message, ),
dict(
DESCRIPTOR=_DETECTIONRESULT,
__module__='detection_result_pb2'
# @@protoc_insertion_point(class_scope:PaddleSolution.DetectionResult)
))
(_message.Message,),
dict(DESCRIPTOR=_DETECTIONRESULT,
__module__='detection_result_pb2'
# @@protoc_insertion_point(class_scope:PaddleSolution.DetectionResult)
))
_sym_db.RegisterMessage(DetectionResult)
# @@protoc_insertion_point(module_scope)
......@@ -85,8 +85,8 @@ if __name__ == "__main__":
for box in detection_result.detection_boxes:
if box.score >= Flags.threshold:
box_class = getattr(box, 'class')
text_class_score_str = "%s %.2f" % (
class2LabelMap.get(str(box_class)), box.score)
text_class_score_str = "%s %.2f" % (class2LabelMap.get(
str(box_class)), box.score)
text_point = (int(box.left_top_x), int(box.left_top_y))
ptLeftTop = (int(box.left_top_x), int(box.left_top_y))
......@@ -106,8 +106,8 @@ if __name__ == "__main__":
text_box_left_top = (text_point[0],
text_point[1] - text_size[0][1])
text_box_right_bottom = (
text_point[0] + text_size[0][0], text_point[1])
text_box_right_bottom = (text_point[0] +
text_size[0][0], text_point[1])
cv2.rectangle(img, text_box_left_top,
text_box_right_bottom, color, -1, 8)
......
......@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
......@@ -13,225 +13,285 @@
// limitations under the License.
#pragma once
#include <yaml-cpp/yaml.h>
#include <iostream>
#include <vector>
#include <string>
#include <map>
#include <yaml-cpp/yaml.h>
namespace PaddleSolution {
class PaddleModelConfigPaser {
std::map<std::string, int> _scaling_map;
public:
PaddleModelConfigPaser()
:_class_num(0),
_channels(0),
_use_gpu(0),
_batch_size(1),
_target_short_size(0),
_model_file_name("__model__"),
_param_file_name("__params__"),
_scaling_map{{"UNPADDING", 0},
{"RANGE_SCALING",1}},
_feeds_size(1),
_coarsest_stride(1)
{
}
~PaddleModelConfigPaser() {
}
class PaddleModelConfigPaser {
std::map<std::string, int> _scaling_map;
void reset() {
_crop_size.clear();
_resize.clear();
_mean.clear();
_std.clear();
_img_type.clear();
_class_num = 0;
_channels = 0;
_use_gpu = 0;
_target_short_size = 0;
_batch_size = 1;
_model_file_name = "__model__";
_model_path = "./";
_param_file_name="__params__";
_resize_type = 0;
_resize_max_size = 0;
_feeds_size = 1;
_coarsest_stride = 1;
}
public:
PaddleModelConfigPaser()
:_class_num(0),
_channels(0),
_use_gpu(0),
_batch_size(1),
_target_short_size(0),
_model_file_name("__model__"),
_param_file_name("__params__"),
_scaling_map{{"UNPADDING", 0},
{"RANGE_SCALING", 1}},
_feeds_size(1),
_coarsest_stride(1) {}
std::string process_parenthesis(const std::string& str) {
if (str.size() < 2) {
return str;
}
std::string nstr(str);
if (str[0] == '(' && str.back() == ')') {
nstr[0] = '[';
nstr[str.size() - 1] = ']';
}
return nstr;
}
~PaddleModelConfigPaser() {}
template <typename T>
std::vector<T> parse_str_to_vec(const std::string& str) {
std::vector<T> data;
auto node = YAML::Load(str);
for (const auto& item : node) {
data.push_back(item.as<T>());
}
return data;
void reset() {
_crop_size.clear();
_resize.clear();
_mean.clear();
_std.clear();
_img_type.clear();
_class_num = 0;
_channels = 0;
_use_gpu = 0;
_target_short_size = 0;
_batch_size = 1;
_model_file_name = "__model__";
_model_path = "./";
_param_file_name = "__params__";
_resize_type = 0;
_resize_max_size = 0;
_feeds_size = 1;
_coarsest_stride = 1;
}
std::string process_parenthesis(const std::string& str) {
if (str.size() < 2) {
return str;
}
std::string nstr(str);
if (str[0] == '(' && str.back() == ')') {
nstr[0] = '[';
nstr[str.size() - 1] = ']';
}
return nstr;
}
bool load_config(const std::string& conf_file) {
reset();
template <typename T>
std::vector<T> parse_str_to_vec(const std::string& str) {
std::vector<T> data;
auto node = YAML::Load(str);
for (const auto& item : node) {
data.push_back(item.as<T>());
}
return data;
}
YAML::Node config = YAML::LoadFile(conf_file);
// 1. get resize
bool load_config(const std::string& conf_file) {
reset();
YAML::Node config;
try {
config = YAML::LoadFile(conf_file);
} catch(...) {
return false;
}
// 1. get resize
if (config["DEPLOY"]["EVAL_CROP_SIZE"].IsDefined()) {
auto str = config["DEPLOY"]["EVAL_CROP_SIZE"].as<std::string>();
_resize = parse_str_to_vec<int>(process_parenthesis(str));
} else {
std::cerr << "Please set EVAL_CROP_SIZE: (xx, xx)" << std::endl;
return false;
}
// 0. get crop_size
if (config["DEPLOY"]["CROP_SIZE"].IsDefined()) {
auto crop_str = config["DEPLOY"]["CROP_SIZE"].as<std::string>();
_crop_size = parse_str_to_vec<int>(process_parenthesis(crop_str));
} else {
_crop_size = _resize;
}
// 0. get crop_size
if(config["DEPLOY"]["CROP_SIZE"].IsDefined()) {
auto crop_str = config["DEPLOY"]["CROP_SIZE"].as<std::string>();
_crop_size = parse_str_to_vec<int>(process_parenthesis(crop_str));
}
else {
_crop_size = _resize;
}
// 2. get mean
// 2. get mean
if (config["DEPLOY"]["MEAN"].IsDefined()) {
for (const auto& item : config["DEPLOY"]["MEAN"]) {
_mean.push_back(item.as<float>());
}
// 3. get std
} else {
std::cerr << "Please set MEAN: [xx, xx, xx]" << std::endl;
return false;
}
// 3. get std
if(config["DEPLOY"]["STD"].IsDefined()) {
for (const auto& item : config["DEPLOY"]["STD"]) {
_std.push_back(item.as<float>());
}
// 4. get image type
} else {
std::cerr << "Please set STD: [xx, xx, xx]" << std::endl;
return false;
}
// 4. get image type
if (config["DEPLOY"]["IMAGE_TYPE"].IsDefined()) {
_img_type = config["DEPLOY"]["IMAGE_TYPE"].as<std::string>();
// 5. get class number
} else {
std::cerr << "Please set IMAGE_TYPE: \"rgb\" or \"rgba\"" << std::endl;
return false;
}
// 5. get class number
if (config["DEPLOY"]["NUM_CLASSES"].IsDefined()) {
_class_num = config["DEPLOY"]["NUM_CLASSES"].as<int>();
// 7. set model path
} else {
std::cerr << "Please set NUM_CLASSES: x" << std::endl;
return false;
}
// 7. set model path
if (config["DEPLOY"]["MODEL_PATH"].IsDefined()) {
_model_path = config["DEPLOY"]["MODEL_PATH"].as<std::string>();
// 8. get model file_name
} else {
std::cerr << "Please set MODEL_PATH: \"/path/to/model_dir\"" << std::endl;
return false;
}
// 8. get model file_name
if (config["DEPLOY"]["MODEL_FILENAME"].IsDefined()) {
_model_file_name = config["DEPLOY"]["MODEL_FILENAME"].as<std::string>();
// 9. get model param file name
_param_file_name = config["DEPLOY"]["PARAMS_FILENAME"].as<std::string>();
// 10. get pre_processor
} else {
_model_file_name = "__model__";
}
// 9. get model param file name
if (config["DEPLOY"]["PARAMS_FILENAME"].IsDefined()) {
_param_file_name
= config["DEPLOY"]["PARAMS_FILENAME"].as<std::string>();
} else {
_param_file_name = "__params__";
}
// 10. get pre_processor
if (config["DEPLOY"]["PRE_PROCESSOR"].IsDefined()) {
_pre_processor = config["DEPLOY"]["PRE_PROCESSOR"].as<std::string>();
// 11. use_gpu
} else {
std::cerr << "Please set PRE_PROCESSOR: \"DetectionPreProcessor\"" << std::endl;
return false;
}
// 11. use_gpu
if (config["DEPLOY"]["USE_GPU"].IsDefined()) {
_use_gpu = config["DEPLOY"]["USE_GPU"].as<int>();
// 12. predictor_mode
} else {
_use_gpu = 0;
}
// 12. predictor_mode
if (config["DEPLOY"]["PREDICTOR_MODE"].IsDefined()) {
_predictor_mode = config["DEPLOY"]["PREDICTOR_MODE"].as<std::string>();
// 13. batch_size
} else {
std::cerr << "Please set PREDICTOR_MODE: \"NATIVE\" or \"ANALYSIS\"" << std::endl;
return false;
}
// 13. batch_size
if (config["DEPLOY"]["BATCH_SIZE"].IsDefined()) {
_batch_size = config["DEPLOY"]["BATCH_SIZE"].as<int>();
// 14. channels
} else {
_batch_size = 1;
}
// 14. channels
if (config["DEPLOY"]["CHANNELS"].IsDefined()) {
_channels = config["DEPLOY"]["CHANNELS"].as<int>();
// 15. target_short_size
if(config["DEPLOY"]["TARGET_SHORT_SIZE"].IsDefined()) {
_target_short_size = config["DEPLOY"]["TARGET_SHORT_SIZE"].as<int>();
}
// 16.resize_type
if(config["DEPLOY"]["RESIZE_TYPE"].IsDefined() &&
_scaling_map.find(config["DEPLOY"]["RESIZE_TYPE"].as<std::string>()) != _scaling_map.end()) {
_resize_type = _scaling_map[config["DEPLOY"]["RESIZE_TYPE"].as<std::string>()];
}
else{
_resize_type = 0;
}
// 17.resize_max_size
if(config["DEPLOY"]["RESIZE_MAX_SIZE"].IsDefined()) {
_resize_max_size = config["DEPLOY"]["RESIZE_MAX_SIZE"].as<int>();
}
// 18.feeds_size
if(config["DEPLOY"]["FEEDS_SIZE"].IsDefined()){
_feeds_size = config["DEPLOY"]["FEEDS_SIZE"].as<int>();
}
// 19. coarsest_stride
if(config["DEPLOY"]["COARSEST_STRIDE"].IsDefined()) {
_coarsest_stride = config["DEPLOY"]["COARSEST_STRIDE"].as<int>();
}
return true;
} else {
std::cerr << "Please set CHANNELS: x" << std::endl;
return false;
}
// 15. target_short_size
if (config["DEPLOY"]["TARGET_SHORT_SIZE"].IsDefined()) {
_target_short_size = config["DEPLOY"]["TARGET_SHORT_SIZE"].as<int>();
}
// 16.resize_type
if (config["DEPLOY"]["RESIZE_TYPE"].IsDefined() &&
_scaling_map.find(config["DEPLOY"]["RESIZE_TYPE"].as<std::string>()) != _scaling_map.end()) {
_resize_type = _scaling_map[config["DEPLOY"]["RESIZE_TYPE"].as<std::string>()];
} else {
_resize_type = 0;
}
// 17.resize_max_size
if (config["DEPLOY"]["RESIZE_MAX_SIZE"].IsDefined()) {
_resize_max_size = config["DEPLOY"]["RESIZE_MAX_SIZE"].as<int>();
}
// 18.feeds_size
if (config["DEPLOY"]["FEEDS_SIZE"].IsDefined()) {
_feeds_size = config["DEPLOY"]["FEEDS_SIZE"].as<int>();
}
// 19. coarsest_stride
if (config["DEPLOY"]["COARSEST_STRIDE"].IsDefined()) {
_coarsest_stride = config["DEPLOY"]["COARSEST_STRIDE"].as<int>();
}
return true;
}
void debug() const {
std::cout << "SCALE_RESIZE: (" << _resize[0] << ", " << _resize[1] << ")" << std::endl;
void debug() const {
std::cout << "SCALE_RESIZE: (" << _resize[0] << ", "
<< _resize[1] << ")" << std::endl;
std::cout << "MEAN: [";
for (int i = 0; i < _mean.size(); ++i) {
if (i != _mean.size() - 1) {
std::cout << _mean[i] << ", ";
} else {
std::cout << _mean[i];
}
std::cout << "MEAN: [";
for (int i = 0; i < _mean.size(); ++i) {
if (i != _mean.size() - 1) {
std::cout << _mean[i] << ", ";
} else {
std::cout << _mean[i];
}
std::cout << "]" << std::endl;
}
std::cout << "]" << std::endl;
std::cout << "STD: [";
for (int i = 0; i < _std.size(); ++i) {
if (i != _std.size() - 1) {
std::cout << _std[i] << ", ";
}
else {
std::cout << _std[i];
}
std::cout << "STD: [";
for (int i = 0; i < _std.size(); ++i) {
if (i != _std.size() - 1) {
std::cout << _std[i] << ", ";
} else {
std::cout << _std[i];
}
std::cout << "]" << std::endl;
std::cout << "DEPLOY.TARGET_SHORT_SIZE: " << _target_short_size << std::endl;
std::cout << "DEPLOY.IMAGE_TYPE: " << _img_type << std::endl;
std::cout << "DEPLOY.NUM_CLASSES: " << _class_num << std::endl;
std::cout << "DEPLOY.CHANNELS: " << _channels << std::endl;
std::cout << "DEPLOY.MODEL_PATH: " << _model_path << std::endl;
std::cout << "DEPLOY.MODEL_FILENAME: " << _model_file_name << std::endl;
std::cout << "DEPLOY.PARAMS_FILENAME: " << _param_file_name << std::endl;
std::cout << "DEPLOY.PRE_PROCESSOR: " << _pre_processor << std::endl;
std::cout << "DEPLOY.USE_GPU: " << _use_gpu << std::endl;
std::cout << "DEPLOY.PREDICTOR_MODE: " << _predictor_mode << std::endl;
std::cout << "DEPLOY.BATCH_SIZE: " << _batch_size << std::endl;
}
//DEPLOY.COARSEST_STRIDE
int _coarsest_stride;
// DEPLOY.FEEDS_SIZE
int _feeds_size;
// DEPLOY.RESIZE_TYPE 0:unpadding 1:rangescaling Default:0
int _resize_type;
// DEPLOY.RESIZE_MAX_SIZE
int _resize_max_size;
// DEPLOY.CROP_SIZE
std::vector<int> _crop_size;
// DEPLOY.SCALE_RESIZE
std::vector<int> _resize;
// DEPLOY.MEAN
std::vector<float> _mean;
// DEPLOY.STD
std::vector<float> _std;
// DEPLOY.IMAGE_TYPE
std::string _img_type;
// DEPLOY.TARGET_SHORT_SIZE
int _target_short_size;
// DEPLOY.NUM_CLASSES
int _class_num;
// DEPLOY.CHANNELS
int _channels;
// DEPLOY.MODEL_PATH
std::string _model_path;
// DEPLOY.MODEL_FILENAME
std::string _model_file_name;
// DEPLOY.PARAMS_FILENAME
std::string _param_file_name;
// DEPLOY.PRE_PROCESSOR
std::string _pre_processor;
// DEPLOY.USE_GPU
int _use_gpu;
// DEPLOY.PREDICTOR_MODE
std::string _predictor_mode;
// DEPLOY.BATCH_SIZE
int _batch_size;
};
}
}
std::cout << "]" << std::endl;
std::cout << "DEPLOY.TARGET_SHORT_SIZE: " << _target_short_size
<< std::endl;
std::cout << "DEPLOY.IMAGE_TYPE: " << _img_type << std::endl;
std::cout << "DEPLOY.NUM_CLASSES: " << _class_num << std::endl;
std::cout << "DEPLOY.CHANNELS: " << _channels << std::endl;
std::cout << "DEPLOY.MODEL_PATH: " << _model_path << std::endl;
std::cout << "DEPLOY.MODEL_FILENAME: " << _model_file_name
<< std::endl;
std::cout << "DEPLOY.PARAMS_FILENAME: " << _param_file_name
<< std::endl;
std::cout << "DEPLOY.PRE_PROCESSOR: " << _pre_processor << std::endl;
std::cout << "DEPLOY.USE_GPU: " << _use_gpu << std::endl;
std::cout << "DEPLOY.PREDICTOR_MODE: " << _predictor_mode << std::endl;
std::cout << "DEPLOY.BATCH_SIZE: " << _batch_size << std::endl;
}
// DEPLOY.COARSEST_STRIDE
int _coarsest_stride;
// DEPLOY.FEEDS_SIZE
int _feeds_size;
// DEPLOY.RESIZE_TYPE 0:unpadding 1:rangescaling Default:0
int _resize_type;
// DEPLOY.RESIZE_MAX_SIZE
int _resize_max_size;
// DEPLOY.CROP_SIZE
std::vector<int> _crop_size;
// DEPLOY.SCALE_RESIZE
std::vector<int> _resize;
// DEPLOY.MEAN
std::vector<float> _mean;
// DEPLOY.STD
std::vector<float> _std;
// DEPLOY.IMAGE_TYPE
std::string _img_type;
// DEPLOY.TARGET_SHORT_SIZE
int _target_short_size;
// DEPLOY.NUM_CLASSES
int _class_num;
// DEPLOY.CHANNELS
int _channels;
// DEPLOY.MODEL_PATH
std::string _model_path;
// DEPLOY.MODEL_FILENAME
std::string _model_file_name;
// DEPLOY.PARAMS_FILENAME
std::string _param_file_name;
// DEPLOY.PRE_PROCESSOR
std::string _pre_processor;
// DEPLOY.USE_GPU
int _use_gpu;
// DEPLOY.PREDICTOR_MODE
std::string _predictor_mode;
// DEPLOY.BATCH_SIZE
int _batch_size;
};
} // namespace PaddleSolution
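A minimal usage sketch of the config parser above (the yaml path is hypothetical; `load_config` returns false when the file is missing or a required DEPLOY field is absent):

```cpp
#include "utils/conf_parser.h"

int main() {
    PaddleSolution::PaddleModelConfigPaser config;
    if (!config.load_config("conf/detection_rcnn.yaml")) {
        return -1;  // yaml missing or a required field such as MODEL_PATH not set
    }
    config.debug();  // print the parsed DEPLOY fields to stdout
    return 0;
}
```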
......@@ -28,97 +28,103 @@
#endif
namespace PaddleSolution {
namespace utils {
enum SCALE_TYPE{
UNPADDING,
RANGE_SCALING
};
inline std::string path_join(const std::string& dir, const std::string& path) {
std::string seperator = "/";
#ifdef _WIN32
seperator = "\\";
#endif
return dir + seperator + path;
namespace utils {
enum SCALE_TYPE{
UNPADDING,
RANGE_SCALING
};
inline std::string path_join(const std::string& dir, const std::string& path) {
std::string seperator = "/";
#ifdef _WIN32
seperator = "\\";
#endif
return dir + seperator + path;
}
#ifndef _WIN32
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(const std::string& path,
const std::string& exts) {
std::vector<std::string> imgs;
struct dirent *entry;
DIR *dir = opendir(path.c_str());
if (dir == NULL) {
closedir(dir);
return imgs;
}
while ((entry = readdir(dir)) != NULL) {
std::string item = entry->d_name;
auto ext = strrchr(entry->d_name, '.');
if (!ext || std::string(ext) == "." || std::string(ext) == "..") {
continue;
}
#ifndef _WIN32
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(const std::string& path, const std::string& exts)
{
std::vector<std::string> imgs;
struct dirent *entry;
DIR *dir = opendir(path.c_str());
if (dir == NULL) {
closedir(dir);
return imgs;
}
if (exts.find(ext) != std::string::npos) {
imgs.push_back(path_join(path, entry->d_name));
}
}
sort(imgs.begin(), imgs.end());
return imgs;
}
#else
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(const std::string& path,
const std::string& exts) {
std::vector<std::string> imgs;
for (const auto& item :
std::experimental::filesystem::directory_iterator(path)) {
auto suffix = item.path().extension().string();
if (exts.find(suffix) != std::string::npos && suffix.size() > 0) {
auto fullname = path_join(path, item.path().filename().string());
imgs.push_back(item.path().string());
}
}
sort(imgs.begin(), imgs.end());
return imgs;
}
#endif
while ((entry = readdir(dir)) != NULL) {
std::string item = entry->d_name;
auto ext = strrchr(entry->d_name, '.');
if (!ext || std::string(ext) == "." || std::string(ext) == "..") {
continue;
}
if (exts.find(ext) != std::string::npos) {
imgs.push_back(path_join(path, entry->d_name));
}
}
sort(imgs.begin(), imgs.end());
return imgs;
inline int scaling(int resize_type, int &w, int &h, int new_w, int new_h,
int target_size, int max_size, float &im_scale_ratio) {
if (w <= 0 || h <= 0 || new_w <= 0 || new_h <= 0) {
return -1;
}
switch (resize_type) {
case SCALE_TYPE::UNPADDING:
{
w = new_w;
h = new_h;
im_scale_ratio = 0;
}
#else
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(const std::string& path, const std::string& exts)
break;
case SCALE_TYPE::RANGE_SCALING:
{
std::vector<std::string> imgs;
for (const auto& item : std::experimental::filesystem::directory_iterator(path)) {
auto suffix = item.path().extension().string();
if (exts.find(suffix) != std::string::npos && suffix.size() > 0) {
auto fullname = path_join(path, item.path().filename().string());
imgs.push_back(item.path().string());
int im_max_size = std::max(w, h);
int im_min_size = std::min(w, h);
float scale_ratio = static_cast<float>(target_size)
/ static_cast<float>(im_min_size);
if (max_size > 0) {
if (round(scale_ratio * im_max_size) > max_size) {
scale_ratio = static_cast<float>(max_size)
/ static_cast<float>(im_max_size);
}
}
sort(imgs.begin(), imgs.end());
return imgs;
w = round(scale_ratio * static_cast<float>(w));
h = round(scale_ratio * static_cast<float>(h));
im_scale_ratio = scale_ratio;
}
#endif
inline int scaling(int resize_type, int &w, int &h, int new_w, int new_h, int target_size, int max_size, float &im_scale_ratio)
{
if(w <= 0 || h <= 0 || new_w <= 0 || new_h <= 0){
return -1;
}
switch(resize_type) {
case SCALE_TYPE::UNPADDING:
{
w = new_w;
h = new_h;
im_scale_ratio=0;
}
break;
case SCALE_TYPE::RANGE_SCALING:
{
int im_max_size = std::max(w, h);
int im_min_size = std::min(w, h);
float scale_ratio= static_cast<float>(target_size) / static_cast<float>(im_min_size);
if(max_size > 0) {
if(round(scale_ratio * im_max_size) > max_size) {
scale_ratio = static_cast<float>(max_size) / static_cast<float>(im_max_size);
}
}
w = round(scale_ratio * static_cast<float>(w));
h = round(scale_ratio * static_cast<float>(h));
im_scale_ratio = scale_ratio;
}
break;
default :
{
std::cout << "Can't support this type of scaling strategy." << std::endl;
std::cout << "Throw exception at file " << __FILE__ << " on line " << __LINE__ << std::endl;
throw 0;
}
break;
}
return 0;
}
break;
default :
{
std::cout << "Can't support this type of scaling strategy."
<< std::endl;
std::cout << "Throw exception at file " << __FILE__
<< " on line " << __LINE__ << std::endl;
throw 0;
}
break;
}
return 0;
}
} // namespace utils
} // namespace PaddleSolution
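As an illustration of the RANGE_SCALING branch above, a small worked sketch (the numbers are hypothetical; a target short side of 800 and a long-side cap of 1333 match the sample detection config):

```cpp
#include <iostream>
#include "utils/utils.h"

int main() {
    // a 900x600 image, target short side 800, long side capped at 1333
    int w = 900, h = 600;
    float ratio = 0.0f;
    PaddleSolution::utils::scaling(PaddleSolution::utils::SCALE_TYPE::RANGE_SCALING,
                                   w, h, /*new_w=*/1333, /*new_h=*/800,
                                   /*target_size=*/800, /*max_size=*/1333, ratio);
    // ratio = 800 / 600 = 1.33; 900 * 1.33 = 1200 <= 1333, so no extra clamping
    std::cout << w << "x" << h << ", ratio=" << ratio << std::endl;  // 1200x800, ratio=1.33333
    return 0;
}
```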