Unverified · Commit d3a971b6 · authored by Bin Long · committed by GitHub

deploy/cpp : Update cpp inference solution (#478)

* deploy/cpp : Update cpp inference solution

* add README.md to deploy

* deploy/cpp : add PadStride

* deploy/cpp : add output visualization

* deploy/cpp: add video support

* fix coding style problem

* deploy/cpp : add README.md

* deploy/cpp : update docs

* deploy/cpp : update linux build scripts

* deploy/cpp : update linux build docs

* update docs

* deploy/cpp : update windows build docs

* deploy/cpp: fix coding style

* fix coding style

* Create CMakeSettings.json

* Update README.md

* Update windows_vs2019_build.md

* Update CMakeLists.txt

* Update linux_build.md

* Update CMakeLists.txt

* deploy/cpp : update linux build script

* update doc structures and support arch Face
Parent 2e418109
......@@ -89,7 +89,7 @@ PaddleDetection的目的是为工业界和学术界提供丰富、易用的目
- [Inference deployment](inference)
- [Model export tutorial](docs/advanced_tutorials/inference/EXPORT_MODEL.md)
- [Python API examples for the inference engine](docs/advanced_tutorials/inference/INFERENCE.md)
- [C++ inference deployment](inference/README.md)
- [C++ inference deployment](deploy/README.md)
- [Inference benchmark](docs/advanced_tutorials/inference/BENCHMARK_INFER_cn.md)
## Model Zoo
......
# PaddleDetection Inference Deployment
`PaddleDetection` currently supports deployment with `Python` and `C++`, running on both `Windows` and `Linux`.
## Model Export
After training a model that meets your requirements, export it with `tools/export_model.py` if you want to plug it into the C++ inference library.
- [Export tutorial](../docs/advanced_tutorials/inference/EXPORT_MODEL.md)
After export, the directory structure looks like this (using `yolov3_darknet` as an example):
```
yolov3_darknet # model directory
├── infer_cfg.yml # model configuration
├── __model__ # model file
└── __params__ # parameters file
```
At inference time, the path to this directory is passed to the program as an input argument.
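For reference, a hypothetical export invocation is sketched below; the config path, weights path, and output directory are placeholders for your own training artifacts (see the export tutorial above for the exact flags):
```shell
# Hypothetical example; substitute your own config and trained weights.
python tools/export_model.py \
    -c configs/yolov3_darknet.yml \
    -o weights=output/yolov3_darknet/model_final \
    --output_dir=inference_model
```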
## Inference Deployment
- [1. Python inference (Linux and Windows)](./python/)
- [2. C++ inference (Linux and Windows)](./cpp/)
cmake_minimum_required(VERSION 3.0)
project(cpp_inference_demo CXX C)
project(PaddleObjectDetector CXX C)
option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
option(USE_TENSORRT "Compile demo with TensorRT." OFF)
option(WITH_TENSORRT "Compile demo with TensorRT." OFF)
SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
SET(CUDA_LIB "" CACHE PATH "Location of libraries")
include(cmake/yaml-cpp.cmake)
include(external-cmake/yaml-cpp.cmake)
include_directories("${CMAKE_SOURCE_DIR}/")
include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include")
link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")
macro(safe_set_static_flag)
foreach(flag_var
......@@ -36,7 +39,6 @@ if (NOT DEFINED OPENCV_DIR OR ${OPENCV_DIR} STREQUAL "")
endif()
include_directories("${CMAKE_SOURCE_DIR}/")
include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include")
include_directories("${PADDLE_DIR}/")
include_directories("${PADDLE_DIR}/third_party/install/protobuf/include")
include_directories("${PADDLE_DIR}/third_party/install/glog/include")
......@@ -65,21 +67,20 @@ link_directories("${PADDLE_DIR}/third_party/install/glog/lib")
link_directories("${PADDLE_DIR}/third_party/install/gflags/lib")
link_directories("${PADDLE_DIR}/third_party/install/xxhash/lib")
link_directories("${PADDLE_DIR}/paddle/lib/")
link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")
link_directories("${CMAKE_CURRENT_BINARY_DIR}")
if (WIN32)
include_directories("${PADDLE_DIR}/paddle/fluid/inference")
include_directories("${PADDLE_DIR}/paddle/include")
link_directories("${PADDLE_DIR}/paddle/fluid/inference")
include_directories("${OPENCV_DIR}/build/include")
include_directories("${OPENCV_DIR}/opencv/build/include")
link_directories("${OPENCV_DIR}/build/x64/vc14/lib")
find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
else ()
find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
include_directories("${PADDLE_DIR}/paddle/include")
link_directories("${PADDLE_DIR}/paddle/lib")
include_directories("${OPENCV_DIR}/include")
link_directories("${OPENCV_DIR}/lib")
endif ()
include_directories(${OpenCV_INCLUDE_DIRS})
if (WIN32)
add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
......@@ -92,7 +93,7 @@ if (WIN32)
add_definitions(-DSTATIC_LIB)
endif()
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -o2 -std=c++11")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -o2 -fopenmp -std=c++11")
set(CMAKE_STATIC_LIBRARY_PREFIX "")
endif()
......@@ -110,7 +111,7 @@ endif()
if (NOT WIN32)
if (USE_TENSORRT AND WITH_GPU)
if (WITH_TENSORRT AND WITH_GPU)
include_directories("${PADDLE_DIR}/third_party/install/tensorrt/include")
link_directories("${PADDLE_DIR}/third_party/install/tensorrt/lib")
endif()
......@@ -149,7 +150,7 @@ else()
set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
if(WIN32)
if (WIN32)
if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}")
set(DEPS
${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
......@@ -168,11 +169,10 @@ else()
endif()
if (NOT WIN32)
set(EXTERNAL_LIB "-lrt -ldl -lpthread")
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags protobuf yaml-cpp z xxhash
${EXTERNAL_LIB})
glog gflags protobuf z xxhash yaml-cpp
)
if(EXISTS "${PADDLE_DIR}/third_party/install/snappystream/lib")
set(DEPS ${DEPS} snappystream)
endif()
......@@ -182,7 +182,7 @@ if (NOT WIN32)
else()
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
opencv_world346 glog libyaml-cppmt gflags_static libprotobuf zlibstatic xxhash ${EXTERNAL_LIB})
glog gflags_static libprotobuf zlibstatic xxhash libyaml-cppmt)
set(DEPS ${DEPS} libcmt shlwapi)
if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib")
set(DEPS ${DEPS} snappy)
......@@ -194,7 +194,7 @@ endif(NOT WIN32)
if(WITH_GPU)
if(NOT WIN32)
if (USE_TENSORRT)
if (WITH_TENSORRT)
set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
......@@ -208,57 +208,17 @@ if(WITH_GPU)
endif()
if (NOT WIN32)
set(OPENCV_LIB_DIR ${OPENCV_DIR}/lib)
if(EXISTS "${OPENCV_LIB_DIR}")
message("OPENCV_LIB:" ${OPENCV_LIB_DIR})
else()
set(OPENCV_LIB_DIR ${OPENCV_DIR}/lib64)
message("OPENCV_LIB:" ${OPENCV_LIB_DIR})
endif()
set(OPENCV_3RD_LIB_DIR ${OPENCV_DIR}/share/OpenCV/3rdparty/lib)
if(EXISTS "${OPENCV_3RD_LIB_DIR}")
message("OPENCV_3RD_LIB_DIR:" ${OPENCV_3RD_LIB_DIR})
else()
set(OPENCV_3RD_LIB_DIR ${OPENCV_DIR}/share/OpenCV/3rdparty/lib64)
message("OPENCV_3RD_LIB_DIR:" ${OPENCV_3RD_LIB_DIR})
endif()
set(DEPS ${DEPS} ${OPENCV_LIB_DIR}/libopencv_imgcodecs${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_LIB_DIR}/libopencv_imgproc${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_LIB_DIR}/libopencv_core${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_LIB_DIR}/libopencv_highgui${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libIlmImf${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/liblibjasper${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/liblibpng${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/liblibtiff${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libittnotify${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/liblibjpeg-turbo${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/liblibwebp${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libzlib${CMAKE_STATIC_LIBRARY_SUFFIX})
if(EXISTS "${OPENCV_3RD_LIB_DIR}/libippiw${CMAKE_STATIC_LIBRARY_SUFFIX}")
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libippiw${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
if(EXISTS "${OPENCV_3RD_LIB_DIR}/libippicv${CMAKE_STATIC_LIBRARY_SUFFIX}")
set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libippicv${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
set(DEPS ${DEPS} ${EXTERNAL_LIB})
endif()
SET(PADDLESEG_INFERENCE_SRCS preprocessor/preprocessor.cpp
preprocessor/preprocessor_detection.cpp predictor/detection_predictor.cpp
utils/detection_result.pb.cc)
set(DEPS ${DEPS} ${OpenCV_LIBS})
add_executable(main src/main.cc src/preprocess_op.cc src/object_detector.cc)
ADD_DEPENDENCIES(main ext-yaml-cpp)
target_link_libraries(main ${DEPS})
ADD_LIBRARY(libpaddleseg_inference STATIC ${PADDLESEG_INFERENCE_SRCS})
target_link_libraries(libpaddleseg_inference ${DEPS})
add_executable(detection_demo detection_demo.cpp)
ADD_DEPENDENCIES(libpaddleseg_inference ext-yaml-cpp)
ADD_DEPENDENCIES(detection_demo ext-yaml-cpp libpaddleseg_inference)
target_link_libraries(detection_demo ${DEPS} libpaddleseg_inference)
if (WIN32)
add_custom_command(TARGET detection_demo POST_BUILD
if (WIN32 AND WITH_MKL)
add_custom_command(TARGET main POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
......@@ -267,5 +227,3 @@ if (WIN32)
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
)
endif()
execute_process(COMMAND cp -r ${CMAKE_SOURCE_DIR}/images ${CMAKE_SOURCE_DIR}/conf ${CMAKE_CURRENT_BINARY_DIR})
{
"configurations": [
{
"name": "x64-Release",
"generator": "Ninja",
"configurationType": "RelWithDebInfo",
"inheritEnvironments": [ "msvc_x64_x64" ],
"buildRoot": "${projectDir}\\out\\build\\${name}",
"installRoot": "${projectDir}\\out\\install\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "-v",
"ctestCommandArgs": "",
"variables": [
{
"name": "CUDA_LIB",
"value": "D:/projects/packages/cuda10_0/lib64",
"type": "PATH"
},
{
"name": "OPENCV_DIR",
"value": "D:/projects/packages/opencv3_4_6",
"type": "PATH"
},
{
"name": "PADDLE_DIR",
"value": "D:/projects/packages/fluid_inference",
"type": "PATH"
},
{
"name": "CMAKE_BUILD_TYPE",
"value": "Release",
"type": "STRING"
},
{
"name": "WITH_STATIC_LIB",
"value": "True",
"type": "BOOL"
},
{
"name": "WITH_MKL",
"value": "True",
"type": "BOOL"
},
{
"name": "WITH_GPU",
"value": "True",
"type": "BOOL"
}
]
}
]
}
# PaddleDetection C++ Inference Deployment
## Document Structure
[1. Overview](#1-overview)
[2. Directory Layout](#2-directory-layout)
[3. Build and Deploy](#3-build-and-deploy)
## 1. Overview
This directory provides a cross-platform `C++` deployment solution: once a model trained with `PaddleDetection` has been exported, you can run it directly on top of this project, or quickly integrate the code into your own application.
The main design goals are the following four points:
- Cross-platform: build, integrate, and deploy on both `Windows` and `Linux`
- Extensible: users can implement their own data preprocessing and other logic for new models
- High performance: beyond the gains from `PaddlePaddle` itself, key steps are optimized for the characteristics of image detection
- Support for a variety of detection architectures, including `Yolov3`/`Faster_RCNN`/`SSD`/`RetinaNet`
## 2. Directory Layout
```bash
deploy/cpp
|
├── src
│   ├── main.cc # integration example, program entry point
│   ├── object_detector.cc # implementation of the model loading and prediction wrapper class
│   └── preprocess_op.cc # implementation of the preprocessing logic
|
├── include
│   ├── config_parser.h # parser for the exported model's yaml config file
│   ├── object_detector.h # model loading and prediction wrapper class
│   └── preprocess_op.h # preprocessing wrapper classes
|
├── docs
│   ├── linux_build.md # Linux build guide
│   └── windows_vs2019_build.md # Windows VS2019 build guide
├── build.sh # build script
├── CMakeList.txt # top-level cmake file
|
├── CMakeSettings.json # Visual Studio 2019 CMake project build settings
└── cmake # cmake for external dependencies (currently only yaml-cpp)
```
## 3. Build and Deploy
### 3.1 Export the Model
Make sure you have exported your model with `PaddleDetection`'s [export_model.py](../../tools/export_model.py) and saved it in a convenient location. For details, see the [model export tutorial](../../docs/advanced_tutorials/inference/EXPORT_MODEL.md).
After export, the directory structure looks like this (using `yolov3_darknet` as an example):
```
yolov3_darknet # model directory
├── infer_cfg.yml # model configuration
├── __model__ # model file
└── __params__ # parameters file
```
At inference time, the path to this directory is passed to the program as an input argument.
### 3.2 Build
Building and running are supported only on `Windows` and `Linux`:
- [Linux build guide](../../docs/advanced_tutorials/inference/docs/linux_build.md)
- [Windows build guide (with Visual Studio 2019)](../../docs/advanced_tutorials/inference/docs/windows_vs2019_build.md)
......@@ -7,8 +7,8 @@ message("${CMAKE_BUILD_TYPE}")
ExternalProject_Add(
ext-yaml-cpp
GIT_REPOSITORY https://github.com/jbeder/yaml-cpp.git
GIT_TAG e0e01d53c27ffee6c86153fa41e7f5e57d3e5c90
URL https://bj.bcebos.com/paddlex/deploy/deps/yaml-cpp.zip
URL_MD5 9542d6de397d1fbd649ed468cb5850e6
CMAKE_ARGS
-DYAML_CPP_BUILD_TESTS=OFF
-DYAML_CPP_BUILD_TOOLS=OFF
......
# Linux Build Guide
## Notes
This document has been tested on `Linux` with `GCC 4.8.5` and `GCC 4.9.4`. To build with a newer G++, you must recompile the Paddle inference library; see [Build the Paddle inference library from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html#id15).
## Prerequisites
* G++ 4.8.2 ~ 4.9.4
* CUDA 9.0 / CUDA 10.0, cudnn 7+ (only required with the GPU version of the inference library)
* CMake 3.0+
Make sure the software above is installed. **All examples below assume the working directory `/root/projects/`.**
### Step 1: Download the Code
`git clone https://github.com/PaddlePaddle/PaddleDetection.git`
**Note**: the `C++` inference code lives in `/root/projects/PaddleDetection/deploy/cpp`, which does not depend on any other directory in `PaddleDetection`.
### Step 2: Download the PaddlePaddle C++ Inference Library fluid_inference
The PaddlePaddle C++ inference library ships different prebuilt packages for different `CPU` and `CUDA` versions; download the one that matches your environment: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html)
After downloading and extracting, `/root/projects/fluid_inference` contains:
```
fluid_inference
├── paddle # paddle core libraries and headers
|
├── third_party # third-party libraries and headers
|
└── version.txt # version and build info
```
**Note:** except for `nv-jetson-cuda10-cudnn7.5-trt5`, all prebuilt packages are compiled with `GCC 4.8.5`; using a newer `GCC` may cause `ABI` compatibility problems, so either downgrade or [build the inference library yourself](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html).
### Step 3: Build
The `cmake` build commands live in `scripts/build.sh`. Adjust the main parameters to your environment; the key contents are:
```
# whether to use the GPU (i.e. CUDA)
WITH_GPU=ON
# whether to enable TensorRT (only effective when WITH_GPU=ON)
WITH_TENSORRT=OFF
# path to the Paddle inference library downloaded in the previous step
PADDLE_DIR=/root/projects/deps/fluid_inference/
# OpenCV path; can be left unset when using the bundled prebuilt version
OPENCV_DIR=$(pwd)/deps/opencv346/
# CUDA lib path
CUDA_LIB=/usr/local/cuda/lib64/
# cuDNN lib path
CUDNN_LIB=/usr/local/cuda/lib64/
# no changes needed below this line
sh $(pwd)/scripts/bootstrap.sh
rm -rf build
mkdir -p build
cd build
cmake .. \
-DWITH_GPU=${WITH_GPU} \
-DWITH_TENSORRT=${WITH_TENSORRT} \
-DPADDLE_DIR=${PADDLE_DIR} \
-DCUDA_LIB=${CUDA_LIB} \
-DCUDNN_LIB=${CUDNN_LIB} \
-DOPENCV_DIR=${OPENCV_DIR}
make
```
After setting the parameters in the script, run the build script:
```shell
sh ./scripts/build.sh
```
### Step 4: Run Inference and Visualize
After a successful build, the inference entry point is `build/main`; its main command-line flags are:
| Flag | Description |
| ---- | ---- |
| model_dir | Path to the exported inference model |
| image_path | Path to the input image |
| video_path | Path to the input video |
| use_gpu | Whether to run on the GPU; 0 or 1 (default 0) |
**Note**: if both `video_path` and `image_path` are set, the program only runs on `video_path`.
`Example 1`:
```shell
# run on CPU against the image /root/projects/images/test.jpeg
./build/main --model_dir=/root/projects/models/yolov3_darknet --image_path=/root/projects/images/test.jpeg
```
The visualized result for the image is saved as `output.jpeg` in the current directory.
`Example 2`:
```shell
# run on GPU against the video /root/projects/videos/test.avi
./build/main --model_dir=/root/projects/models/yolov3_darknet --video_path=/root/projects/videos/test.avi --use_gpu=1
```
The visualized result for the video is saved as `output.avi` in the current directory.
# Visual Studio 2019 Community CMake Build Guide
On Windows we tested with `Visual Studio 2019 Community`. Microsoft has supported managing `CMake` cross-platform build projects directly since `Visual Studio 2017`, but stable and complete support only arrived in `2019`, so if you want CMake to manage the project build, we recommend doing it with `Visual Studio 2019`.
## Prerequisites
* Visual Studio 2019
* CUDA 9.0 / CUDA 10.0, cudnn 7+ (only required with the GPU version of the inference library)
* CMake 3.0+
Make sure the software above is installed; we use the Community edition of `VS2019`.
**All examples below assume the working directory `D:\projects`.**
### Step 1: Download the Code
Download the source:
```shell
git clone https://github.com/PaddlePaddle/PaddleDetection.git
```
**Note**: the `C++` inference code lives in `PaddleDetection/deploy/cpp`, which does not depend on any other directory in `PaddleDetection`.
### Step 2: Download the PaddlePaddle C++ Inference Library fluid_inference
The PaddlePaddle C++ inference library ships different prebuilt packages for different `CPU` and `CUDA` versions; download the one that matches your environment: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/windows_cpp_inference.html)
After extracting, `D:\projects\fluid_inference` contains:
```
fluid_inference
├── paddle # paddle core libraries and headers
|
├── third_party # third-party libraries and headers
|
└── version.txt # version and build info
```
### Step 3: Install and Configure OpenCV
1. Download OpenCV 3.4.6 for Windows from the official site: [download link](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download)
2. Run the downloaded executable and extract OpenCV to a directory of your choice, e.g. `D:\projects\opencv`
3. Configure the environment variable as follows:
    - My Computer -> Properties -> Advanced system settings -> Environment Variables
    - Under system variables, find Path (create it if missing) and double-click to edit it
    - Add a new entry with the opencv path, e.g. `D:\projects\opencv\build\x64\vc14\bin`, and save
### Step 4: Build the CMake Project Directly with Visual Studio 2019
1. Open Visual Studio 2019 Community and click `Continue without code`
![step2](https://paddleseg.bj.bcebos.com/inference/vs2019_step1.png)
2. Click `File` -> `Open` -> `CMake`
![step2.1](https://paddleseg.bj.bcebos.com/inference/vs2019_step2.png)
Select the path containing the project code and open `CMakeList.txt`
![step2.2](https://paddleseg.bj.bcebos.com/inference/vs2019_step3.png)
3. Click `Project` -> `CMake settings for cpp_inference_demo`
![step3](https://paddleseg.bj.bcebos.com/inference/vs2019_step4.png)
4. Click `Browse` and set the build options for the `CUDA`, `OpenCV`, and `Paddle inference library` paths
The three build parameters are described below (entries marked * are only needed with the **GPU version** of the inference library; keep the CUDA library version aligned: **use CUDA 9.0 or 10.0, not 9.2, 10.1, etc.**):
| Parameter | Meaning |
| ---- | ---- |
| *CUDA_LIB | CUDA library path |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | Paddle inference library path |
**Note:** 1. With the `CPU` version of the inference library, uncheck `WITH_GPU`. 2. With the `openblas` build, uncheck `WITH_MKL`.
![step4](https://paddleseg.bj.bcebos.com/inference/vs2019_step5.png)
**When the settings are done**, click `Save and generate CMake cache to load variables` in the screenshot above.
5. Click `Build` -> `Build All`
![step6](https://paddleseg.bj.bcebos.com/inference/vs2019_step6.png)
### Step 5: Run Inference and Visualize
The executables built by `Visual Studio 2019` land in the `out\build\x64-Release` directory. Open `cmd` and switch to it:
```
cd D:\projects\PaddleDetection\deploy\cpp\out\build\x64-Release
```
The executable `main` is the sample inference program; its main command-line flags are:
| Flag | Description |
| ---- | ---- |
| model_dir | Path to the exported inference model |
| image_path | Path to the input image |
| video_path | Path to the input video |
| use_gpu | Whether to run on the GPU; 0 or 1 (default 0) |
**Note**: if both `video_path` and `image_path` are set, the program only runs on `video_path`.
`Example 1`:
```shell
# run on CPU against the image D:\\images\\test.jpeg
.\main --model_dir=D:\\models\\yolov3_darknet --image_path=D:\\images\\test.jpeg
```
The visualized result for the image is saved as `output.jpeg` in the current directory.
`Example 2`:
```shell
# run on GPU against the video D:\\videos\\test.avi
.\main --model_dir=D:\\models\\yolov3_darknet --video_path=D:\\videos\\test.avi --use_gpu=1
```
The visualized result for the video is saved as `output.avi` in the current directory.
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <vector>
#include <string>
#include <map>
#include "yaml-cpp/yaml.h"
#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif
namespace PaddleDetection {
// Inference model configuration parser
class ConfigPaser {
public:
ConfigPaser() {}
~ConfigPaser() {}
bool load_config(const std::string& model_dir,
const std::string& cfg = "infer_cfg.yml") {
// Load as a YAML::Node
YAML::Node config;
config = YAML::LoadFile(model_dir + OS_PATH_SEP + cfg);
// Get runtime mode : fluid, trt_int8, trt_fp16, trt_fp32
if (config["mode"].IsDefined()) {
mode_ = config["mode"].as<std::string>();
} else {
std::cerr << "Please set mode, "
<< "support value : fluid/trt_int8/trt_fp16/trt_fp32."
<< std::endl;
return false;
}
// Get model arch : YOLO, SSD, RetinaNet, RCNN, Face
if (config["arch"].IsDefined()) {
arch_ = config["arch"].as<std::string>();
} else {
std::cerr << "Please set model arch,"
<< "support value : YOLO, SSD, RetinaNet, RCNN, Face."
<< std::endl;
return false;
}
// Get min_subgraph_size for tensorrt
if (config["min_subgraph_size"].IsDefined()) {
min_subgraph_size_ = config["min_subgraph_size"].as<int>();
} else {
std::cerr << "Please set min_subgraph_size." << std::endl;
return false;
}
// Get draw_threshold for visualization
if (config["draw_threshold"].IsDefined()) {
draw_threshold_ = config["draw_threshold"].as<float>();
} else {
std::cerr << "Please set draw_threshold." << std::endl;
return false;
}
// Get with_background
if (config["with_background"].IsDefined()) {
with_background_ = config["with_background"].as<bool>();
} else {
std::cerr << "Please set with_background." << std::endl;
return false;
}
// Get Preprocess for preprocessing
if (config["Preprocess"].IsDefined()) {
preprocess_info_ = config["Preprocess"];
} else {
std::cerr << "Please set Preprocess." << std::endl;
return false;
}
// Get label_list for visualization
if (config["label_list"].IsDefined()) {
label_list_ = config["label_list"].as<std::vector<std::string>>();
} else {
std::cerr << "Please set label_list." << std::endl;
return false;
}
return true;
}
std::string mode_;
float draw_threshold_;
std::string arch_;
int min_subgraph_size_;
bool with_background_;
YAML::Node preprocess_info_;
std::vector<std::string> label_list_;
};
} // namespace PaddleDetection
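For reference, a minimal `infer_cfg.yml` that satisfies the parser above might look like the sketch below; the field names come straight from the checks in `load_config` (and from the preprocess ops in `preprocess_op.h`), while the values are purely illustrative:
```yaml
# Illustrative values only; the exporter writes the real ones.
mode: fluid
arch: YOLO
min_subgraph_size: 3
draw_threshold: 0.5
with_background: false
Preprocess:
- type: Resize
  interp: 2
  max_size: 0
  target_size: 608
  image_shape: [3, 608, 608]
- type: Normalize
  mean: [0.485, 0.456, 0.406]
  std: [0.229, 0.224, 0.225]
  is_channel_first: false
  is_scale: true
- type: Permute
  to_bgr: false
  channel_first: true
label_list:
- person
- bicycle
```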
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include <memory>
#include <utility>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "paddle_inference_api.h" // NOLINT
#include "include/preprocess_op.h"
#include "include/config_parser.h"
namespace PaddleDetection {
// Object Detection Result
struct ObjectResult {
// Rectangle coordinates of detected object: left, right, top, bottom
std::vector<int> rect;
// Class id of detected object
int class_id;
// Confidence of detected object
float confidence;
};
// Generate visualization colormap for each class
std::vector<int> GenerateColorMap(int num_class);
// Visualize detection results
cv::Mat VisualizeResult(const cv::Mat& img,
const std::vector<ObjectResult>& results,
const std::vector<std::string>& lable_list,
const std::vector<int>& colormap);
class ObjectDetector {
public:
explicit ObjectDetector(const std::string& model_dir, bool use_gpu = false) {
config_.load_config(model_dir);
threshold_ = config_.draw_threshold_;
preprocessor_.Init(config_.preprocess_info_, config_.arch_);
LoadModel(model_dir, use_gpu);
}
// Load Paddle inference model
void LoadModel(
const std::string& model_dir,
bool use_gpu);
// Run predictor
void Predict(
const cv::Mat& img,
std::vector<ObjectResult>* result);
// Get Model Label list
const std::vector<std::string>& GetLabelList() const {
return config_.label_list_;
}
private:
// Preprocess image and copy data to input buffer
void Preprocess(const cv::Mat& image_mat);
// Postprocess result
void Postprocess(
const cv::Mat& raw_mat,
std::vector<ObjectResult>* result);
std::unique_ptr<paddle::PaddlePredictor> predictor_;
Preprocessor preprocessor_;
ImageBlob inputs_;
std::vector<float> output_data_;
float threshold_;
ConfigPaser config_;
};
} // namespace PaddleDetection
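A condensed sketch of driving this class end to end (essentially what `src/main.cc` does); the model directory and image path are placeholders:
```cpp
// Sketch only: assumes an exported model directory and a test image on disk.
#include <opencv2/opencv.hpp>
#include "include/object_detector.h"

int main() {
  // Loads infer_cfg.yml plus __model__/__params__ from the directory.
  PaddleDetection::ObjectDetector det("yolov3_darknet", /*use_gpu=*/false);
  cv::Mat im = cv::imread("test.jpeg", 1);
  std::vector<PaddleDetection::ObjectResult> result;
  det.Predict(im, &result);
  // Draw boxes with one deterministic color per class.
  auto labels = det.GetLabelList();
  auto colormap = PaddleDetection::GenerateColorMap(labels.size());
  cv::Mat vis = PaddleDetection::VisualizeResult(im, result, labels, colormap);
  cv::imwrite("output.jpeg", vis);
  return 0;
}
```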
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <yaml-cpp/yaml.h>
#include <vector>
#include <string>
#include <utility>
#include <memory>
#include <unordered_map>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
namespace PaddleDetection {
// Object for storing all preprocessed data
class ImageBlob {
public:
// Original image width and height
std::vector<int> ori_im_size_;
// Buffer for image data after preprocessing
std::vector<float> im_data_;
// Original image width, height, shrink in float format
std::vector<float> ori_im_size_f_;
// Evaluation image width and height
std::vector<float> eval_im_size_f_;
};
// Abstraction of a preprocessing operation
class PreprocessOp {
public:
virtual void Init(const YAML::Node& item, const std::string& arch) = 0;
virtual void Run(cv::Mat* im, ImageBlob* data) = 0;
};
class Normalize : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item, const std::string& arch) {
mean_ = item["mean"].as<std::vector<float>>();
scale_ = item["std"].as<std::vector<float>>();
is_channel_first_ = item["is_channel_first"].as<bool>();
is_scale_ = item["is_scale"].as<bool>();
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
// CHW or HWC
bool is_channel_first_;
bool is_scale_;
std::vector<float> mean_;
std::vector<float> scale_;
};
class Permute : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item, const std::string& arch) {
to_bgr_ = item["to_bgr"].as<bool>();
is_channel_first_ = item["channel_first"].as<bool>();
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
// RGB to BGR
bool to_bgr_;
// CHW or HWC
bool is_channel_first_;
};
class Resize : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item, const std::string& arch) {
arch_ = arch;
interp_ = item["interp"].as<int>();
max_size_ = item["max_size"].as<int>();
target_size_ = item["target_size"].as<int>();
image_shape_ = item["image_shape"].as<std::vector<int>>();
}
// Compute best resize scale for x-dimension, y-dimension
std::pair<float, float> GenerateScale(const cv::Mat& im);
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
std::string arch_;
int interp_;
int max_size_;
int target_size_;
std::vector<int> image_shape_;
};
// Models with FPN need input shape % stride == 0
class PadStride : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item, const std::string& arch) {
stride_ = item["stride"].as<int>();
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
int stride_;
};
class Preprocessor {
public:
void Init(const YAML::Node& config_node, const std::string& arch) {
arch_ = arch;
for (const auto& item : config_node) {
auto op_name = item["type"].as<std::string>();
ops_[op_name] = CreateOp(op_name);
ops_[op_name]->Init(item, arch);
}
}
std::shared_ptr<PreprocessOp> CreateOp(const std::string& name) {
if (name == "Resize") {
return std::make_shared<Resize>();
} else if (name == "Permute") {
return std::make_shared<Permute>();
} else if (name == "Normalize") {
return std::make_shared<Normalize>();
} else if (name == "PadStride") {
return std::make_shared<PadStride>();
}
return nullptr;
}
void Run(cv::Mat* im, ImageBlob* data);
public:
static const std::vector<std::string> RUN_ORDER;
private:
std::string arch_;
std::unordered_map<std::string, std::shared_ptr<PreprocessOp>> ops_;
};
} // namespace PaddleDetection
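To make the flow concrete, here is a minimal sketch of driving the preprocessing pipeline by hand; the file names and the `YOLO` arch string are assumptions for illustration:
```cpp
// Sketch only: assumes an exported model directory with infer_cfg.yml
// and a test image next to the executable.
#include <opencv2/opencv.hpp>
#include "include/preprocess_op.h"

int main() {
  YAML::Node config = YAML::LoadFile("yolov3_darknet/infer_cfg.yml");
  PaddleDetection::Preprocessor preprocessor;
  // The "Preprocess" node lists the ops; Init instantiates and configures them.
  preprocessor.Init(config["Preprocess"], "YOLO");
  cv::Mat im = cv::imread("test.jpeg");
  cv::cvtColor(im, im, cv::COLOR_BGR2RGB);
  PaddleDetection::ImageBlob blob;
  // Runs Resize -> Normalize -> PadStride -> Permute (RUN_ORDER), filling
  // blob.im_data_ with the CHW float tensor and the size fields.
  preprocessor.Run(&im, &blob);
  return 0;
}
```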
# download pre-compiled opencv lib
OPENCV_URL=https://paddleseg.bj.bcebos.com/deploy/deps/opencv346.tar.bz2
if [ ! -d "./deps/opencv346" ]; then
mkdir -p deps
cd deps
wget -c ${OPENCV_URL}
tar xvfj opencv346.tar.bz2
rm -rf opencv346.tar.bz2
cd ..
fi
# compile with cuda
WITH_GPU=ON
# compile with tensorrt
WITH_TENSORRT=OFF
# path to paddle inference lib
PADDLE_DIR=/root/projects/deps/fluid_inference/
# path to opencv lib
OPENCV_DIR=$(pwd)/deps/opencv346/
# path to cuda lib
CUDA_LIB=/usr/local/cuda/lib64/
sh $(pwd)/scripts/bootstrap.sh
rm -rf build
mkdir -p build
cd build
cmake .. \
    -DWITH_GPU=${WITH_GPU} \
    -DWITH_TENSORRT=${WITH_TENSORRT} \
-DPADDLE_DIR=${PADDLE_DIR} \
-DCUDA_LIB=${CUDA_LIB} \
-DOPENCV_DIR=${OPENCV_DIR} \
-DWITH_STATIC_LIB=OFF
make
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <iostream>
#include <string>
#include <vector>
#include "include/object_detector.h"
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_string(image_path, "", "Path of input image");
DEFINE_string(video_path, "", "Path of input video");
DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
void PredictVideo(const std::string& video_path,
PaddleDetection::ObjectDetector* det) {
// Open video
cv::VideoCapture capture;
capture.open(video_path.c_str());
if (!capture.isOpened()) {
printf("can not open video : %s\n", video_path.c_str());
return;
}
// Get Video info : resolution, fps
int video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
int video_height = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));
int video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));
// Create VideoWriter for output
cv::VideoWriter video_out;
std::string video_out_path = "output.avi";
video_out.open(video_out_path.c_str(),
CV_FOURCC('M', 'J', 'P', 'G'),
video_fps,
cv::Size(video_width, video_height),
true);
if (!video_out.isOpened()) {
printf("create video writer failed!\n");
return;
}
std::vector<PaddleDetection::ObjectResult> result;
auto labels = det->GetLabelList();
auto colormap = PaddleDetection::GenerateColorMap(labels.size());
// Capture all frames and do inference
cv::Mat frame;
while (capture.read(frame)) {
if (frame.empty()) {
break;
}
det->Predict(frame, &result);
cv::Mat out_im = PaddleDetection::VisualizeResult(
frame, result, labels, colormap);
video_out.write(out_im);
}
capture.release();
video_out.release();
}
void PredictImage(const std::string& image_path,
PaddleDetection::ObjectDetector* det) {
// Open input image as an opencv cv::Mat object
cv::Mat im = cv::imread(image_path, 1);
// Store all detected result
std::vector<PaddleDetection::ObjectResult> result;
det->Predict(im, &result);
for (const auto& item : result) {
printf("class=%d confidence=%.2f rect=[%d %d %d %d]\n",
item.class_id,
item.confidence,
item.rect[0],
item.rect[1],
item.rect[2],
item.rect[3]);
}
// Visualization result
auto labels = det->GetLabelList();
auto colormap = PaddleDetection::GenerateColorMap(labels.size());
cv::Mat vis_img = PaddleDetection::VisualizeResult(
im, result, labels, colormap);
cv::imwrite("output.jpeg", vis_img);
printf("Visualized output saved as output.jpeg\n");
}
int main(int argc, char** argv) {
// Parsing command-line
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_model_dir.empty()
|| (FLAGS_image_path.empty() && FLAGS_video_path.empty())) {
std::cout << "Usage: ./main --model_dir=/PATH/TO/INFERENCE_MODEL/ "
<< "--image_path=/PATH/TO/INPUT/IMAGE/" << std::endl;
return -1;
}
// Load the model and create an object detector
PaddleDetection::ObjectDetector det(FLAGS_model_dir, FLAGS_use_gpu);
// Do inference on input video or image
if (!FLAGS_video_path.empty()) {
PredictVideo(FLAGS_video_path, &det);
} else if (!FLAGS_image_path.empty()) {
PredictImage(FLAGS_image_path, &det);
}
return 0;
}
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
# include "include/object_detector.h"
namespace PaddleDetection {
// Load Model and create model predictor
void ObjectDetector::LoadModel(const std::string& model_dir, bool use_gpu) {
paddle::AnalysisConfig config;
std::string prog_file = model_dir + OS_PATH_SEP + "__model__";
std::string params_file = model_dir + OS_PATH_SEP + "__params__";
config.SetModel(prog_file, params_file);
if (use_gpu) {
config.EnableUseGpu(100, 0);
} else {
config.DisableGpu();
}
config.SwitchUseFeedFetchOps(false);
config.SwitchSpecifyInputNames(true);
// Memory optimization
config.EnableMemoryOptim();
predictor_ = std::move(CreatePaddlePredictor(config));
}
// Visualize detection results
cv::Mat VisualizeResult(const cv::Mat& img,
const std::vector<ObjectResult>& results,
const std::vector<std::string>& lable_list,
const std::vector<int>& colormap) {
cv::Mat vis_img = img.clone();
for (int i = 0; i < results.size(); ++i) {
int w = results[i].rect[1] - results[i].rect[0];
int h = results[i].rect[3] - results[i].rect[2];
cv::Rect roi = cv::Rect(results[i].rect[0], results[i].rect[2], w, h);
// Configure color and text size
std::string text = lable_list[results[i].class_id];
int c1 = colormap[3 * results[i].class_id + 0];
int c2 = colormap[3 * results[i].class_id + 1];
int c3 = colormap[3 * results[i].class_id + 2];
cv::Scalar roi_color = cv::Scalar(c1, c2, c3);
text += std::to_string(static_cast<int>(results[i].confidence * 100)) + "%";
int font_face = cv::FONT_HERSHEY_COMPLEX_SMALL;
    double font_scale = 0.5f;
    // Thickness must be an integer; a float here would truncate to 0.
    int thickness = 1;
cv::Size text_size = cv::getTextSize(text,
font_face,
font_scale,
thickness,
nullptr);
float new_font_scale = roi.width * font_scale / text_size.width;
text_size = cv::getTextSize(text,
font_face,
new_font_scale,
thickness,
nullptr);
cv::Point origin;
origin.x = roi.x;
origin.y = roi.y;
// Configure text background
cv::Rect text_back = cv::Rect(results[i].rect[0],
results[i].rect[2] - text_size.height,
text_size.width,
text_size.height);
// Draw roi object, text, and background
cv::rectangle(vis_img, roi, roi_color, 2);
cv::rectangle(vis_img, text_back, roi_color, -1);
cv::putText(vis_img,
text,
origin,
font_face,
new_font_scale,
cv::Scalar(255, 255, 255),
thickness);
}
return vis_img;
}
void ObjectDetector::Preprocess(const cv::Mat& ori_im) {
// Clone the image : keep the original mat for postprocess
cv::Mat im = ori_im.clone();
cv::cvtColor(im, im, cv::COLOR_BGR2RGB);
preprocessor_.Run(&im, &inputs_);
}
void ObjectDetector::Postprocess(
const cv::Mat& raw_mat,
std::vector<ObjectResult>* result) {
result->clear();
int rh = 1;
int rw = 1;
if (config_.arch_ == "SSD" || config_.arch_ == "Face") {
rh = raw_mat.rows;
rw = raw_mat.cols;
}
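  // Each detection in the fetched tensor occupies 6 floats:
  // [class_id, score, xmin, ymin, xmax, ymax].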
int total_size = output_data_.size() / 6;
for (int j = 0; j < total_size; ++j) {
// Class id
int class_id = static_cast<int>(round(output_data_[0 + j * 6]));
// Confidence score
float score = output_data_[1 + j * 6];
int xmin = (output_data_[2 + j * 6] * rw);
int ymin = (output_data_[3 + j * 6] * rh);
int xmax = (output_data_[4 + j * 6] * rw);
int ymax = (output_data_[5 + j * 6] * rh);
int wd = xmax - xmin;
int hd = ymax - ymin;
if (score > threshold_) {
ObjectResult result_item;
result_item.rect = {xmin, xmax, ymin, ymax};
result_item.class_id = class_id;
result_item.confidence = score;
result->push_back(result_item);
}
}
}
void ObjectDetector::Predict(const cv::Mat& im,
std::vector<ObjectResult>* result) {
// Preprocess image
Preprocess(im);
// Prepare input tensor
auto input_names = predictor_->GetInputNames();
for (const auto& tensor_name : input_names) {
auto in_tensor = predictor_->GetInputTensor(tensor_name);
if (tensor_name == "image") {
int rh = inputs_.eval_im_size_f_[0];
int rw = inputs_.eval_im_size_f_[1];
in_tensor->Reshape({1, 3, rh, rw});
in_tensor->copy_from_cpu(inputs_.im_data_.data());
} else if (tensor_name == "im_size") {
in_tensor->Reshape({1, 2});
in_tensor->copy_from_cpu(inputs_.ori_im_size_.data());
} else if (tensor_name == "im_info") {
in_tensor->Reshape({1, 3});
in_tensor->copy_from_cpu(inputs_.eval_im_size_f_.data());
} else if (tensor_name == "im_shape") {
in_tensor->Reshape({1, 3});
in_tensor->copy_from_cpu(inputs_.ori_im_size_f_.data());
}
}
// Run predictor
predictor_->ZeroCopyRun();
// Get output tensor
auto output_names = predictor_->GetOutputNames();
auto out_tensor = predictor_->GetOutputTensor(output_names[0]);
std::vector<int> output_shape = out_tensor->shape();
// Calculate output length
int output_size = 1;
for (int j = 0; j < output_shape.size(); ++j) {
output_size *= output_shape[j];
}
output_data_.resize(output_size);
out_tensor->copy_to_cpu(output_data_.data());
// Postprocessing result
Postprocess(im, result);
}
std::vector<int> GenerateColorMap(int num_class) {
auto colormap = std::vector<int>(3 * num_class, 0);
for (int i = 0; i < num_class; ++i) {
int j = 0;
int lab = i;
while (lab) {
colormap[i * 3] |= (((lab >> 0) & 1) << (7 - j));
colormap[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j));
colormap[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j));
++j;
lab >>= 3;
}
}
return colormap;
}
} // namespace PaddleDetection
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include <string>
#include "include/preprocess_op.h"
namespace PaddleDetection {
void Normalize::Run(cv::Mat* im, ImageBlob* data) {
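  // Convert to float32, optionally scaling to [0, 1], then apply the
  // per-channel (x - mean) / std transform ("std" is stored in scale_).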
double e = 1.0;
if (is_scale_) {
e /= 255.0;
}
(*im).convertTo(*im, CV_32FC3, e);
for (int h = 0; h < im->rows; h++) {
for (int w = 0; w < im->cols; w++) {
im->at<cv::Vec3f>(h, w)[0] =
(im->at<cv::Vec3f>(h, w)[0] - mean_[0] ) / scale_[0];
im->at<cv::Vec3f>(h, w)[1] =
(im->at<cv::Vec3f>(h, w)[1] - mean_[1] ) / scale_[1];
im->at<cv::Vec3f>(h, w)[2] =
(im->at<cv::Vec3f>(h, w)[2] - mean_[2] ) / scale_[2];
}
}
}
void Permute::Run(cv::Mat* im, ImageBlob* data) {
int rh = im->rows;
int rw = im->cols;
int rc = im->channels();
(data->im_data_).resize(rc * rh * rw);
float* base = (data->im_data_).data();
for (int i = 0; i < rc; ++i) {
cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);
}
}
void Resize::Run(cv::Mat* im, ImageBlob* data) {
data->ori_im_size_ = {
static_cast<int>(im->rows),
static_cast<int>(im->cols)
};
data->ori_im_size_f_ = {
static_cast<float>(im->rows),
static_cast<float>(im->cols),
1.0
};
auto resize_scale = GenerateScale(*im);
cv::resize(
*im, *im, cv::Size(), resize_scale.first, resize_scale.second, interp_);
if (max_size_ != 0 && !image_shape_.empty()) {
// Padding the image with 0 border
cv::copyMakeBorder(
*im,
*im,
0,
max_size_ - im->rows,
0,
max_size_ - im->cols,
cv::BORDER_CONSTANT,
cv::Scalar(0));
}
data->eval_im_size_f_ = {
static_cast<float>(im->rows),
static_cast<float>(im->cols),
resize_scale.first
};
}
std::pair<float, float> Resize::GenerateScale(const cv::Mat& im) {
std::pair<float, float> resize_scale;
int origin_w = im.cols;
int origin_h = im.rows;
if (max_size_ != 0 && (arch_ == "RCNN" || arch_ == "RetinaNet")) {
int im_size_max = std::max(origin_w, origin_h);
int im_size_min = std::min(origin_w, origin_h);
float scale_ratio =
static_cast<float>(target_size_) / static_cast<float>(im_size_min);
if (max_size_ > 0) {
if (round(scale_ratio * im_size_max) > max_size_) {
scale_ratio =
static_cast<float>(max_size_) / static_cast<float>(im_size_max);
}
}
resize_scale = {scale_ratio, scale_ratio};
} else {
resize_scale.first =
static_cast<float>(target_size_) / static_cast<float>(origin_w);
resize_scale.second =
static_cast<float>(target_size_) / static_cast<float>(origin_h);
}
return resize_scale;
}
void PadStride::Run(cv::Mat* im, ImageBlob* data) {
if (stride_ <= 0) {
return;
}
int rc = im->channels();
int rh = im->rows;
int rw = im->cols;
int nh = (rh / stride_) * stride_ + (rh % stride_ != 0) * stride_;
int nw = (rw / stride_) * stride_ + (rw % stride_ != 0) * stride_;
cv::copyMakeBorder(
*im,
*im,
0,
nh - rh,
0,
nw - rw,
cv::BORDER_CONSTANT,
cv::Scalar(0));
(data->eval_im_size_f_)[0] = static_cast<float>(im->rows);
(data->eval_im_size_f_)[1] = static_cast<float>(im->cols);
}
// Preprocessor op running order
const std::vector<std::string> Preprocessor::RUN_ORDER = {
"Resize", "Normalize", "PadStride", "Permute"
};
void Preprocessor::Run(cv::Mat* im, ImageBlob* data) {
for (const auto& name : RUN_ORDER) {
if (ops_.find(name) != ops_.end()) {
ops_[name]->Run(im, data);
}
}
}
} // namespace PaddleDetection
# PaddleDetection C++ Inference Deployment
## Document Structure
[1. Overview](#1-overview)
[2. Directory Layout](#2-directory-layout)
[3. Build](#3-build)
[4. Run Inference and Visualize](#4-run-inference-and-visualize)
## 1. Overview
This directory provides a cross-platform C++ deployment solution for image detection models: with some configuration and a small amount of code, you can integrate a model into your own service and run image detection tasks.
The main design goals are the following four points:
- Cross-platform: build, develop, and deploy on both Windows and Linux
- Extensible: users can implement their own data preprocessing and other logic for new models
- High performance: beyond the gains from `PaddlePaddle` itself, key steps are optimized for the characteristics of image detection
- Support for common image detection models such as YOLOv3, Faster-RCNN, and Faster-RCNN+FPN; a model can be loaded for common detection tasks with minimal configuration
## 2. Directory Layout
```bash
deploy
├── detection_demo.cpp # C++ code for the image detection task
├── conf
│   ├── detection_rcnn.yaml # sample faster rcnn detection config
│   └── detection_rcnn_fpn.yaml # sample faster rcnn + fpn detection config
├── images
│   └── detection_rcnn # sample test images for faster rcnn + fpn detection
├── tools
│   └── vis.py # sample script for visualizing detection results
├── docs
│   ├── linux_build.md # Linux build guide
│   ├── windows_vs2015_build.md # Windows VS2015 build guide
│   └── windows_vs2019_build.md # Windows VS2019 build guide
├── utils # basic shared utilities
├── preprocess # data preprocessing code
├── predictor # model loading and prediction code
├── CMakeList.txt # top-level cmake file
└── external-cmake # cmake for external dependencies (currently only yaml-cpp)
```
## 3. Build
Building and running are supported on `Windows` and `Linux`:
- [Linux build guide](./docs/linux_build.md)
- [Windows build guide with Visual Studio 2019 Community](./docs/windows_vs2019_build.md)
- [Windows build guide with Visual Studio 2015](./docs/windows_vs2015_build.md)
On `Windows` we recommend building the `CMake` project directly with the latest `Visual Studio 2019 Community`.
## 4. Run Inference and Visualize
After the build you have the executable and libraries you need. Taking our `faster rcnn` detection model as an example, this section walks through the general workflow for deploying an image detection model.
### 4.1 Download the Model Files
We provide faster rcnn and faster rcnn+fpn models trained on the coco17 dataset, available here: [faster rcnn sample model download](https://paddleseg.bj.bcebos.com/inference/faster_rcnn_pp50.zip),
[faster rcnn + fpn sample model download](https://paddleseg.bj.bcebos.com/inference/faster_rcnn_pp50_fpn.zip)
Download and extract; the resulting layout is:
```
faster_rcnn_pp50/
├── __model__ # model file
└── __params__ # parameters file
```
Copy the extracted directory to a convenient path:
**Assume** that on `Windows` the model and parameter files live at `D:\projects\models\faster_rcnn_pp50`.
**Assume** the corresponding path on `Linux` is `/root/projects/models/faster_rcnn_pp50/`.
### 4.2 Edit the Configuration
The `conf` directory of the `inference` source tree (this directory) ships `detection_rcnn.yaml`, a sample configuration for faster rcnn; its fields are described below:
```yaml
DEPLOY:
  # whether to run inference on the GPU
  USE_GPU: 1
  # directory containing the model and parameter files
  MODEL_PATH: "/root/projects/models/faster_rcnn_pp50"
  # model file name
  MODEL_FILENAME: "__model__"
  # parameters file name
  PARAMS_FILENAME: "__params__"
  # canonical input size for inference; images of other sizes are resized
  EVAL_CROP_SIZE: (608, 608)
  # resize mode, UNPADDING or RANGE_SCALING
  RESIZE_TYPE: "RANGE_SCALING"
  # target length of the short side, effective only with RANGE_SCALING
  TARGET_SHORT_SIZE : 800
  # mean
  MEAN: [0.4647, 0.4647, 0.4647]
  # standard deviation
  STD: [0.0834, 0.0834, 0.0834]
  # image type, rgb or rgba
  IMAGE_TYPE: "rgb"
  # number of classes
  NUM_CLASSES: 1
  # number of channels
  CHANNELS : 3
  # preprocessor; DetectionPreProcessor is the generic class for image detection
  PRE_PROCESSOR: "DetectionPreProcessor"
  # predictor mode, NATIVE or ANALYSIS
  PREDICTOR_MODE: "ANALYSIS"
  # batch_size for each inference run
  BATCH_SIZE : 3
  # maximum length of the long side after scaling; -1 means unlimited
  RESIZE_MAX_SIZE: 1333
  # number of input tensors
  FEEDS_SIZE: 3
```
Set the `MODEL_PATH` field to the directory where you placed the model files downloaded and extracted in the **previous step**. For the other configuration fields, see the [configuration file reference](./docs/configuration.md).
**Note**: with the CPU version of the inference library, `USE_GPU` must be set to 0, otherwise inference will fail.
### 4.3 Run Inference
In a terminal (`cmd` on Windows), change into the directory containing the built executable.
On `Linux`, run:
```shell
./detection_demo --conf=conf/detection_rcnn.yaml --input_dir=images/detection_rcnn
```
On `Windows`, run:
```shell
.\detection_demo.exe --conf=conf\detection_rcnn.yaml --input_dir=images\detection_rcnn\
```
The two command-line parameters are:
| Parameter | Meaning |
|-------|----------|
| conf | Path to the model's Yaml configuration file |
| input_dir | Directory of images to predict |
See the previous step for the configuration file. The sample program scans every image under input_dir, generates a prediction for each one, prints it to the screen, and saves it in the same directory as `X` in an `X.pb` file (where X is the image file name). The tool script vis.py can then visualize the detection results.
**Visualizing detection results**
To run the visualization script, pass the image path, the detection-result pb file path, the box score threshold, and the class-to-label mapping file on the command line; it produces the visualized image `X.png` (the tools directory ships coco17.json, a class-label mapping for coco17).
```bash
python vis.py --img_path=../build/images/detection_rcnn/000000087038.jpg --img_result_path=../build/images/detection_rcnn/000000087038.jpg.pb --threshold=0.1 --c2l_path=coco17.json
```
Detection results (results for each image are separated by a blank line)
```Original image:```
![](../../../inference/images/detection_rcnn/000000087038.jpg)
```Detection result:```
![](../../images/000000087038_res.jpg)
../../../../deploy/cpp/README.md
\ No newline at end of file
# Inference Deployment Configuration File Reference
## Basics
The configuration file is a customization interface for the inference deployment solution: once you understand what each field means, you can tailor the deployment without writing any code. To make the field descriptions clearer, we first introduce the types used in the file.
### Field Requirement Types
- **required**: the field must be defined explicitly, otherwise the deployment program cannot start.
- **optional**: the field may be omitted; the deployment system supplies a default value, documented below.
### Field Value Types
- **int**: the field takes an integer value.
- **string**: the field takes a string value.
- **list**: the field takes a list value.
- **tuple**: the field takes a two-element tuple value.
## Fields
```yaml
# all deployment configuration fields must live under the DEPLOY key
DEPLOY:
  # type: required int
  # meaning: whether to use the GPU for inference. 0: no, 1: yes
  USE_GPU: 1
  # type: required string
  # meaning: directory containing the model and parameter files
  MODEL_PATH: "/path/to/model_directory"
  # type: required string
  # meaning: model file name
  MODEL_FILENAME: "__model__"
  # type: required string
  # meaning: parameters file name
  PARAMS_FILENAME: "__params__"
  # type: optional string
  # meaning: image resize mode; UNPADDING and RANGE_SCALING are supported. Default is UNPADDING.
  RESIZE_TYPE: "UNPADDING"
  # type: required tuple
  # meaning: with UNPADDING, the image is resized directly to this size.
  EVAL_CROP_SIZE: (513, 513)
  # type: optional int
  # meaning: with RANGE_SCALING, the short side of the image is aligned to this
  # value and the long side is scaled proportionally, resizing the image while
  # preserving its aspect ratio. Default is 0.
  TARGET_SHORT_SIZE: 800
  # type: optional int
  # meaning: with RANGE_SCALING, the long side may not be scaled beyond this value. Default is 0.
  RESIZE_MAX_SIZE: 1333
  # type: required list
  # meaning: per-channel mean used to normalize the image
  MEAN: [104.008, 116.669, 122.675]
  # type: required list
  # meaning: per-channel std used to normalize the image
  STD: [1.0, 1.0, 1.0]
  # type: string
  # meaning: image type, rgb or rgba
  IMAGE_TYPE: "rgb"
  # type: required int
  # meaning: number of classes
  NUM_CLASSES: 2
  # type: required int
  # meaning: number of image channels
  CHANNELS : 3
  # type: required string
  # meaning: preprocessor; DetectionPreProcessor is the generic class for image detection.
  PRE_PROCESSOR: "DetectionPreProcessor"
  # type: required string
  # meaning: predictor mode; NATIVE and ANALYSIS are supported
  PREDICTOR_MODE: "ANALYSIS"
  # type: required int
  # meaning: batch_size for each inference run
  BATCH_SIZE : 3
  # type: optional int
  # meaning: number of input tensors. Most models do not need to set this. Default is 1.
  FEEDS_SIZE: 2
  # type: optional int
  # meaning: pad the image sides to a multiple of this value. Must be 32 for fpn models. Default is 1.
  COARSEST_STRIDE: 32
```
# Linux Build Guide
## Notes
This document has been tested on `Linux` with `GCC 4.8.5` and `GCC 4.9.4`. To build with a newer G++, you must recompile the Paddle inference library; see [Build the Paddle inference library from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html#id15).
## Prerequisites
* G++ 4.8.2 ~ 4.9.4
* CUDA 9.0 / CUDA 10.0, cudnn 7+ (only required with the GPU version of the inference library)
* CMake 3.0+
Make sure the software above is installed. **All examples below assume the working directory `/root/projects/`.**
### Step 1: Download the Code
1. `git clone https://github.com/PaddlePaddle/PaddleDetection.git`
The `C++` inference code lives in `/root/projects/PaddleDetection/inference`, which does not depend on any other directory in `PaddleDetection`.
### Step 2: Download the PaddlePaddle C++ Inference Library fluid_inference
The PaddlePaddle C++ inference library comes in CPU and GPU builds, and the GPU build is further split by CUDA version into CUDA 9.0 and CUDA 10.0 packages. Download links for each C++ inference library version:
| Version | Link |
| ---- | ---- |
| CPU version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cpu_1.6.1.tgz) |
| CUDA 9.0 version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda97_1.6.1.tgz) |
| CUDA 10.0 version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda10_1.6.1.tgz) |
More prebuilt packages are available for different CPU types and instruction sets; version 1.6 of the library has been released. For the remaining versions, see: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html)
After downloading and extracting, `/root/projects/fluid_inference` contains:
```
fluid_inference
├── paddle # paddle core libraries and headers
|
├── third_party # third-party libraries and headers
|
└── version.txt # version and build info
```
### Step 3: Install and Configure OpenCV
```shell
# 0. switch to the /root/projects directory
cd /root/projects
# 1. download the OpenCV 3.4.6 source
wget -c https://paddleseg.bj.bcebos.com/inference/opencv-3.4.6.zip
# 2. extract
unzip opencv-3.4.6.zip && cd opencv-3.4.6
# 3. create the build directory and compile; this installs to /root/projects/opencv3
mkdir build && cd build
cmake .. -DCMAKE_INSTALL_PREFIX=/root/projects/opencv3 -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DWITH_IPP=OFF -DBUILD_IPP_IW=OFF -DWITH_LAPACK=OFF -DWITH_EIGEN=OFF -DCMAKE_INSTALL_LIBDIR=lib64 -DWITH_ZLIB=ON -DBUILD_ZLIB=ON -DWITH_JPEG=ON -DBUILD_JPEG=ON -DWITH_PNG=ON -DBUILD_PNG=ON -DWITH_TIFF=ON -DBUILD_TIFF=ON
make -j4
make install
```
**Note:** after these steps, `opencv` is installed under `/root/projects/opencv3`.
### Step 4: Build
The `CMake` build takes four parameters that point at the core dependencies; they are defined as follows (entries marked * are only needed with the **GPU version** of the inference library; keep the CUDA library version aligned: **use CUDA 9.0 or 10.0, not 9.2 or 10.1**):
| Parameter | Meaning |
| ---- | ---- |
| * CUDA_LIB | CUDA library path |
| * CUDNN_LIB | cudnn library path |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | Paddle inference library path |
When building against the **GPU version** of the inference library, run the following. **Be sure** to replace the parameters with the actual paths of your dependencies:
```shell
cd /root/projects/PaddleDetection/inference
mkdir build && cd build
cmake .. -DWITH_GPU=ON -DPADDLE_DIR=/root/projects/fluid_inference -DCUDA_LIB=/usr/local/cuda/lib64/ -DOPENCV_DIR=/root/projects/opencv3/ -DCUDNN_LIB=/usr/local/cuda/lib64/ -DWITH_STATIC_LIB=OFF
make
```
When building against the **CPU version** of the inference library, run:
```shell
cd /root/projects/PaddleDetection/inference
mkdir build && cd build
cmake .. -DWITH_GPU=OFF -DPADDLE_DIR=/root/projects/fluid_inference -DOPENCV_DIR=/root/projects/opencv3/ -DWITH_STATIC_LIB=OFF
make
```
### Step 5: Run Inference and Visualize
Run:
```
./detection_demo --conf=/path/to/your/conf --input_dir=/path/to/your/input/data/directory
```
For more details, see the ReadMe: [inference and visualization](../README.md)
../../../../deploy/cpp/docs/linux_build.md
\ No newline at end of file
# Visual Studio 2019 Community CMake Build Guide
On Windows we tested with `Visual Studio 2015` and `Visual Studio 2019 Community`. Microsoft has supported managing `CMake` cross-platform build projects directly since `Visual Studio 2017`, but stable and complete support only arrived in `2019`, so if you want CMake to manage the project build, we recommend doing it with `Visual Studio 2019`.
You can also build the way `VS2015` does, by converting the `CMake` project into a `VS` project; the **parts that differ** are called out in the doc, see: [Visual Studio 2015 build guide](./windows_vs2015_build.md)
## Prerequisites
* Visual Studio 2019
* CUDA 9.0 / CUDA 10.0, cudnn 7+ (only required with the GPU version of the inference library)
* CMake 3.0+
Make sure the software above is installed; we use the Community edition of `VS2019`.
**All examples below assume the working directory `D:\projects`.**
### Step 1: Download the Code
1. Download the source: [download link](https://github.com/PaddlePaddle/PaddleDetection/archive/master.zip)
2. Extract, and rename the extracted directory to `PaddleDetection`
The examples below assume the code lives at `D:\projects\PaddleDetection`.
### Step 2: Download the PaddlePaddle C++ Inference Library fluid_inference
The PaddlePaddle C++ inference library comes in two main flavors, CPU and GPU, and the GPU flavor is further split by CUDA version into CUDA 9.0 and CUDA 10.0 packages. Download the PaddlePaddle inference library matching your Windows environment and extract it to `D:\projects\`. Download links for each C++ inference library version:
| Version | Link |
| ---- | ---- |
| CPU version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_install_dir_win_cpu_1.6.zip) |
| CUDA 9.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda9_1.6.1.zip) |
| CUDA 10.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda10_1.6.1.zip) |
After extracting, `D:\projects\fluid_inference` contains:
```
fluid_inference
├── paddle # paddle core libraries and headers
|
├── third_party # third-party libraries and headers
|
└── version.txt # version and build info
```
### Step 3: Install and Configure OpenCV
1. Download OpenCV 3.4.6 for Windows from the official site: [download link](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download)
2. Run the downloaded executable and extract OpenCV to a directory of your choice, e.g. `D:\projects\opencv`
3. Configure the environment variable as follows:
    - My Computer -> Properties -> Advanced system settings -> Environment Variables
    - Under system variables, find Path (create it if missing) and double-click to edit it
    - Add a new entry with the opencv path, e.g. `D:\projects\opencv\build\x64\vc14\bin`, and save
### Step 4: Build the CMake Project Directly with Visual Studio 2019
1. Open Visual Studio 2019 Community and click `Continue without code`
![step2](https://paddleseg.bj.bcebos.com/inference/vs2019_step1.png)
2. Click `File` -> `Open` -> `CMake`
![step2.1](https://paddleseg.bj.bcebos.com/inference/vs2019_step2.png)
Select the path containing the project code and open `CMakeList.txt`:
![step2.2](https://paddleseg.bj.bcebos.com/inference/vs2019_step3.png)
3. Click `Project` -> `CMake settings for cpp_inference_demo`
![step3](https://paddleseg.bj.bcebos.com/inference/vs2019_step4.png)
4. Click `Browse` and set the build options for the `CUDA`, `OpenCV`, and `Paddle inference library` paths
The three build parameters are described below (entries marked * are only needed with the **GPU version** of the inference library; keep the CUDA library version aligned: **use CUDA 9.0 or 10.0, not 9.2, 10.1, etc.**):
| Parameter | Meaning |
| ---- | ---- |
| *CUDA_LIB | CUDA library path |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | Paddle inference library path |
**Note**: with the CPU version of the inference library, uncheck `CUDA_LIB`.
![step4](https://paddleseg.bj.bcebos.com/inference/vs2019_step5.png)
**When the settings are done**, click `Save and generate CMake cache to load variables` in the screenshot above.
5. Click `Build` -> `Build All`
![step6](https://paddleseg.bj.bcebos.com/inference/vs2019_step6.png)
### Step 5: Run Inference and Visualize
The executables built by `Visual Studio 2019` land in the `out\build\x64-Release` directory. Open `cmd` and switch to it:
```
cd D:\projects\PaddleDetection\inference\out\build\x64-Release
```
Then run:
```
detection_demo.exe --conf=/path/to/your/conf --input_dir=/path/to/your/input/data/directory
```
For more details, see the ReadMe: [inference and visualization](../README.md)
../../../../deploy/cpp/docs/windows_vs2019_build.md
\ No newline at end of file
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
**Documentation (Chinese):** [PaddleDetection C++预测部署方案](../docs/advanced_tutorials/inference/DEPLOYMENT.md) <br/>
**Documentation (English):** [PaddleDetection C++ deployment](../docs/advanced_tutorials/inference/DEPLOYMENT.md)
DEPLOY:
  USE_GPU: 1
  MODEL_PATH: "/root/projects/models/faster_rcnn_pp50"
  MODEL_FILENAME: "__model__"
  PARAMS_FILENAME: "__params__"
  EVAL_CROP_SIZE: (608, 608)
  RESIZE_TYPE: "RANGE_SCALING"
  TARGET_SHORT_SIZE: 800
  MEAN: [0.485, 0.456, 0.406]
  STD: [0.229, 0.224, 0.225]
  IMAGE_TYPE: "rgb"
  NUM_CLASSES: 1
  CHANNELS: 3
  PRE_PROCESSOR: "DetectionPreProcessor"
  PREDICTOR_MODE: "ANALYSIS"
  BATCH_SIZE: 1
  RESIZE_MAX_SIZE: 1333
  FEEDS_SIZE: 3
DEPLOY:
  USE_GPU: 1
  MODEL_PATH: "/root/projects/models/faster_rcnn_pp50_fpn"
  MODEL_FILENAME: "__model__"
  PARAMS_FILENAME: "__params__"
  EVAL_CROP_SIZE: (608, 608)
  RESIZE_TYPE: "RANGE_SCALING"
  TARGET_SHORT_SIZE: 800
  MEAN: [0.485, 0.456, 0.406]
  STD: [0.229, 0.224, 0.225]
  IMAGE_TYPE: "rgb"
  NUM_CLASSES: 1
  CHANNELS: 3
  PRE_PROCESSOR: "DetectionPreProcessor"
  PREDICTOR_MODE: "ANALYSIS"
  BATCH_SIZE: 1
  RESIZE_MAX_SIZE: 1333
  FEEDS_SIZE: 3
  COARSEST_STRIDE: 32
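The FPN config above additionally sets `COARSEST_STRIDE: 32`, since feature-pyramid models expect input tensors whose height and width are multiples of the coarsest feature stride. A minimal sketch of that padding arithmetic (the helper `align_to_stride` is hypothetical, not part of this codebase):

```cpp
#include <cmath>

// Round a resized dimension up to the next multiple of COARSEST_STRIDE so that
// FPN-style models receive inputs divisible by the coarsest feature stride.
int align_to_stride(int dim, int coarsest_stride) {
    if (coarsest_stride <= 1) return dim;  // stride 1 (the parser default) pads nothing
    return static_cast<int>(std::ceil(dim / static_cast<float>(coarsest_stride)))
           * coarsest_stride;
}
// e.g. align_to_stride(800, 32) == 800, align_to_stride(801, 32) == 832
```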
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <utils/utils.h>
#include <predictor/detection_predictor.h>
DEFINE_string(conf, "", "Configuration File Path");
DEFINE_string(input_dir, "", "Directory of Input Images");
int main(int argc, char** argv) {
// 0. parse args
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_conf.empty() || FLAGS_input_dir.empty()) {
std::cout << "Usage: ./predictor --conf=/config/path/to/your/model "
<< "--input_dir=/directory/of/your/input/images" << std::endl;
return -1;
}
// 1. create a predictor and init it with conf
PaddleSolution::DetectionPredictor predictor;
if (predictor.init(FLAGS_conf) != 0) {
#ifdef _WIN32
std::cerr << "Fail to init predictor" << std::endl;
#else
LOG(FATAL) << "Fail to init predictor";
#endif
return -1;
}
    // 2. collect all images under input_dir with the supported extensions
auto imgs = PaddleSolution::utils::get_directory_images(FLAGS_input_dir,
".jpeg|.jpg|.JPEG|.JPG|.bmp|.BMP|.png|.PNG");
// 3. predict
predictor.predict(imgs);
return 0;
}
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <glog/logging.h>
#include <yaml-cpp/yaml.h>
#include <paddle_inference_api.h>
#include <memory>
#include <string>
#include <vector>
#include <thread>
#include <chrono>
#include <algorithm>
#include <opencv2/opencv.hpp>
#include "utils/conf_parser.h"
#include "utils/utils.h"
#include "preprocessor/preprocessor.h"
namespace PaddleSolution {
class DetectionPredictor {
public:
// init a predictor with a yaml config file
int init(const std::string& conf);
// predict api
int predict(const std::vector<std::string>& imgs);
private:
int native_predict(const std::vector<std::string>& imgs);
int analysis_predict(const std::vector<std::string>& imgs);
private:
std::vector<float> _buffer;
std::vector<std::string> _imgs_batch;
std::vector<paddle::PaddleTensor> _outputs;
PaddleSolution::PaddleModelConfigPaser _model_config;
std::shared_ptr<PaddleSolution::ImagePreProcessor> _preprocessor;
std::unique_ptr<paddle::PaddlePredictor> _main_predictor;
};
} // namespace PaddleSolution
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include "preprocessor.h"
#include "preprocessor_detection.h"
#include <iostream>
namespace PaddleSolution {
std::shared_ptr<ImagePreProcessor> create_processor(const std::string& conf_file) {
auto config = std::make_shared<PaddleSolution::PaddleModelConfigPaser>();
if (!config->load_config(conf_file)) {
#ifdef _WIN32
std::cerr << "fail to load conf file [" << conf_file << "]" << std::endl;
#else
LOG(FATAL) << "fail to load conf file [" << conf_file << "]";
#endif
return nullptr;
}
if (config->_pre_processor == "DetectionPreProcessor") {
auto p = std::make_shared<DetectionPreProcessor>();
if (!p->init(config)) {
return nullptr;
}
return p;
}
#ifdef _WIN32
std::cerr << "unknown processor_name [" << config->_pre_processor << "],"
<< "please check whether PRE_PROCESSOR is set correctly" << std::endl;
#else
LOG(FATAL) << "unknown processor_name [" << config->_pre_processor << "],"
<< "please check whether PRE_PROCESSOR is set correctly";
#endif
return nullptr;
}
} // namespace PaddleSolution
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#include <string>
#include <memory>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "utils/conf_parser.h"
namespace PaddleSolution {
class ImagePreProcessor {
protected:
ImagePreProcessor() {}
public:
virtual ~ImagePreProcessor() {}
virtual bool single_process(const std::string& fname,
float* data,
int* ori_w,
int* ori_h) {
return true;
}
virtual bool batch_process(const std::vector<std::string>& imgs,
float* data,
int* ori_w,
int* ori_h) {
return true;
}
virtual bool single_process(const std::string& fname, float* data) {
return true;
}
virtual bool batch_process(const std::vector<std::string>& imgs,
float* data) {
return true;
}
virtual bool single_process(const std::string& fname,
std::vector<float> &data,
int* ori_w, int* ori_h,
int* resize_w, int* resize_h,
float* scale_ratio) {
return true;
}
virtual bool batch_process(const std::vector<std::string>& imgs,
std::vector<std::vector<float>> &data,
int* ori_w, int* ori_h, int* resize_w,
int* resize_h, float* scale_ratio) {
return true;
}
}; // end of class ImagePreProcessor
std::shared_ptr<ImagePreProcessor>
create_processor(const std::string &config_file);
} // namespace PaddleSolution
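A minimal usage sketch of the `create_processor` factory declared above, assuming a valid deploy config; the paths `conf/detection.yaml` and `test.jpg` are placeholders:

```cpp
#include <string>
#include <vector>
#include "preprocessor/preprocessor.h"

int demo() {
    // The factory returns nullptr when the config cannot be loaded or
    // PRE_PROCESSOR names an unknown processor.
    auto pre = PaddleSolution::create_processor("conf/detection.yaml");
    if (!pre) return -1;
    std::vector<float> buffer;
    int ori_w = 0, ori_h = 0, resize_w = 0, resize_h = 0;
    float scale_ratio = 0.0f;
    // DetectionPreProcessor implements this overload (see its definition below).
    if (!pre->single_process("test.jpg", buffer, &ori_w, &ori_h,
                             &resize_w, &resize_h, &scale_ratio)) {
        return -1;
    }
    return 0;
}
```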
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <thread>
#include <mutex>
#include "preprocessor_detection.h"
#include "utils/utils.h"
namespace PaddleSolution {
bool DetectionPreProcessor::single_process(const std::string& fname,
std::vector<float> &vec_data,
int* ori_w, int* ori_h,
int* resize_w, int* resize_h,
float* scale_ratio) {
cv::Mat im1 = cv::imread(fname, -1);
cv::Mat im;
if (_config->_feeds_size == 3) { // faster rcnn
im1.convertTo(im, CV_32FC3, 1/255.0);
} else if (_config->_feeds_size == 2) { // yolo v3
im = im1;
}
if (im.data == nullptr || im.empty()) {
#ifdef _WIN32
std::cerr << "Failed to open image: " << fname << std::endl;
#else
LOG(ERROR) << "Failed to open image: " << fname;
#endif
return false;
}
int channels = im.channels();
if (channels == 1) {
cv::cvtColor(im, im, cv::COLOR_GRAY2BGR);
}
channels = im.channels();
    if (channels != 3 && channels != 4) {
#ifdef _WIN32
        std::cerr << "Only RGB (gray is promoted) and RGBA images are supported." << std::endl;
#else
        LOG(ERROR) << "Only RGB (gray is promoted) and RGBA images are supported.";
#endif
        return false;
    }
*ori_w = im.cols;
*ori_h = im.rows;
cv::cvtColor(im, im, cv::COLOR_BGR2RGB);
// resize
int rw = im.cols;
int rh = im.rows;
float im_scale_ratio;
utils::scaling(_config->_resize_type, rw, rh, _config->_resize[0],
_config->_resize[1], _config->_target_short_size,
_config->_resize_max_size, im_scale_ratio);
cv::Size resize_size(rw, rh);
*resize_w = rw;
*resize_h = rh;
*scale_ratio = im_scale_ratio;
if (*ori_h != rh || *ori_w != rw) {
cv::Mat im_temp;
if (_config->_resize_type == utils::SCALE_TYPE::UNPADDING) {
cv::resize(im, im_temp, resize_size, 0, 0, cv::INTER_LINEAR);
} else if (_config->_resize_type == utils::SCALE_TYPE::RANGE_SCALING) {
cv::resize(im, im_temp, cv::Size(), im_scale_ratio,
im_scale_ratio, cv::INTER_LINEAR);
}
im = im_temp;
}
vec_data.resize(channels * rw * rh);
float *data = vec_data.data();
float* pmean = _config->_mean.data();
float* pscale = _config->_std.data();
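    // Convert HWC (OpenCV layout) to CHW (network input layout) while
    // applying per-channel (x - mean) / std normalization.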
for (int h = 0; h < rh; ++h) {
const uchar* uptr = im.ptr<uchar>(h);
const float* fptr = im.ptr<float>(h);
int im_index = 0;
for (int w = 0; w < rw; ++w) {
for (int c = 0; c < channels; ++c) {
int top_index = (c * rh + h) * rw + w;
float pixel;
if (_config->_feeds_size == 2) { // yolo v3
pixel = static_cast<float>(uptr[im_index++]) / 255.0;
} else if (_config->_feeds_size == 3) {
pixel = fptr[im_index++];
}
pixel = (pixel - pmean[c]) / pscale[c];
data[top_index] = pixel;
}
}
}
return true;
}
bool DetectionPreProcessor::batch_process(const std::vector<std::string>& imgs,
std::vector<std::vector<float>> &data,
int* ori_w, int* ori_h, int* resize_w,
int* resize_h, float* scale_ratio) {
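    // Preprocess each image on its own worker thread; all threads are
    // joined below before batch_process returns.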
std::vector<std::thread> threads;
for (int i = 0; i < imgs.size(); ++i) {
std::string path = imgs[i];
int* width = &ori_w[i];
int* height = &ori_h[i];
int* resize_width = &resize_w[i];
int* resize_height = &resize_h[i];
float* sr = &scale_ratio[i];
threads.emplace_back([this, &data, i, path, width, height,
resize_width, resize_height, sr] {
std::vector<float> buffer;
single_process(path, buffer, width, height, resize_width,
resize_height, sr);
data[i] = buffer;
});
}
for (auto& t : threads) {
if (t.joinable()) {
t.join();
}
}
return true;
}
bool DetectionPreProcessor::init(std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> config) {
_config = config;
return true;
}
} // namespace PaddleSolution
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "preprocessor.h"
namespace PaddleSolution {
class DetectionPreProcessor : public ImagePreProcessor {
public:
DetectionPreProcessor() : _config(nullptr) {
}
bool init(std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> config);
bool single_process(const std::string& fname, std::vector<float> &data,
int* ori_w, int* ori_h, int* resize_w,
int* resize_h, float* scale_ratio);
bool batch_process(const std::vector<std::string>& imgs,
std::vector<std::vector<float>> &data,
int* ori_w, int* ori_h, int* resize_w,
int* resize_h, float* scale_ratio);
private:
std::shared_ptr<PaddleSolution::PaddleModelConfigPaser> _config;
};
} // namespace PaddleSolution
{
"0" : "background",
"1" : "person",
"2" : "bicycle",
"3" : "car",
"4" : "motorcycle",
"5" : "airplane",
"6" : "bus",
"7" : "train",
"8" : "truck",
"9" : "boat",
"10" : "traffic light",
"11" : "fire hydrant",
"12" : "stop sign",
"13" : "parking meter",
"14" : "bench",
"15" : "bird",
"16" : "cat",
"17" : "dog",
"18" : "horse",
"19" : "sheep",
"20" : "cow",
"21" : "elephant",
"22" : "bear",
"23" : "zebra",
"24" : "giraffe",
"25" : "backpack",
"26" : "umbrella",
"27" : "handbag",
"28" : "tie",
"29" : "suitcase",
"30" : "frisbee",
"31" : "skis",
"32" : "snowboard",
"33" : "sports ball",
"34" : "kite",
"35" : "baseball bat",
"36" : "baseball glove",
"37" : "skateboard",
"38" : "surfboard",
"39" : "tennis racket",
"40" : "bottle",
"41" : "wine glass",
"42" : "cup",
"43" : "fork",
"44" : "knife",
"45" : "spoon",
"46" : "bowl",
"47" : "banana",
"48" : "apple",
"49" : "sandwich",
"50" : "orange",
"51" : "broccoli",
"52" : "carrot",
"53" : "hot dog",
"54" : "pizza",
"55" : "donut",
"56" : "cake",
"57" : "chair",
"58" : "couch",
"59" : "potted plant",
"60" : "bed",
"61" : "dining table",
"62" : "toilet",
"63" : "tv",
"64" : "laptop",
"65" : "mouse",
"66" : "remote",
"67" : "keyboard",
"68" : "cell phone",
"69" : "microwave",
"70" : "oven",
"71" : "toaster",
"72" : "sink",
"73" : "refrigerator",
"74" : "book",
"75" : "clock",
"76" : "vase",
"77" : "scissors",
"78" : "teddy bear",
"79" : "hair drier",
"80" : "toothbrush"
}
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: detection_result.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='detection_result.proto',
package='PaddleSolution',
syntax='proto2',
serialized_pb=_b(
'\n\x16\x64\x65tection_result.proto\x12\x0ePaddleSolution\"\x84\x01\n\x0c\x44\x65tectionBox\x12\r\n\x05\x63lass\x18\x01 \x01(\x05\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x12\n\nleft_top_x\x18\x03 \x01(\x02\x12\x12\n\nleft_top_y\x18\x04 \x01(\x02\x12\x16\n\x0eright_bottom_x\x18\x05 \x01(\x02\x12\x16\n\x0eright_bottom_y\x18\x06 \x01(\x02\"Z\n\x0f\x44\x65tectionResult\x12\x10\n\x08\x66ilename\x18\x01 \x01(\t\x12\x35\n\x0f\x64\x65tection_boxes\x18\x02 \x03(\x0b\x32\x1c.PaddleSolution.DetectionBox'
))
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DETECTIONBOX = _descriptor.Descriptor(
name='DetectionBox',
full_name='PaddleSolution.DetectionBox',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='class',
full_name='PaddleSolution.DetectionBox.class',
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score',
full_name='PaddleSolution.DetectionBox.score',
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='left_top_x',
full_name='PaddleSolution.DetectionBox.left_top_x',
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='left_top_y',
full_name='PaddleSolution.DetectionBox.left_top_y',
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='right_bottom_x',
full_name='PaddleSolution.DetectionBox.right_bottom_x',
index=4,
number=5,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='right_bottom_y',
full_name='PaddleSolution.DetectionBox.right_bottom_y',
index=5,
number=6,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[],
serialized_start=43,
serialized_end=175)
_DETECTIONRESULT = _descriptor.Descriptor(
name='DetectionResult',
full_name='PaddleSolution.DetectionResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='filename',
full_name='PaddleSolution.DetectionResult.filename',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='detection_boxes',
full_name='PaddleSolution.DetectionResult.detection_boxes',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[],
serialized_start=177,
serialized_end=267)
_DETECTIONRESULT.fields_by_name['detection_boxes'].message_type = _DETECTIONBOX
DESCRIPTOR.message_types_by_name['DetectionBox'] = _DETECTIONBOX
DESCRIPTOR.message_types_by_name['DetectionResult'] = _DETECTIONRESULT
DetectionBox = _reflection.GeneratedProtocolMessageType(
'DetectionBox',
(_message.Message, ),
dict(
DESCRIPTOR=_DETECTIONBOX,
__module__='detection_result_pb2'
# @@protoc_insertion_point(class_scope:PaddleSolution.DetectionBox)
))
_sym_db.RegisterMessage(DetectionBox)
DetectionResult = _reflection.GeneratedProtocolMessageType(
'DetectionResult',
(_message.Message, ),
dict(
DESCRIPTOR=_DETECTIONRESULT,
__module__='detection_result_pb2'
# @@protoc_insertion_point(class_scope:PaddleSolution.DetectionResult)
))
_sym_db.RegisterMessage(DetectionResult)
# @@protoc_insertion_point(module_scope)
# coding: utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import detection_result_pb2
import cv2
import sys
import gflags
import numpy as np
import json
from PIL import Image, ImageDraw, ImageFont
import io
Flags = gflags.FLAGS
gflags.DEFINE_string('img_path', 'abc', 'image path')
gflags.DEFINE_string('img_result_path', 'def', 'image result path')
gflags.DEFINE_float('threshold', 0.0, 'threshold of score')
gflags.DEFINE_string('c2l_path', 'ghk', 'class to label path')
def colormap(rgb=False):
"""
Get colormap
"""
color_list = np.array([
0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,
0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078,
0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,
1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000,
0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667,
0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,
0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000,
1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,
0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,
0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667,
0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,
0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,
0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333,
0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000,
1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,
1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167,
0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000,
0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,
0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,
0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000,
0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,
0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286,
0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714,
0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000
]).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
if not rgb:
color_list = color_list[:, ::-1]
return color_list
if __name__ == "__main__":
if len(sys.argv) != 5:
print(
"Usage: python vis.py --img_path=/path/to/image --img_result_path=/path/to/image_result.pb --threshold=0.1 --c2l_path=/path/to/class2label.json"
)
else:
Flags(sys.argv)
color_list = colormap(rgb=True)
text_thickness = 1
text_scale = 0.3
with open(Flags.img_result_path, "rb") as f:
detection_result = detection_result_pb2.DetectionResult()
detection_result.ParseFromString(f.read())
img = cv2.imread(Flags.img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
class2LabelMap = dict()
with io.open(Flags.c2l_path, "r", encoding="utf-8") as json_f:
class2LabelMap = json.load(json_f)
for box in detection_result.detection_boxes:
if box.score >= Flags.threshold:
box_class = getattr(box, 'class')
text_class_score_str = "%s %.2f" % (
class2LabelMap.get(str(box_class)), box.score)
text_point = (int(box.left_top_x), int(box.left_top_y))
ptLeftTop = (int(box.left_top_x), int(box.left_top_y))
ptRightBottom = (int(box.right_bottom_x),
int(box.right_bottom_y))
box_thickness = 1
color = tuple([int(c) for c in color_list[box_class]])
cv2.rectangle(img, ptLeftTop, ptRightBottom, color,
box_thickness, 8)
if text_point[1] < 0:
text_point = (int(box.left_top_x),
int(box.right_bottom_y))
WHITE = (255, 255, 255)
font = cv2.FONT_HERSHEY_SIMPLEX
text_size = cv2.getTextSize(text_class_score_str, font,
text_scale, text_thickness)
text_box_left_top = (text_point[0],
text_point[1] - text_size[0][1])
text_box_right_bottom = (
text_point[0] + text_size[0][0], text_point[1])
cv2.rectangle(img, text_box_left_top,
text_box_right_bottom, color, -1, 8)
cv2.putText(img, text_class_score_str, text_point, font,
text_scale, WHITE, text_thickness)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imwrite(Flags.img_path + ".png", img)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <yaml-cpp/yaml.h>
#include <iostream>
#include <vector>
#include <string>
#include <map>
namespace PaddleSolution {
class PaddleModelConfigPaser {
std::map<std::string, int> _scaling_map;
public:
PaddleModelConfigPaser()
:_class_num(0),
_channels(0),
_use_gpu(0),
_batch_size(1),
_target_short_size(0),
_model_file_name("__model__"),
_param_file_name("__params__"),
_scaling_map{{"UNPADDING", 0},
{"RANGE_SCALING", 1}},
_feeds_size(1),
_coarsest_stride(1) {}
~PaddleModelConfigPaser() {}
void reset() {
_crop_size.clear();
_resize.clear();
_mean.clear();
_std.clear();
_img_type.clear();
_class_num = 0;
_channels = 0;
_use_gpu = 0;
_target_short_size = 0;
_batch_size = 1;
_model_file_name = "__model__";
_model_path = "./";
_param_file_name = "__params__";
_resize_type = 0;
_resize_max_size = 0;
_feeds_size = 1;
_coarsest_stride = 1;
}
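    // Rewrite "(w, h)" tuples from the config as "[w, h]" so YAML::Load in
    // parse_str_to_vec below can parse them as a YAML sequence.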
std::string process_parenthesis(const std::string& str) {
if (str.size() < 2) {
return str;
}
std::string nstr(str);
if (str[0] == '(' && str.back() == ')') {
nstr[0] = '[';
nstr[str.size() - 1] = ']';
}
return nstr;
}
template <typename T>
std::vector<T> parse_str_to_vec(const std::string& str) {
std::vector<T> data;
auto node = YAML::Load(str);
for (const auto& item : node) {
data.push_back(item.as<T>());
}
return data;
}
bool load_config(const std::string& conf_file) {
reset();
YAML::Node config;
try {
config = YAML::LoadFile(conf_file);
} catch(...) {
return false;
}
// 1. get resize
if (config["DEPLOY"]["EVAL_CROP_SIZE"].IsDefined()) {
auto str = config["DEPLOY"]["EVAL_CROP_SIZE"].as<std::string>();
_resize = parse_str_to_vec<int>(process_parenthesis(str));
} else {
std::cerr << "Please set EVAL_CROP_SIZE: (xx, xx)" << std::endl;
return false;
}
        // 2. get crop_size (defaults to EVAL_CROP_SIZE)
if (config["DEPLOY"]["CROP_SIZE"].IsDefined()) {
auto crop_str = config["DEPLOY"]["CROP_SIZE"].as<std::string>();
_crop_size = parse_str_to_vec<int>(process_parenthesis(crop_str));
} else {
_crop_size = _resize;
}
        // 3. get mean
if (config["DEPLOY"]["MEAN"].IsDefined()) {
for (const auto& item : config["DEPLOY"]["MEAN"]) {
_mean.push_back(item.as<float>());
}
} else {
std::cerr << "Please set MEAN: [xx, xx, xx]" << std::endl;
return false;
}
        // 4. get std
        if (config["DEPLOY"]["STD"].IsDefined()) {
for (const auto& item : config["DEPLOY"]["STD"]) {
_std.push_back(item.as<float>());
}
} else {
std::cerr << "Please set STD: [xx, xx, xx]" << std::endl;
return false;
}
        // 5. get image type
if (config["DEPLOY"]["IMAGE_TYPE"].IsDefined()) {
_img_type = config["DEPLOY"]["IMAGE_TYPE"].as<std::string>();
} else {
std::cerr << "Please set IMAGE_TYPE: \"rgb\" or \"rgba\"" << std::endl;
return false;
}
        // 6. get class number
if (config["DEPLOY"]["NUM_CLASSES"].IsDefined()) {
_class_num = config["DEPLOY"]["NUM_CLASSES"].as<int>();
} else {
std::cerr << "Please set NUM_CLASSES: x" << std::endl;
return false;
}
// 7. set model path
if (config["DEPLOY"]["MODEL_PATH"].IsDefined()) {
_model_path = config["DEPLOY"]["MODEL_PATH"].as<std::string>();
} else {
std::cerr << "Please set MODEL_PATH: \"/path/to/model_dir\"" << std::endl;
return false;
}
// 8. get model file_name
if (config["DEPLOY"]["MODEL_FILENAME"].IsDefined()) {
_model_file_name = config["DEPLOY"]["MODEL_FILENAME"].as<std::string>();
} else {
_model_file_name = "__model__";
}
// 9. get model param file name
if (config["DEPLOY"]["PARAMS_FILENAME"].IsDefined()) {
_param_file_name
= config["DEPLOY"]["PARAMS_FILENAME"].as<std::string>();
} else {
_param_file_name = "__params__";
}
// 10. get pre_processor
if (config["DEPLOY"]["PRE_PROCESSOR"].IsDefined()) {
_pre_processor = config["DEPLOY"]["PRE_PROCESSOR"].as<std::string>();
} else {
std::cerr << "Please set PRE_PROCESSOR: \"DetectionPreProcessor\"" << std::endl;
return false;
}
// 11. use_gpu
if (config["DEPLOY"]["USE_GPU"].IsDefined()) {
_use_gpu = config["DEPLOY"]["USE_GPU"].as<int>();
} else {
_use_gpu = 0;
}
// 12. predictor_mode
if (config["DEPLOY"]["PREDICTOR_MODE"].IsDefined()) {
_predictor_mode = config["DEPLOY"]["PREDICTOR_MODE"].as<std::string>();
} else {
std::cerr << "Please set PREDICTOR_MODE: \"NATIVE\" or \"ANALYSIS\"" << std::endl;
return false;
}
// 13. batch_size
if (config["DEPLOY"]["BATCH_SIZE"].IsDefined()) {
_batch_size = config["DEPLOY"]["BATCH_SIZE"].as<int>();
} else {
_batch_size = 1;
}
// 14. channels
if (config["DEPLOY"]["CHANNELS"].IsDefined()) {
_channels = config["DEPLOY"]["CHANNELS"].as<int>();
} else {
std::cerr << "Please set CHANNELS: x" << std::endl;
return false;
}
// 15. target_short_size
if (config["DEPLOY"]["TARGET_SHORT_SIZE"].IsDefined()) {
_target_short_size = config["DEPLOY"]["TARGET_SHORT_SIZE"].as<int>();
}
        // 16. resize_type
if (config["DEPLOY"]["RESIZE_TYPE"].IsDefined() &&
_scaling_map.find(config["DEPLOY"]["RESIZE_TYPE"].as<std::string>()) != _scaling_map.end()) {
_resize_type = _scaling_map[config["DEPLOY"]["RESIZE_TYPE"].as<std::string>()];
} else {
_resize_type = 0;
}
        // 17. resize_max_size
if (config["DEPLOY"]["RESIZE_MAX_SIZE"].IsDefined()) {
_resize_max_size = config["DEPLOY"]["RESIZE_MAX_SIZE"].as<int>();
}
        // 18. feeds_size
if (config["DEPLOY"]["FEEDS_SIZE"].IsDefined()) {
_feeds_size = config["DEPLOY"]["FEEDS_SIZE"].as<int>();
}
// 19. coarsest_stride
if (config["DEPLOY"]["COARSEST_STRIDE"].IsDefined()) {
_coarsest_stride = config["DEPLOY"]["COARSEST_STRIDE"].as<int>();
}
return true;
}
void debug() const {
std::cout << "SCALE_RESIZE: (" << _resize[0] << ", "
<< _resize[1] << ")" << std::endl;
std::cout << "MEAN: [";
for (int i = 0; i < _mean.size(); ++i) {
if (i != _mean.size() - 1) {
std::cout << _mean[i] << ", ";
} else {
std::cout << _mean[i];
}
}
std::cout << "]" << std::endl;
std::cout << "STD: [";
for (int i = 0; i < _std.size(); ++i) {
if (i != _std.size() - 1) {
std::cout << _std[i] << ", ";
} else {
std::cout << _std[i];
}
}
std::cout << "]" << std::endl;
std::cout << "DEPLOY.TARGET_SHORT_SIZE: " << _target_short_size
<< std::endl;
std::cout << "DEPLOY.IMAGE_TYPE: " << _img_type << std::endl;
std::cout << "DEPLOY.NUM_CLASSES: " << _class_num << std::endl;
std::cout << "DEPLOY.CHANNELS: " << _channels << std::endl;
std::cout << "DEPLOY.MODEL_PATH: " << _model_path << std::endl;
std::cout << "DEPLOY.MODEL_FILENAME: " << _model_file_name
<< std::endl;
std::cout << "DEPLOY.PARAMS_FILENAME: " << _param_file_name
<< std::endl;
std::cout << "DEPLOY.PRE_PROCESSOR: " << _pre_processor << std::endl;
std::cout << "DEPLOY.USE_GPU: " << _use_gpu << std::endl;
std::cout << "DEPLOY.PREDICTOR_MODE: " << _predictor_mode << std::endl;
std::cout << "DEPLOY.BATCH_SIZE: " << _batch_size << std::endl;
}
// DEPLOY.COARSEST_STRIDE
int _coarsest_stride;
// DEPLOY.FEEDS_SIZE
int _feeds_size;
// DEPLOY.RESIZE_TYPE 0:unpadding 1:rangescaling Default:0
int _resize_type;
// DEPLOY.RESIZE_MAX_SIZE
int _resize_max_size;
// DEPLOY.CROP_SIZE
std::vector<int> _crop_size;
    // DEPLOY.EVAL_CROP_SIZE
std::vector<int> _resize;
// DEPLOY.MEAN
std::vector<float> _mean;
// DEPLOY.STD
std::vector<float> _std;
// DEPLOY.IMAGE_TYPE
std::string _img_type;
// DEPLOY.TARGET_SHORT_SIZE
int _target_short_size;
// DEPLOY.NUM_CLASSES
int _class_num;
// DEPLOY.CHANNELS
int _channels;
// DEPLOY.MODEL_PATH
std::string _model_path;
// DEPLOY.MODEL_FILENAME
std::string _model_file_name;
// DEPLOY.PARAMS_FILENAME
std::string _param_file_name;
// DEPLOY.PRE_PROCESSOR
std::string _pre_processor;
// DEPLOY.USE_GPU
int _use_gpu;
// DEPLOY.PREDICTOR_MODE
std::string _predictor_mode;
// DEPLOY.BATCH_SIZE
int _batch_size;
};
} // namespace PaddleSolution
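A minimal sketch of loading and inspecting a DEPLOY config with the parser above; `conf/detection.yaml` is a placeholder path:

```cpp
#include "utils/conf_parser.h"

int demo() {
    PaddleSolution::PaddleModelConfigPaser config;
    // load_config() returns false when the YAML file is missing or a
    // required key (e.g. EVAL_CROP_SIZE, MEAN, STD) is absent.
    if (!config.load_config("conf/detection.yaml")) {
        return -1;
    }
    config.debug();  // print the parsed DEPLOY.* fields to stdout
    return 0;
}
```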
syntax = "proto2";
package PaddleSolution;
message DetectionBox {
optional int32 class = 1;
optional float score = 2;
optional float left_top_x = 3;
optional float left_top_y = 4;
optional float right_bottom_x = 5;
optional float right_bottom_y = 6;
}
message DetectionResult {
optional string filename = 1;
repeated DetectionBox detection_boxes = 2;
}
//message DetectionResultsContainer {
// repeated DetectionResult result = 1;
//}