diff --git a/README.md b/README.md index 36c913b0d133d4e72b89d05cc81485c4dd2ca29a..a65a4185ace92ab01f0b6d1b60caef593260155f 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,7 @@ PaddleDetection的目的是为工业界和学术界提供丰富、易用的目 - [推理部署](inference) - [模型导出教程](docs/advanced_tutorials/inference/EXPORT_MODEL.md) - [预测引擎Python API使用示例](docs/advanced_tutorials/inference/INFERENCE.md) - - [C++推理部署](inference/README.md) + - [C++推理部署](deploy/README.md) - [推理Benchmark](docs/advanced_tutorials/inference/BENCHMARK_INFER_cn.md) ## 模型库 diff --git a/deploy/README.md b/deploy/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a049ca3eba7e1aa2626ba40dc6a99b8df7a677a0 --- /dev/null +++ b/deploy/README.md @@ -0,0 +1,22 @@ +# PaddleDetection 预测部署 + +`PaddleDetection`目前支持使用`Python`和`C++`部署在`Windows` 和`Linux` 上运行。 + +## 模型导出 +训练得到一个满足要求的模型后,如果想要将该模型接入到C++预测库,需要通过`tools/export_model.py`导出该模型。 + +- [导出教程](../docs/advanced_tutorials/inference/EXPORT_MODEL.md) + +模型导出后, 目录结构如下(以`yolov3_darknet`为例): +``` +yolov3_darknet # 模型目录 +├── infer_cfg.yml # 模型配置信息 +├── __model__ # 模型文件 +└── __params__ # 参数文件 +``` + +预测时,该目录所在的路径会作为程序的输入参数。 + +## 预测部署 +- [1. Python预测(支持 Linux 和 Windows)](./python/) +- [2. C++预测(支持 Linux 和 Windows)](./cpp/) diff --git a/inference/CMakeLists.txt b/deploy/cpp/CMakeLists.txt similarity index 70% rename from inference/CMakeLists.txt rename to deploy/cpp/CMakeLists.txt index d168639f6aac124308f276459594302eec19ef11..5afe6bedc78298e489db9fbbd3f952cb11306fc5 100644 --- a/inference/CMakeLists.txt +++ b/deploy/cpp/CMakeLists.txt @@ -1,17 +1,20 @@ cmake_minimum_required(VERSION 3.0) -project(cpp_inference_demo CXX C) +project(PaddleObjectDetector CXX C) option(WITH_MKL "Compile demo with MKL/OpenBlas support,defaultuseMKL." ON) option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON) option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON) -option(USE_TENSORRT "Compile demo with TensorRT." OFF) +option(WITH_TENSORRT "Compile demo with TensorRT." 
OFF) SET(PADDLE_DIR "" CACHE PATH "Location of libraries") SET(OPENCV_DIR "" CACHE PATH "Location of libraries") SET(CUDA_LIB "" CACHE PATH "Location of libraries") +include(cmake/yaml-cpp.cmake) -include(external-cmake/yaml-cpp.cmake) +include_directories("${CMAKE_SOURCE_DIR}/") +include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include") +link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib") macro(safe_set_static_flag) foreach(flag_var @@ -19,7 +22,7 @@ macro(safe_set_static_flag) CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) if(${flag_var} MATCHES "/MD") string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") - endif(${flag_var} MATCHES "/MD") + endif(${flag_var} MATCHES "/MD") endforeach(flag_var) endmacro() @@ -36,7 +39,6 @@ if (NOT DEFINED OPENCV_DIR OR ${OPENCV_DIR} STREQUAL "") endif() include_directories("${CMAKE_SOURCE_DIR}/") -include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include") include_directories("${PADDLE_DIR}/") include_directories("${PADDLE_DIR}/third_party/install/protobuf/include") include_directories("${PADDLE_DIR}/third_party/install/glog/include") @@ -65,21 +67,20 @@ link_directories("${PADDLE_DIR}/third_party/install/glog/lib") link_directories("${PADDLE_DIR}/third_party/install/gflags/lib") link_directories("${PADDLE_DIR}/third_party/install/xxhash/lib") link_directories("${PADDLE_DIR}/paddle/lib/") -link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib") link_directories("${CMAKE_CURRENT_BINARY_DIR}") + if (WIN32) include_directories("${PADDLE_DIR}/paddle/fluid/inference") include_directories("${PADDLE_DIR}/paddle/include") link_directories("${PADDLE_DIR}/paddle/fluid/inference") - include_directories("${OPENCV_DIR}/build/include") - include_directories("${OPENCV_DIR}/opencv/build/include") - link_directories("${OPENCV_DIR}/build/x64/vc14/lib") + find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH) + else () + find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH) include_directories("${PADDLE_DIR}/paddle/include") link_directories("${PADDLE_DIR}/paddle/lib") - include_directories("${OPENCV_DIR}/include") - link_directories("${OPENCV_DIR}/lib") endif () +include_directories(${OpenCV_INCLUDE_DIRS}) if (WIN32) add_definitions("/DGOOGLE_GLOG_DLL_DECL=") @@ -92,7 +93,7 @@ if (WIN32) add_definitions(-DSTATIC_LIB) endif() else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -o2 -std=c++11") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -o2 -fopenmp -std=c++11") set(CMAKE_STATIC_LIBRARY_PREFIX "") endif() @@ -106,11 +107,11 @@ if (WITH_GPU) message(FATAL_ERROR "please set CUDNN_LIB with -DCUDNN_LIB=/path/cudnn_v7.4/cuda/lib64") endif() endif(NOT WIN32) -endif() +endif() if (NOT WIN32) - if (USE_TENSORRT AND WITH_GPU) + if (WITH_TENSORRT AND WITH_GPU) include_directories("${PADDLE_DIR}/third_party/install/tensorrt/include") link_directories("${PADDLE_DIR}/third_party/install/tensorrt/lib") endif() @@ -149,14 +150,14 @@ else() set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() -if(WIN32) +if (WIN32) if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}") set(DEPS ${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}) - else() + else() set(DEPS ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}) - endif() + endif() endif() if(WITH_STATIC_LIB) @@ -168,11 +169,10 @@ else() endif() if (NOT 
WIN32) - set(EXTERNAL_LIB "-lrt -ldl -lpthread") set(DEPS ${DEPS} - ${MATH_LIB} ${MKLDNN_LIB} - glog gflags protobuf yaml-cpp z xxhash - ${EXTERNAL_LIB}) + ${MATH_LIB} ${MKLDNN_LIB} + glog gflags protobuf z xxhash yaml-cpp + ) if(EXISTS "${PADDLE_DIR}/third_party/install/snappystream/lib") set(DEPS ${DEPS} snappystream) endif() @@ -182,7 +182,7 @@ if (NOT WIN32) else() set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} - opencv_world346 glog libyaml-cppmt gflags_static libprotobuf zlibstatic xxhash ${EXTERNAL_LIB}) + glog gflags_static libprotobuf zlibstatic xxhash libyaml-cppmt) set(DEPS ${DEPS} libcmt shlwapi) if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib") set(DEPS ${DEPS} snappy) @@ -194,7 +194,7 @@ endif(NOT WIN32) if(WITH_GPU) if(NOT WIN32) - if (USE_TENSORRT) + if (WITH_TENSORRT) set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer${CMAKE_STATIC_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() @@ -207,58 +207,18 @@ if(WITH_GPU) endif() endif() -if (NOT WIN32) - set(OPENCV_LIB_DIR ${OPENCV_DIR}/lib) - if(EXISTS "${OPENCV_LIB_DIR}") - message("OPENCV_LIB:" ${OPENCV_LIB_DIR}) - else() - set(OPENCV_LIB_DIR ${OPENCV_DIR}/lib64) - message("OPENCV_LIB:" ${OPENCV_LIB_DIR}) - endif() - - set(OPENCV_3RD_LIB_DIR ${OPENCV_DIR}/share/OpenCV/3rdparty/lib) - if(EXISTS "${OPENCV_3RD_LIB_DIR}") - message("OPENCV_3RD_LIB_DIR:" ${OPENCV_3RD_LIB_DIR}) - else() - set(OPENCV_3RD_LIB_DIR ${OPENCV_DIR}/share/OpenCV/3rdparty/lib64) - message("OPENCV_3RD_LIB_DIR:" ${OPENCV_3RD_LIB_DIR}) - endif() - - set(DEPS ${DEPS} ${OPENCV_LIB_DIR}/libopencv_imgcodecs${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_LIB_DIR}/libopencv_imgproc${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_LIB_DIR}/libopencv_core${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_LIB_DIR}/libopencv_highgui${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libIlmImf${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/liblibjasper${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/liblibpng${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/liblibtiff${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libittnotify${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/liblibjpeg-turbo${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/liblibwebp${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libzlib${CMAKE_STATIC_LIBRARY_SUFFIX}) - if(EXISTS "${OPENCV_3RD_LIB_DIR}/libippiw${CMAKE_STATIC_LIBRARY_SUFFIX}") - set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libippiw${CMAKE_STATIC_LIBRARY_SUFFIX}) - endif() - if(EXISTS "${OPENCV_3RD_LIB_DIR}/libippicv${CMAKE_STATIC_LIBRARY_SUFFIX}") - set(DEPS ${DEPS} ${OPENCV_3RD_LIB_DIR}/libippicv${CMAKE_STATIC_LIBRARY_SUFFIX}) - endif() +if (NOT WIN32) + set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread") + set(DEPS ${DEPS} ${EXTERNAL_LIB}) endif() -SET(PADDLESEG_INFERENCE_SRCS preprocessor/preprocessor.cpp - preprocessor/preprocessor_detection.cpp predictor/detection_predictor.cpp - utils/detection_result.pb.cc) - -ADD_LIBRARY(libpaddleseg_inference STATIC ${PADDLESEG_INFERENCE_SRCS}) -target_link_libraries(libpaddleseg_inference ${DEPS}) - -add_executable(detection_demo detection_demo.cpp) +set(DEPS ${DEPS} ${OpenCV_LIBS}) +add_executable(main src/main.cc src/preprocess_op.cc src/object_detector.cc) 
+ADD_DEPENDENCIES(main ext-yaml-cpp) +target_link_libraries(main ${DEPS}) -ADD_DEPENDENCIES(libpaddleseg_inference ext-yaml-cpp) -ADD_DEPENDENCIES(detection_demo ext-yaml-cpp libpaddleseg_inference) -target_link_libraries(detection_demo ${DEPS} libpaddleseg_inference) - -if (WIN32) - add_custom_command(TARGET detection_demo POST_BUILD +if (WIN32 AND WITH_MKL) + add_custom_command(TARGET main POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./mklml.dll COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll @@ -267,5 +227,3 @@ if (WIN32) COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll ) endif() - -execute_process(COMMAND cp -r ${CMAKE_SOURCE_DIR}/images ${CMAKE_SOURCE_DIR}/conf ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/deploy/cpp/CMakeSettings.json b/deploy/cpp/CMakeSettings.json new file mode 100644 index 0000000000000000000000000000000000000000..9ab7a33e7dd894f3fcacda809d1b571e73dcdc6e --- /dev/null +++ b/deploy/cpp/CMakeSettings.json @@ -0,0 +1,52 @@ +{ + "configurations": [ + { + "name": "x64-Release", + "generator": "Ninja", + "configurationType": "RelWithDebInfo", + "inheritEnvironments": [ "msvc_x64_x64" ], + "buildRoot": "${projectDir}\\out\\build\\${name}", + "installRoot": "${projectDir}\\out\\install\\${name}", + "cmakeCommandArgs": "", + "buildCommandArgs": "-v", + "ctestCommandArgs": "", + "variables": [ + { + "name": "CUDA_LIB", + "value": "D:/projects/packages/cuda10_0/lib64", + "type": "PATH" + }, + { + "name": "OPENCV_DIR", + "value": "D:/projects/packages/opencv3_4_6", + "type": "PATH" + }, + { + "name": "PADDLE_DIR", + "value": "D:/projects/packages/fluid_inference", + "type": "PATH" + }, + { + "name": "CMAKE_BUILD_TYPE", + "value": "Release", + "type": "STRING" + }, + { + "name": "WITH_STATIC_LIB", + "value": "True", + "type": "BOOL" + }, + { + "name": "WITH_MKL", + "value": "True", + "type": "BOOL" + }, + { + "name": "WITH_GPU", + "value": "True", + "type": "BOOL" + } + ] + } + ] +} diff --git a/deploy/cpp/README.md b/deploy/cpp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7c2ee467d965fa022e1721dc15cda9ac0ee650b5 --- /dev/null +++ b/deploy/cpp/README.md @@ -0,0 +1,71 @@ +# PaddleDetection C++预测部署方案 + +## 本文档结构 + +[1.说明](#1说明) + +[2.主要目录和文件](#2主要目录和文件) + +[3.编译部署](#3编译) + + + +## 1.说明 + +本目录为用户提供一个跨平台的`C++`部署方案,让用户通过`PaddleDetection`训练的模型导出后,即可基于本项目快速运行,也可以快速集成代码结合到自己的项目实际应用中去。 + +主要设计的目标包括以下四点: +- 跨平台,支持在 `Windows` 和 `Linux` 完成编译、二次开发集成和部署运行 +- 可扩展性,支持用户针对新模型开发自己特殊的数据预处理等逻辑 +- 高性能,除了`PaddlePaddle`自身带来的性能优势,我们还针对图像检测的特点对关键步骤进行了性能优化 +- 支持各种不同检测模型结构,包括`Yolov3`/`Faster_RCNN`/`SSD`/`RetinaNet`等 + +## 2.主要目录和文件 + +```bash +deploy/cpp +| +├── src +│ ├── main.cc # 集成代码示例, 程序入口 +│ ├── object_detector.cc # 模型加载和预测主要逻辑封装类实现 +│ └── preprocess_op.cc # 预处理相关主要逻辑封装实现 +| +├── include +│ ├── config_parser.h # 导出模型配置yaml文件解析 +│ ├── object_detector.h # 模型加载和预测主要逻辑封装类 +│ └── preprocess_op.h # 预处理相关主要逻辑类封装 +| +├── docs +│ ├── linux_build.md # Linux 编译指南 +│ └── windows_vs2019_build.md # Windows VS2019编译指南 +│ +├── build.sh # 编译命令脚本 +│ +├── CMakeList.txt # cmake编译入口文件 +| +├── CMakeSettings.json # Visual Studio 2019 CMake项目编译设置 +│ +└── cmake # 依赖的外部项目cmake(目前仅有yaml-cpp) + +``` + +## 3.编译部署 + +### 3.1 导出模型 
+请确认您已经基于`PaddleDetection`的[export_model.py](../../tools/export_model.py)导出您的模型,并妥善保存到合适的位置。导出模型细节请参考 [导出模型教程](../../docs/advanced_tutorials/inference/EXPORT_MODEL.md)。 + +模型导出后, 目录结构如下(以`yolov3_darknet`为例): +``` +yolov3_darknet # 模型目录 +├── infer_cfg.yml # 模型配置信息 +├── __model__ # 模型文件 +└── __params__ # 参数文件 +``` + +预测时,该目录所在的路径会作为程序的输入参数。 + +### 3.2 编译 + +仅支持在`Windows`和`Linux`平台编译和使用 +- [Linux 编译指南](../../docs/advanced_tutorials/inference/docs/linux_build.md) +- [Windows编译指南(使用Visual Studio 2019)](../../docs/advanced_tutorials/inference/docs/windows_vs2019_build.md) diff --git a/inference/external-cmake/yaml-cpp.cmake b/deploy/cpp/cmake/yaml-cpp.cmake similarity index 87% rename from inference/external-cmake/yaml-cpp.cmake rename to deploy/cpp/cmake/yaml-cpp.cmake index 15fa2674e00d85f1db7bbdfdceeebadaf0eabf5a..fc2e37f71679ee1e8d6d45b857d2c0aa82a3253a 100644 --- a/inference/external-cmake/yaml-cpp.cmake +++ b/deploy/cpp/cmake/yaml-cpp.cmake @@ -7,8 +7,8 @@ message("${CMAKE_BUILD_TYPE}") ExternalProject_Add( ext-yaml-cpp - GIT_REPOSITORY https://github.com/jbeder/yaml-cpp.git - GIT_TAG e0e01d53c27ffee6c86153fa41e7f5e57d3e5c90 + URL https://bj.bcebos.com/paddlex/deploy/deps/yaml-cpp.zip + URL_MD5 9542d6de397d1fbd649ed468cb5850e6 CMAKE_ARGS -DYAML_CPP_BUILD_TESTS=OFF -DYAML_CPP_BUILD_TOOLS=OFF diff --git a/deploy/cpp/docs/linux_build.md b/deploy/cpp/docs/linux_build.md new file mode 100644 index 0000000000000000000000000000000000000000..1882fdc27f773f834eabd0008d829938af1a7d4f --- /dev/null +++ b/deploy/cpp/docs/linux_build.md @@ -0,0 +1,104 @@ +# Linux平台编译指南 + +## 说明 +本文档在 `Linux`平台使用`GCC 4.8.5` 和 `GCC 4.9.4`测试过,如果需要使用更高G++版本编译使用,则需要重新编译Paddle预测库,请参考: [从源码编译Paddle预测库](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html#id15)。 + +## 前置条件 +* G++ 4.8.2 ~ 4.9.4 +* CUDA 9.0 / CUDA 10.0, cudnn 7+ (仅在使用GPU版本的预测库时需要) +* CMake 3.0+ + +请确保系统已经安装好上述基本软件,**下面所有示例以工作目录为 `/root/projects/`演示**。 + +### Step1: 下载代码 + + `git clone https://github.com/PaddlePaddle/PaddleDetection.git` + +**说明**:其中`C++`预测代码在`/root/projects/PaddleDetection/deploy/cpp` 目录,该目录不依赖任何`PaddleDetection`下其他目录。 + + +### Step2: 下载PaddlePaddle C++ 预测库 fluid_inference + +PaddlePaddle C++ 预测库针对不同的`CPU`和`CUDA`版本提供了不同的预编译版本,请根据实际情况下载: [C++预测库下载列表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html) + + +下载并解压后`/root/projects/fluid_inference`目录包含内容为: +``` +fluid_inference +├── paddle # paddle核心库和头文件 +| +├── third_party # 第三方依赖库和头文件 +| +└── version.txt # 版本和编译信息 +``` + +**注意:** 预编译版本除`nv-jetson-cuda10-cudnn7.5-trt5` 以外其它包都是基于`GCC 4.8.5`编译,使用高版本`GCC`可能存在 `ABI`兼容性问题,建议降级或[自行编译预测库](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html)。 + + +### Step4: 编译 + +编译`cmake`的命令在`scripts/build.sh`中,请根据实际情况修改主要参数,其主要内容说明如下: +``` +# 是否使用GPU(即是否使用 CUDA) +WITH_GPU=ON +# 是否集成 TensorRT(仅WITH_GPU=ON 有效) +WITH_TENSORRT=OFF +# 上一步下载的 Paddle 预测库路径 +PADDLE_DIR=/root/projects/deps/fluid_inference/ +# OPENCV 路径, 如果使用自带预编译版本可不设置 +OPENCV_DIR=$(pwd)/deps/opencv346/ +# CUDA 的 lib 路径 +CUDA_LIB=/usr/local/cuda/lib64/ +# CUDNN 的 lib 路径 +CUDNN_LIB=/usr/local/cuda/lib64/ + +# 以下无需改动 + +sh $(pwd)/scripts/bootstrap.sh +rm -rf build +mkdir -p build +cd build +cmake .. 
\ +    -DWITH_GPU=${WITH_GPU} \ +    -DWITH_TENSORRT=${WITH_TENSORRT} \ +    -DPADDLE_DIR=${PADDLE_DIR} \ +    -DCUDA_LIB=${CUDA_LIB} \ +    -DCUDNN_LIB=${CUDNN_LIB} \ +    -DOPENCV_DIR=${OPENCV_DIR} +make + +``` + +修改脚本设置好主要参数后,执行`build`脚本: + ```shell + sh ./scripts/build.sh + ``` + + +### Step5: 预测及可视化 +编译成功后,预测入口程序为`build/main`,其主要命令行参数说明如下: +| 参数 | 说明 | +| ---- | ---- | +| model_dir | 导出的预测模型所在路径 | +| image_path | 要预测的图片文件路径 | +| video_path | 要预测的视频文件路径 | +| use_gpu | 是否使用 GPU 预测, 支持值为0或1(默认值为0)| + +**注意**:如果同时设置了`video_path`和`image_path`,程序仅预测`video_path`。 + + +`样例一`: +```shell +#不使用`GPU`测试图片 `/root/projects/images/test.jpeg` +./build/main --model_dir=/root/projects/models/yolov3_darknet --image_path=/root/projects/images/test.jpeg +``` + +图片文件`可视化预测结果`会保存在当前目录下`output.jpeg`文件中。 + + +`样例二`: +```shell +#使用 `GPU`预测视频`/root/projects/videos/test.avi` +./build/main --model_dir=/root/projects/models/yolov3_darknet --video_path=/root/projects/videos/test.avi --use_gpu=1 +``` +视频文件`可视化预测结果`会保存在当前目录下`output.avi`文件中。 diff --git a/deploy/cpp/docs/windows_vs2019_build.md b/deploy/cpp/docs/windows_vs2019_build.md new file mode 100644 index 0000000000000000000000000000000000000000..0f5b8691717213f15080448017bb90d8d940f920 --- /dev/null +++ b/deploy/cpp/docs/windows_vs2019_build.md @@ -0,0 +1,117 @@ +# Visual Studio 2019 Community CMake 编译指南 + +Windows 平台下,我们使用`Visual Studio 2019 Community` 进行了测试。微软从`Visual Studio 2017`开始即支持直接管理`CMake`跨平台编译项目,但是直到`2019`才提供了稳定和完全的支持,所以如果你想使用CMake管理项目编译构建,我们推荐你在`Visual Studio 2019`环境下构建。 + + +## 前置条件 +* Visual Studio 2019 +* CUDA 9.0 / CUDA 10.0,cudnn 7+ (仅在使用GPU版本的预测库时需要) +* CMake 3.0+ + +请确保系统已经安装好上述基本软件,我们使用的是`VS2019`的社区版。 + +**下面所有示例以工作目录为 `D:\projects`演示**。 + +### Step1: 下载代码 + +下载源代码 +```shell +git clone https://github.com/PaddlePaddle/PaddleDetection.git +``` + +**说明**:其中`C++`预测代码在`PaddleDetection/deploy/cpp` 目录,该目录不依赖任何`PaddleDetection`下其他目录。 + + +### Step2: 下载PaddlePaddle C++ 预测库 fluid_inference + +PaddlePaddle C++ 预测库针对不同的`CPU`和`CUDA`版本提供了不同的预编译版本,请根据实际情况下载: [C++预测库下载列表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/windows_cpp_inference.html) + +解压后`D:\projects\fluid_inference`目录包含内容为: +``` +fluid_inference +├── paddle # paddle核心库和头文件 +| +├── third_party # 第三方依赖库和头文件 +| +└── version.txt # 版本和编译信息 +``` + +### Step3: 安装配置OpenCV + +1. 在OpenCV官网下载适用于Windows平台的3.4.6版本, [下载地址](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download) +2. 运行下载的可执行文件,将OpenCV解压至指定目录,如`D:\projects\opencv` +3. 配置环境变量,如下流程所示 + - 我的电脑->属性->高级系统设置->环境变量 + - 在系统变量中找到Path(如没有,自行创建),并双击编辑 + - 新建,将opencv路径填入并保存,如`D:\projects\opencv\build\x64\vc14\bin` + +### Step4: 使用Visual Studio 2019直接编译CMake + +1. 打开Visual Studio 2019 Community,点击`继续但无需代码` +![step2](https://paddleseg.bj.bcebos.com/inference/vs2019_step1.png) +2. 点击: `文件`->`打开`->`CMake` +![step2.1](https://paddleseg.bj.bcebos.com/inference/vs2019_step2.png) + +选择项目代码所在路径,并打开`CMakeLists.txt`: + +![step2.2](https://paddleseg.bj.bcebos.com/inference/vs2019_step3.png) + +3. 点击:`项目`->`PaddleObjectDetector的CMake设置` + +![step3](https://paddleseg.bj.bcebos.com/inference/vs2019_step4.png) + +4. 点击`浏览`,分别设置编译选项指定`CUDA`、`OpenCV`、`Paddle预测库`的路径 + +三个编译参数的含义说明如下(带*表示仅在使用**GPU版本**预测库时指定, 其中CUDA库版本尽量对齐,**使用9.0、10.0版本,不使用9.2、10.1等版本CUDA库**): + +| 参数名 | 含义 | +| ---- | ---- | +| *CUDA_LIB | CUDA的库路径 | +| OPENCV_DIR | OpenCV的安装路径 | +| PADDLE_DIR | Paddle预测库的路径 | + +**注意:** 1. 使用`CPU`版预测库,请把`WITH_GPU`的勾去掉 2. 
如果使用的是`openblas`版本,请把`WITH_MKL`的勾去掉 +![step4](https://paddleseg.bj.bcebos.com/inference/vs2019_step5.png) + +**设置完成后**, 点击上图中`保存并生成CMake缓存以加载变量`。 + +5. 点击`生成`->`全部生成` + +![step6](https://paddleseg.bj.bcebos.com/inference/vs2019_step6.png) + + +### Step5: 预测及可视化 + +上述`Visual Studio 2019`编译产出的可执行文件在`out\build\x64-Release`目录下,打开`cmd`,并切换到该目录: + +``` +cd D:\projects\PaddleDetection\deploy\cpp\out\build\x64-Release +``` +可执行文件`main`即为样例的预测程序,其主要的命令行参数如下: + +| 参数 | 说明 | +| ---- | ---- | +| model_dir | 导出的预测模型所在路径 | +| image_path | 要预测的图片文件路径 | +| video_path | 要预测的视频文件路径 | +| use_gpu | 是否使用 GPU 预测, 支持值为0或1(默认值为0)| + +**注意**:如果同时设置了`video_path`和`image_path`,程序仅预测`video_path`。 + + +`样例一`: +```shell +#不使用`GPU`测试图片 `D:\\images\\test.jpeg` +.\main --model_dir=D:\\models\\yolov3_darknet --image_path=D:\\images\\test.jpeg +``` + +图片文件`可视化预测结果`会保存在当前目录下`output.jpeg`文件中。 + + +`样例二`: +```shell +#使用`GPU`测试视频 `D:\\videos\\test.avi` +.\main --model_dir=D:\\models\\yolov3_darknet --video_path=D:\\videos\\test.avi --use_gpu=1 +``` + +视频文件`可视化预测结果`会保存在当前目录下`output.avi`文件中。 diff --git a/deploy/cpp/include/config_parser.h b/deploy/cpp/include/config_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..5fdaad408b8398b3a3186c9b480759d0e7b3136e --- /dev/null +++ b/deploy/cpp/include/config_parser.h @@ -0,0 +1,113 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include <iostream> +#include <map> +#include <string> +#include <vector> + +#include "yaml-cpp/yaml.h" + +#ifdef _WIN32 +#define OS_PATH_SEP "\\" +#else +#define OS_PATH_SEP "/" +#endif + +namespace PaddleDetection { + +// Inference model configuration parser +class ConfigPaser { + public: + ConfigPaser() {} + + ~ConfigPaser() {} + + bool load_config(const std::string& model_dir, + const std::string& cfg = "infer_cfg.yml") { + // Load as a YAML::Node + YAML::Node config; + config = YAML::LoadFile(model_dir + OS_PATH_SEP + cfg); + + // Get runtime mode : fluid, trt_int8, trt_fp16, trt_fp32 + if (config["mode"].IsDefined()) { + mode_ = config["mode"].as<std::string>(); + } else { + std::cerr << "Please set mode, " + << "support value : fluid/trt_int8/trt_fp16/trt_fp32." + << std::endl; + return false; + } + + // Get model arch : YOLO, SSD, RetinaNet, RCNN, Face + if (config["arch"].IsDefined()) { + arch_ = config["arch"].as<std::string>(); + } else { + std::cerr << "Please set model arch," + << "support value : YOLO, SSD, RetinaNet, RCNN, Face." + << std::endl; + return false; + } + + // Get min_subgraph_size for tensorrt + if (config["min_subgraph_size"].IsDefined()) { + min_subgraph_size_ = config["min_subgraph_size"].as<int>(); + } else { + std::cerr << "Please set min_subgraph_size." << std::endl; + return false; + } + // Get draw_threshold for visualization + if (config["draw_threshold"].IsDefined()) { + draw_threshold_ = config["draw_threshold"].as<float>(); + } else { + std::cerr << "Please set draw_threshold." 
<< std::endl; + return false; + } + // Get with_background + if (config["with_background"].IsDefined()) { + with_background_ = config["with_background"].as<bool>(); + } else { + std::cerr << "Please set with_background." << std::endl; + return false; + } + // Get Preprocess for preprocessing + if (config["Preprocess"].IsDefined()) { + preprocess_info_ = config["Preprocess"]; + } else { + std::cerr << "Please set Preprocess." << std::endl; + return false; + } + // Get label_list for visualization + if (config["label_list"].IsDefined()) { + label_list_ = config["label_list"].as<std::vector<std::string>>(); + } else { + std::cerr << "Please set label_list." << std::endl; + return false; + } + + return true; + } + std::string mode_; + float draw_threshold_; + std::string arch_; + int min_subgraph_size_; + bool with_background_; + YAML::Node preprocess_info_; + std::vector<std::string> label_list_; +}; + +} // namespace PaddleDetection + diff --git a/deploy/cpp/include/object_detector.h b/deploy/cpp/include/object_detector.h new file mode 100644 index 0000000000000000000000000000000000000000..dda91cfc4832ce5599c761f11b6b62ccdd05c0df --- /dev/null +++ b/deploy/cpp/include/object_detector.h @@ -0,0 +1,95 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
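+// object_detector.h declares ObjectDetector: it loads the exported __model__/__params__ with paddle::AnalysisConfig and wraps preprocessing, ZeroCopy inference and postprocessing.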
+ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +#include "paddle_inference_api.h" // NOLINT + +#include "include/preprocess_op.h" +#include "include/config_parser.h" + + +namespace PaddleDetection { +// Object Detection Result +struct ObjectResult { + // Rectangle coordinates of detected object: left, right, top, down + std::vector rect; + // Class id of detected object + int class_id; + // Confidence of detected object + float confidence; +}; + + +// Generate visualization colormap for each class +std::vector GenerateColorMap(int num_class); + + +// Visualiztion Detection Result +cv::Mat VisualizeResult(const cv::Mat& img, + const std::vector& results, + const std::vector& lable_list, + const std::vector& colormap); + + +class ObjectDetector { + public: + explicit ObjectDetector(const std::string& model_dir, bool use_gpu = false) { + config_.load_config(model_dir); + threshold_ = config_.draw_threshold_; + preprocessor_.Init(config_.preprocess_info_, config_.arch_); + LoadModel(model_dir, use_gpu); + } + + // Load Paddle inference model + void LoadModel( + const std::string& model_dir, + bool use_gpu); + + // Run predictor + void Predict( + const cv::Mat& img, + std::vector* result); + + // Get Model Label list + const std::vector& GetLabelList() const { + return config_.label_list_; + } + + private: + // Preprocess image and copy data to input buffer + void Preprocess(const cv::Mat& image_mat); + // Postprocess result + void Postprocess( + const cv::Mat& raw_mat, + std::vector* result); + + std::unique_ptr predictor_; + Preprocessor preprocessor_; + ImageBlob inputs_; + std::vector output_data_; + float threshold_; + ConfigPaser config_; +}; + +} // namespace PaddleDetection diff --git a/deploy/cpp/include/preprocess_op.h b/deploy/cpp/include/preprocess_op.h new file mode 100644 index 0000000000000000000000000000000000000000..1e6c8844e8c25345cc60085e8bea87b256a9f9d9 --- /dev/null +++ b/deploy/cpp/include/preprocess_op.h @@ -0,0 +1,156 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
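+// preprocess_op.h declares the preprocessing ops (Resize, Normalize, Permute, PadStride); Preprocessor builds and runs them from the Preprocess list in infer_cfg.yml.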
+ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace PaddleDetection { + +// Object for storing all preprocessed data +class ImageBlob { + public: + // Original image width and height + std::vector ori_im_size_; + // Buffer for image data after preprocessing + std::vector im_data_; + // Original image width, height, shrink in float format + std::vector ori_im_size_f_; + // Evaluation image width and height + std::vector eval_im_size_f_; +}; + +// Abstraction of preprocessing opration class +class PreprocessOp { + public: + virtual void Init(const YAML::Node& item, const std::string& arch) = 0; + virtual void Run(cv::Mat* im, ImageBlob* data) = 0; +}; + +class Normalize : public PreprocessOp { + public: + virtual void Init(const YAML::Node& item, const std::string& arch) { + mean_ = item["mean"].as>(); + scale_ = item["std"].as>(); + is_channel_first_ = item["is_channel_first"].as(); + is_scale_ = item["is_scale"].as(); + } + + virtual void Run(cv::Mat* im, ImageBlob* data); + + private: + // CHW or HWC + bool is_channel_first_; + bool is_scale_; + std::vector mean_; + std::vector scale_; +}; + +class Permute : public PreprocessOp { + public: + virtual void Init(const YAML::Node& item, const std::string& arch) { + to_bgr_ = item["to_bgr"].as(); + is_channel_first_ = item["channel_first"].as(); + } + + virtual void Run(cv::Mat* im, ImageBlob* data); + + private: + // RGB to BGR + bool to_bgr_; + // CHW or HWC + bool is_channel_first_; +}; + +class Resize : public PreprocessOp { + public: + virtual void Init(const YAML::Node& item, const std::string& arch) { + arch_ = arch; + interp_ = item["interp"].as(); + max_size_ = item["max_size"].as(); + target_size_ = item["target_size"].as(); + image_shape_ = item["image_shape"].as>(); + } + + // Compute best resize scale for x-dimension, y-dimension + std::pair GenerateScale(const cv::Mat& im); + + virtual void Run(cv::Mat* im, ImageBlob* data); + + private: + std::string arch_; + int interp_; + int max_size_; + int target_size_; + std::vector image_shape_; +}; + +// Models with FPN need input shape % stride == 0 +class PadStride : public PreprocessOp { + public: + virtual void Init(const YAML::Node& item, const std::string& arch) { + stride_ = item["stride"].as(); + } + + virtual void Run(cv::Mat* im, ImageBlob* data); + + private: + int stride_; +}; + +class Preprocessor { + public: + void Init(const YAML::Node& config_node, const std::string& arch) { + arch_ = arch; + for (const auto& item : config_node) { + auto op_name = item["type"].as(); + ops_[op_name] = CreateOp(op_name); + ops_[op_name]->Init(item, arch); + } + } + + std::shared_ptr CreateOp(const std::string& name) { + if (name == "Resize") { + return std::make_shared(); + } else if (name == "Permute") { + return std::make_shared(); + } else if (name == "Normalize") { + return std::make_shared(); + } else if (name == "PadStride") { + return std::make_shared(); + } + return nullptr; + } + + void Run(cv::Mat* im, ImageBlob* data); + + public: + static const std::vector RUN_ORDER; + + private: + std::string arch_; + std::unordered_map> ops_; +}; + +} // namespace PaddleDetection diff --git a/deploy/cpp/scripts/bootstrap.sh b/deploy/cpp/scripts/bootstrap.sh new file mode 100644 index 0000000000000000000000000000000000000000..fc2bbb8643e44fe3b75af41ec5322b4ce64c8a65 --- /dev/null +++ b/deploy/cpp/scripts/bootstrap.sh @@ -0,0 +1,11 @@ +# download pre-compiled opencv lib 
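+# the archive is unpacked into ./deps/opencv346, which scripts/build.sh passes to cmake as OPENCV_DIR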
+OPENCV_URL=https://paddleseg.bj.bcebos.com/deploy/deps/opencv346.tar.bz2 +if [ ! -d "./deps/opencv346" ]; then + mkdir -p deps + cd deps + wget -c ${OPENCV_URL} + tar xvfj opencv346.tar.bz2 + rm -rf opencv346.tar.bz2 + cd .. +fi + diff --git a/deploy/cpp/scripts/build.sh b/deploy/cpp/scripts/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..6361b837f72f21fbc51f78990c62525afb1d862f --- /dev/null +++ b/deploy/cpp/scripts/build.sh @@ -0,0 +1,27 @@ +# compile with cuda +WITH_GPU=ON +# compile with tensorrt +WITH_TENSORRT=OFF +# path to paddle inference lib +PADDLE_DIR=/root/projects/deps/fluid_inference/ +# path to opencv lib +OPENCV_DIR=$(pwd)/deps/opencv346/ +# path to cuda lib +CUDA_LIB=/usr/local/cuda/lib64/ +# path to cudnn lib +CUDNN_LIB=/usr/local/cuda/lib64/ + +sh $(pwd)/scripts/bootstrap.sh + +rm -rf build +mkdir -p build +cd build +cmake .. \ + -DWITH_GPU=${WITH_GPU} \ + -DWITH_TENSORRT=${WITH_TENSORRT} \ + -DPADDLE_DIR=${PADDLE_DIR} \ + -DCUDA_LIB=${CUDA_LIB} \ + -DCUDNN_LIB=${CUDNN_LIB} \ + -DOPENCV_DIR=${OPENCV_DIR} \ + -DWITH_STATIC_LIB=OFF +make diff --git a/deploy/cpp/src/main.cc b/deploy/cpp/src/main.cc new file mode 100644 index 0000000000000000000000000000000000000000..d7eccff0c198e8341aa10d24e6046951e7a52c85 --- /dev/null +++ b/deploy/cpp/src/main.cc @@ -0,0 +1,119 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
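+// main.cc is the command-line entry point: it parses --model_dir, --image_path, --video_path and --use_gpu, then runs image or video inference.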
+ +#include + +#include +#include +#include + +#include "include/object_detector.h" + + +DEFINE_string(model_dir, "", "Path of inference model"); +DEFINE_string(image_path, "", "Path of input image"); +DEFINE_string(video_path, "", "Path of input video"); +DEFINE_bool(use_gpu, false, "Infering with GPU or CPU"); + +void PredictVideo(const std::string& video_path, + PaddleDetection::ObjectDetector* det) { + // Open video + cv::VideoCapture capture; + capture.open(video_path.c_str()); + if (!capture.isOpened()) { + printf("can not open video : %s\n", video_path.c_str()); + return; + } + + // Get Video info : resolution, fps + int video_width = static_cast(capture.get(CV_CAP_PROP_FRAME_WIDTH)); + int video_height = static_cast(capture.get(CV_CAP_PROP_FRAME_HEIGHT)); + int video_fps = static_cast(capture.get(CV_CAP_PROP_FPS)); + + // Create VideoWriter for output + cv::VideoWriter video_out; + std::string video_out_path = "output.avi"; + video_out.open(video_out_path.c_str(), + CV_FOURCC('M', 'J', 'P', 'G'), + video_fps, + cv::Size(video_width, video_height), + true); + if (!video_out.isOpened()) { + printf("create video writer failed!\n"); + return; + } + + std::vector result; + auto labels = det->GetLabelList(); + auto colormap = PaddleDetection::GenerateColorMap(labels.size()); + // Capture all frames and do inference + cv::Mat frame; + while (capture.read(frame)) { + if (frame.empty()) { + break; + } + det->Predict(frame, &result); + cv::Mat out_im = PaddleDetection::VisualizeResult( + frame, result, labels, colormap); + video_out.write(out_im); + } + capture.release(); + video_out.release(); +} + +void PredictImage(const std::string& image_path, + PaddleDetection::ObjectDetector* det) { + // Open input image as an opencv cv::Mat object + cv::Mat im = cv::imread(image_path, 1); + // Store all detected result + std::vector result; + det->Predict(im, &result); + for (const auto& item : result) { + printf("class=%d confidence=%.2f rect=[%d %d %d %d]\n", + item.class_id, + item.confidence, + item.rect[0], + item.rect[1], + item.rect[2], + item.rect[3]); + } + // Visualization result + auto labels = det->GetLabelList(); + auto colormap = PaddleDetection::GenerateColorMap(labels.size()); + cv::Mat vis_img = PaddleDetection::VisualizeResult( + im, result, labels, colormap); + cv::imwrite("output.jpeg", vis_img); + printf("Visualized output saved as output.jpeg\n"); +} + +int main(int argc, char** argv) { + // Parsing command-line + google::ParseCommandLineFlags(&argc, &argv, true); + if (FLAGS_model_dir.empty() + || (FLAGS_image_path.empty() && FLAGS_video_path.empty())) { + std::cout << "Usage: ./main --model_dir=/PATH/TO/INFERENCE_MODEL/ " + << "--image_path=/PATH/TO/INPUT/IMAGE/" << std::endl; + return -1; + } + + // Load model and create a object detector + PaddleDetection::ObjectDetector det(FLAGS_model_dir, FLAGS_use_gpu); + // Do inference on input video or image + if (!FLAGS_video_path.empty()) { + PredictVideo(FLAGS_video_path, &det); + } else if (!FLAGS_image_path.empty()) { + PredictImage(FLAGS_image_path, &det); + } + return 0; +} diff --git a/deploy/cpp/src/object_detector.cc b/deploy/cpp/src/object_detector.cc new file mode 100644 index 0000000000000000000000000000000000000000..172b22d2c5e63d97d5580ea093024a3a411e7685 --- /dev/null +++ b/deploy/cpp/src/object_detector.cc @@ -0,0 +1,190 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +# include "include/object_detector.h" + +namespace PaddleDetection { + +// Load Model and create model predictor +void ObjectDetector::LoadModel(const std::string& model_dir, bool use_gpu) { + paddle::AnalysisConfig config; + std::string prog_file = model_dir + OS_PATH_SEP + "__model__"; + std::string params_file = model_dir + OS_PATH_SEP + "__params__"; + config.SetModel(prog_file, params_file); + if (use_gpu) { + config.EnableUseGpu(100, 0); + } else { + config.DisableGpu(); + } + config.SwitchUseFeedFetchOps(false); + config.SwitchSpecifyInputNames(true); + // Memory optimization + config.EnableMemoryOptim(); + predictor_ = std::move(CreatePaddlePredictor(config)); +} + +// Visualiztion MaskDetector results +cv::Mat VisualizeResult(const cv::Mat& img, + const std::vector& results, + const std::vector& lable_list, + const std::vector& colormap) { + cv::Mat vis_img = img.clone(); + for (int i = 0; i < results.size(); ++i) { + int w = results[i].rect[1] - results[i].rect[0]; + int h = results[i].rect[3] - results[i].rect[2]; + cv::Rect roi = cv::Rect(results[i].rect[0], results[i].rect[2], w, h); + + // Configure color and text size + std::string text = lable_list[results[i].class_id]; + int c1 = colormap[3 * results[i].class_id + 0]; + int c2 = colormap[3 * results[i].class_id + 1]; + int c3 = colormap[3 * results[i].class_id + 2]; + cv::Scalar roi_color = cv::Scalar(c1, c2, c3); + text += std::to_string(static_cast(results[i].confidence * 100)) + "%"; + int font_face = cv::FONT_HERSHEY_COMPLEX_SMALL; + double font_scale = 0.5f; + float thickness = 0.5; + cv::Size text_size = cv::getTextSize(text, + font_face, + font_scale, + thickness, + nullptr); + float new_font_scale = roi.width * font_scale / text_size.width; + text_size = cv::getTextSize(text, + font_face, + new_font_scale, + thickness, + nullptr); + cv::Point origin; + origin.x = roi.x; + origin.y = roi.y; + + // Configure text background + cv::Rect text_back = cv::Rect(results[i].rect[0], + results[i].rect[2] - text_size.height, + text_size.width, + text_size.height); + + // Draw roi object, text, and background + cv::rectangle(vis_img, roi, roi_color, 2); + cv::rectangle(vis_img, text_back, roi_color, -1); + cv::putText(vis_img, + text, + origin, + font_face, + new_font_scale, + cv::Scalar(255, 255, 255), + thickness); + } + return vis_img; +} + +void ObjectDetector::Preprocess(const cv::Mat& ori_im) { + // Clone the image : keep the original mat for postprocess + cv::Mat im = ori_im.clone(); + cv::cvtColor(im, im, cv::COLOR_BGR2RGB); + preprocessor_.Run(&im, &inputs_); +} + +void ObjectDetector::Postprocess( + const cv::Mat& raw_mat, + std::vector* result) { + result->clear(); + int rh = 1; + int rw = 1; + if (config_.arch_ == "SSD" || config_.arch_ == "Face") { + rh = raw_mat.rows; + rw = raw_mat.cols; + } + + int total_size = output_data_.size() / 6; + for (int j = 0; j < total_size; ++j) { + // Class id + int class_id = static_cast(round(output_data_[0 + j * 6])); + 
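// Each detection in output_data_ takes 6 floats: class_id, score, xmin, ymin, xmax, ymax (SSD/Face boxes are normalized, hence the rw/rh scaling below). +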
// Confidence score + float score = output_data_[1 + j * 6]; + int xmin = (output_data_[2 + j * 6] * rw); + int ymin = (output_data_[3 + j * 6] * rh); + int xmax = (output_data_[4 + j * 6] * rw); + int ymax = (output_data_[5 + j * 6] * rh); + int wd = xmax - xmin; + int hd = ymax - ymin; + if (score > threshold_) { + ObjectResult result_item; + result_item.rect = {xmin, xmax, ymin, ymax}; + result_item.class_id = class_id; + result_item.confidence = score; + result->push_back(result_item); + } + } +} + +void ObjectDetector::Predict(const cv::Mat& im, + std::vector* result) { + // Preprocess image + Preprocess(im); + // Prepare input tensor + auto input_names = predictor_->GetInputNames(); + for (const auto& tensor_name : input_names) { + auto in_tensor = predictor_->GetInputTensor(tensor_name); + if (tensor_name == "image") { + int rh = inputs_.eval_im_size_f_[0]; + int rw = inputs_.eval_im_size_f_[1]; + in_tensor->Reshape({1, 3, rh, rw}); + in_tensor->copy_from_cpu(inputs_.im_data_.data()); + } else if (tensor_name == "im_size") { + in_tensor->Reshape({1, 2}); + in_tensor->copy_from_cpu(inputs_.ori_im_size_.data()); + } else if (tensor_name == "im_info") { + in_tensor->Reshape({1, 3}); + in_tensor->copy_from_cpu(inputs_.eval_im_size_f_.data()); + } else if (tensor_name == "im_shape") { + in_tensor->Reshape({1, 3}); + in_tensor->copy_from_cpu(inputs_.ori_im_size_f_.data()); + } + } + // Run predictor + predictor_->ZeroCopyRun(); + // Get output tensor + auto output_names = predictor_->GetOutputNames(); + auto out_tensor = predictor_->GetOutputTensor(output_names[0]); + std::vector output_shape = out_tensor->shape(); + // Calculate output length + int output_size = 1; + for (int j = 0; j < output_shape.size(); ++j) { + output_size *= output_shape[j]; + } + output_data_.resize(output_size); + out_tensor->copy_to_cpu(output_data_.data()); + // Postprocessing result + Postprocess(im, result); +} + +std::vector GenerateColorMap(int num_class) { + auto colormap = std::vector(3 * num_class, 0); + for (int i = 0; i < num_class; ++i) { + int j = 0; + int lab = i; + while (lab) { + colormap[i * 3] |= (((lab >> 0) & 1) << (7 - j)); + colormap[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j)); + colormap[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j)); + ++j; + lab >>= 3; + } + } + return colormap; +} + +} // namespace PaddleDetection diff --git a/deploy/cpp/src/preprocess_op.cc b/deploy/cpp/src/preprocess_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..b3bc27528affe0ba4f83cd247daa59aa20d1cd25 --- /dev/null +++ b/deploy/cpp/src/preprocess_op.cc @@ -0,0 +1,145 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
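+// preprocess_op.cc implements the ops declared in preprocess_op.h; Preprocessor::Run applies them in RUN_ORDER (Resize, Normalize, PadStride, Permute).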
+ +#include +#include + +#include "include/preprocess_op.h" + +namespace PaddleDetection { + +void Normalize::Run(cv::Mat* im, ImageBlob* data) { + double e = 1.0; + if (is_scale_) { + e /= 255.0; + } + (*im).convertTo(*im, CV_32FC3, e); + for (int h = 0; h < im->rows; h++) { + for (int w = 0; w < im->cols; w++) { + im->at(h, w)[0] = + (im->at(h, w)[0] - mean_[0] ) / scale_[0]; + im->at(h, w)[1] = + (im->at(h, w)[1] - mean_[1] ) / scale_[1]; + im->at(h, w)[2] = + (im->at(h, w)[2] - mean_[2] ) / scale_[2]; + } + } +} + +void Permute::Run(cv::Mat* im, ImageBlob* data) { + int rh = im->rows; + int rw = im->cols; + int rc = im->channels(); + (data->im_data_).resize(rc * rh * rw); + float* base = (data->im_data_).data(); + for (int i = 0; i < rc; ++i) { + cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i); + } +} + +void Resize::Run(cv::Mat* im, ImageBlob* data) { + data->ori_im_size_ = { + static_cast(im->rows), + static_cast(im->cols) + }; + data->ori_im_size_f_ = { + static_cast(im->rows), + static_cast(im->cols), + 1.0 + }; + auto resize_scale = GenerateScale(*im); + cv::resize( + *im, *im, cv::Size(), resize_scale.first, resize_scale.second, interp_); + if (max_size_ != 0 && !image_shape_.empty()) { + // Padding the image with 0 border + cv::copyMakeBorder( + *im, + *im, + 0, + max_size_ - im->rows, + 0, + max_size_ - im->cols, + cv::BORDER_CONSTANT, + cv::Scalar(0)); + } + data->eval_im_size_f_ = { + static_cast(im->rows), + static_cast(im->cols), + resize_scale.first + }; +} + +std::pair Resize::GenerateScale(const cv::Mat& im) { + std::pair resize_scale; + int origin_w = im.cols; + int origin_h = im.rows; + + if (max_size_ != 0 && (arch_ == "RCNN" || arch_ == "RetinaNet")) { + int im_size_max = std::max(origin_w, origin_h); + int im_size_min = std::min(origin_w, origin_h); + float scale_ratio = + static_cast(target_size_) / static_cast(im_size_min); + if (max_size_ > 0) { + if (round(scale_ratio * im_size_max) > max_size_) { + scale_ratio = + static_cast(max_size_) / static_cast(im_size_max); + } + } + resize_scale = {scale_ratio, scale_ratio}; + } else { + resize_scale.first = + static_cast(target_size_) / static_cast(origin_w); + resize_scale.second = + static_cast(target_size_) / static_cast(origin_h); + } + return resize_scale; +} + +void PadStride::Run(cv::Mat* im, ImageBlob* data) { + if (stride_ <= 0) { + return; + } + int rc = im->channels(); + int rh = im->rows; + int rw = im->cols; + int nh = (rh / stride_) * stride_ + (rh % stride_ != 0) * stride_; + int nw = (rw / stride_) * stride_ + (rw % stride_ != 0) * stride_; + cv::copyMakeBorder( + *im, + *im, + 0, + nh - rh, + 0, + nw - rw, + cv::BORDER_CONSTANT, + cv::Scalar(0)); + (data->eval_im_size_f_)[0] = static_cast(im->rows); + (data->eval_im_size_f_)[1] = static_cast(im->cols); +} + + +// Preprocessor op running order +const std::vector Preprocessor::RUN_ORDER = { + "Resize", "Normalize", "PadStride", "Permute" +}; + +void Preprocessor::Run(cv::Mat* im, ImageBlob* data) { + for (const auto& name : RUN_ORDER) { + if (ops_.find(name) != ops_.end()) { + ops_[name]->Run(im, data); + } + } +} + +} // namespace PaddleDetection diff --git a/docs/advanced_tutorials/inference/DEPLOYMENT.md b/docs/advanced_tutorials/inference/DEPLOYMENT.md deleted file mode 100644 index 4915b410e2c370fc2a3820832bd8d28ae7c74c6a..0000000000000000000000000000000000000000 --- a/docs/advanced_tutorials/inference/DEPLOYMENT.md +++ /dev/null @@ -1,172 +0,0 @@ -# PaddleDetection C++预测部署方案 - -## 本文档结构 - -[1.说明](#1说明) - 
-[2.主要目录和文件](#2主要目录和文件) - -[3.编译](#3编译) - -[4.预测并可视化结果](#4预测并可视化结果) - - - - -## 1.说明 - -本目录提供一个跨平台的图像检测模型的C++预测部署方案,用户通过一定的配置,加上少量的代码,即可把模型集成到自己的服务中,完成相应的图像检测任务。 - -主要设计的目标包括以下四点: -- 跨平台,支持在 Windows 和 Linux 完成编译、开发和部署 -- 可扩展性,支持用户针对新模型开发自己特殊的数据预处理等逻辑 -- 高性能,除了`PaddlePaddle`自身带来的性能优势,我们还针对图像检测的特点对关键步骤进行了性能优化 -- 支持多种常见的图像检测模型,如YOLOv3, Faster-RCNN, Faster-RCNN+FPN,用户通过少量配置即可加载模型完成常见检测任务 - -## 2.主要目录和文件 - -```bash -deploy -├── detection_demo.cpp # 完成图像检测预测任务C++代码 -│ -├── conf -│ ├── detection_rcnn.yaml #示例faster rcnn 目标检测配置 -│ └── detection_rcnn_fpn.yaml #示例faster rcnn + fpn目标检测配置 -├── images -│ └── detection_rcnn # 示例faster rcnn + fpn目标检测测试图片目录 -├── tools -│ └── vis.py # 示例图像检测结果可视化脚本 -├── docs -│ ├── linux_build.md # Linux 编译指南 -│ ├── windows_vs2015_build.md # windows VS2015编译指南 -│ └── windows_vs2019_build.md # Windows VS2019编译指南 -│ -├── utils # 一些基础公共函数 -│ -├── preprocess # 数据预处理相关代码 -│ -├── predictor # 模型加载和预测相关代码 -│ -├── CMakeList.txt # cmake编译入口文件 -│ -└── external-cmake # 依赖的外部项目cmake(目前仅有yaml-cpp) - -``` - -## 3.编译 -支持在`Windows`和`Linux`平台编译和使用: -- [Linux 编译指南](./docs/linux_build.md) -- [Windows 使用 Visual Studio 2019 Community 编译指南](./docs/windows_vs2019_build.md) -- [Windows 使用 Visual Studio 2015 编译指南](./docs/windows_vs2015_build.md) - -`Windows`上推荐使用最新的`Visual Studio 2019 Community`直接编译`CMake`项目。 - -## 4.预测并可视化结果 - -完成编译后,便生成了需要的可执行文件和链接库。这里以我们基于`faster rcnn`检测模型为例,介绍部署图像检测模型的通用流程。 - -### 4.1. 下载模型文件 -我们提供faster rcnn,faster rcnn+fpn模型用于预测coco17数据集,可在以下链接下载:[faster rcnn示例模型下载地址](https://paddleseg.bj.bcebos.com/inference/faster_rcnn_pp50.zip), - [faster rcnn + fpn示例模型下载地址](https://paddleseg.bj.bcebos.com/inference/faster_rcnn_pp50_fpn.zip)。 - -下载并解压,解压后目录结构如下: -``` -faster_rcnn_pp50/ -├── __model__ # 模型文件 -│ -└── __params__ # 参数文件 -``` -解压后把上述目录拷贝到合适的路径: - -**假设**`Windows`系统上,我们模型和参数文件所在路径为`D:\projects\models\faster_rcnn_pp50`。 - -**假设**`Linux`上对应的路径则为`/root/projects/models/faster_rcnn_pp50/`。 - - -### 4.2. 修改配置 - -`inference`源代码(即本目录)的`conf`目录下提供了示例基于faster rcnn的配置文件`detection_rcnn.yaml`, 相关的字段含义和说明如下: - -```yaml -DEPLOY: - # 是否使用GPU预测 - USE_GPU: 1 - # 模型和参数文件所在目录路径 - MODEL_PATH: "/root/projects/models/faster_rcnn_pp50" - # 模型文件名 - MODEL_FILENAME: "__model__" - # 参数文件名 - PARAMS_FILENAME: "__params__" - # 预测图片的标准输入,尺寸不一致会resize - EVAL_CROP_SIZE: (608, 608) - # resize方式,支持 UNPADDING和RANGE_SCALING - RESIZE_TYPE: "RANGE_SCALING" - # 短边对齐的长度,仅在RANGE_SCALING下有效 - TARGET_SHORT_SIZE : 800 - # 均值 - MEAN: [0.4647, 0.4647, 0.4647] - # 方差 - STD: [0.0834, 0.0834, 0.0834] - # 图片类型, rgb或者rgba - IMAGE_TYPE: "rgb" - # 像素分类数 - NUM_CLASSES: 1 - # 通道数 - CHANNELS : 3 - # 预处理器, 目前提供图像检测的通用处理类DetectionPreProcessor - PRE_PROCESSOR: "DetectionPreProcessor" - # 预测模式,支持 NATIVE 和 ANALYSIS - PREDICTOR_MODE: "ANALYSIS" - # 每次预测的 batch_size - BATCH_SIZE : 3 - # 长边伸缩的最大长度,-1代表无限制。 - RESIZE_MAX_SIZE: 1333 - # 输入的tensor数量。 - FEEDS_SIZE: 3 - -``` -修改字段`MODEL_PATH`的值为你在**上一步**下载并解压的模型文件所放置的目录即可。更多配置文件字段介绍,请参考文档[预测部署方案配置文件说明](./docs/configuration.md)。 - -**注意**在使用CPU版本预测库时,`USE_GPU`的值必须设为0,否则无法正常预测。 - -### 4.3. 
执行预测 - -在终端中切换到生成的可执行文件所在目录为当前目录(Windows系统为`cmd`)。 - -`Linux` 系统中执行以下命令: -```shell -./detection_demo --conf=conf/detection_rcnn.yaml --input_dir=images/detection_rcnn -``` -`Windows` 中执行以下命令: -```shell -.\detection_demo.exe --conf=conf\detection_rcnn.yaml --input_dir=images\detection_rcnn\ -``` - - -预测使用的两个命令参数说明如下: - -| 参数 | 含义 | -|-------|----------| -| conf | 模型配置的Yaml文件路径 | -| input_dir | 需要预测的图片目录 | - -· -配置文件说明请参考上一步,样例程序会扫描input_dir目录下的所有图片,并为每一张图片生成对应的预测结果,输出到屏幕,并在`X`同一目录下保存到`X.pb文件`(X为对应图片的文件名)。可使用工具脚本vis.py将检测结果可视化。 - -**检测结果可视化** - -运行可视化脚本时,只需输入命令行参数图片路径、检测结果pb文件路径、目标框阈值以及类别-标签映射文件路径即可得到可视化的图片`X.png` (tools目录下提供coco17的类别标签映射文件coco17.json)。 - -```bash -python vis.py --img_path=../build/images/detection_rcnn/000000087038.jpg --img_result_path=../build/images/detection_rcnn/000000087038.jpg.pb --threshold=0.1 --c2l_path=coco17.json -``` - -检测结果(每个图片的结果用空行隔开) - -```原图:``` - -![](../../../inference/images/detection_rcnn/000000087038.jpg) - -```检测结果图:``` - -![](../../images/000000087038_res.jpg) diff --git a/docs/advanced_tutorials/inference/DEPLOYMENT.md b/docs/advanced_tutorials/inference/DEPLOYMENT.md new file mode 120000 index 0000000000000000000000000000000000000000..8ff4f4b539012e7676e70cf8316fbcbe337fb99a --- /dev/null +++ b/docs/advanced_tutorials/inference/DEPLOYMENT.md @@ -0,0 +1 @@ +../../../../deploy/cpp/README.md \ No newline at end of file diff --git a/docs/advanced_tutorials/inference/docs/configuration.md b/docs/advanced_tutorials/inference/docs/configuration.md deleted file mode 100644 index 06fa7cc3ff32f98cb92b1c683cb8507fdfdd0f8e..0000000000000000000000000000000000000000 --- a/docs/advanced_tutorials/inference/docs/configuration.md +++ /dev/null @@ -1,75 +0,0 @@ -# 预测部署方案配置文件说明 -## 基本概念 -预测部署方案的配置文件旨在给用户提供一个预测部署方案定制化接口。用户仅需理解该配置文件相关字段的含义,无需编写任何代码,即可定制化预测部署方案。为了更好地表达每个字段的含义,首先介绍配置文件中字段的类型。 - -### 字段类型 -- **required**: 表明该字段必须显式定义,否则无法正常启动预测部署程序。 -- **optional**: 表明该字段可忽略不写,预测部署系统会提供默认值,相关默认值将在下文介绍。 - -### 字段值类型 -- **int**:表明该字段必须赋予整型类型的值。 -- **string**:表明该字段必须赋予字符串类型的值。 -- **list**:表明该字段必须赋予列表的值。 -- **tuple**: 表明该字段必须赋予双元素元组的值。 - -## 字段介绍 - -```yaml -# 预测部署时所有配置字段需在DEPLOY字段下 -DEPLOY: - # 类型:required int - # 含义:是否使用GPU预测。 0:不使用 1:使用 - USE_GPU: 1 - # 类型:required string - # 含义:模型和参数文件所在目录 - MODEL_PATH: "/path/to/model_directory" - # 类型:required string - # 含义:模型文件名 - MODEL_FILENAME: "__model__" - # 类型:required string - # 含义:参数文件名 - PARAMS_FILENAME: "__params__" - # 类型:optional string - # 含义:图像resize的类型。支持 UNPADDING 和 RANGE_SCALING模式。默认是UNPADDING模式。 - RESIZE_TYPE: "UNPADDING" - # 类型:required tuple - # 含义:当使用UNPADDING模式时,会将图像直接resize到该尺寸。 - EVAL_CROP_SIZE: (513, 513) - # 类型:optional int - # 含义:当使用RANGE_SCALING模式时,图像短边需要对齐该字段的值,长边会同比例 - # 的缩放,从而在保持图像长宽比例不变的情况下resize到新的尺寸。默认值为0。 - TARGET_SHORT_SIZE: 800 - # 类型:optional int - # 含义: 当使用RANGE_SCALING模式时,长边不能缩放到比该字段的值大。默认值为0。 - RESIZE_MAX_SIZE: 1333 - # 类型:required list - # 含义:图像进行归一化预处理时的均值 - MEAN: [104.008, 116.669, 122.675] - # 类型:required list - # 含义:图像进行归一化预处理时的方差 - STD: [1.0, 1.0, 1.0] - # 类型:string - # 含义:图片类型, rgb 或者 rgba - IMAGE_TYPE: "rgb" - # 类型:required int - # 含义:图像分类类型数 - NUM_CLASSES: 2 - # 类型:required int - # 含义:图片通道数 - CHANNELS : 3 - # 类型:required string - # 含义:预处理方式,目前提供图像检测的通用预处理类DetectionPreProcessor. - PRE_PROCESSOR: "DetectionPreProcessor" - # 类型:required string - # 含义:预测模式,支持 NATIVE 和 ANALYSIS - PREDICTOR_MODE: "ANALYSIS" - # 类型:required int - # 含义:每次预测的 batch_size - BATCH_SIZE : 3 - # 类型:optional int - # 含义: 输入张量的个数。大部分模型不需要设置。 默认值为1. 
- FEEDS_SIZE: 2 - # 类型: optional int - # 含义: 将图像的边变为该字段的值的整数倍。在使用fpn模型时需要设为32。默认值为1。 - COARSEST_STRIDE: 32 -``` diff --git a/docs/advanced_tutorials/inference/docs/linux_build.md b/docs/advanced_tutorials/inference/docs/linux_build.md deleted file mode 100644 index 012f3dee729a1395d6a66e89fce6a68c5f79c5cb..0000000000000000000000000000000000000000 --- a/docs/advanced_tutorials/inference/docs/linux_build.md +++ /dev/null @@ -1,100 +0,0 @@ -# Linux平台 编译指南 - -## 说明 -本文档在 `Linux`平台使用`GCC 4.8.5` 和 `GCC 4.9.4`测试过,如果需要使用更高G++版本编译使用,则需要重新编译Paddle预测库,请参考: [从源码编译Paddle预测库](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html#id15)。 - -## 前置条件 -* G++ 4.8.2 ~ 4.9.4 -* CUDA 9.0 / CUDA 10.0, cudnn 7+ (仅在使用GPU版本的预测库时需要) -* CMake 3.0+ - -请确保系统已经安装好上述基本软件,**下面所有示例以工作目录为 `/root/projects/`演示**。 - -### Step1: 下载代码 - -1. `git clone https://github.com/PaddlePaddle/PaddleDetection.git` - -`C++`预测代码在`/root/projects/PaddleDetection/inference` 目录,该目录不依赖任何`PaddleDetection`下其他目录。 - - -### Step2: 下载PaddlePaddle C++ 预测库 fluid_inference - -PaddlePaddle C++ 预测库主要分为CPU版本和GPU版本。其中,针对不同的CUDA版本,GPU版本预测库又分为两个版本预测库:CUDA 9.0和CUDA 10.0版本预测库。以下为各版本C++预测库的下载链接: - -| 版本 | 链接 | -| ---- | ---- | -| CPU版本 | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cpu_1.6.1.tgz) | -| CUDA 9.0版本 | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda97_1.6.1.tgz) | -| CUDA 10.0版本 | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda10_1.6.1.tgz) | - - -针对不同的CPU类型、不同的指令集,官方提供更多可用的预测库版本,目前已经推出1.6版本的预测库。其余版本具体请参考以下链接:[C++预测库下载列表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html) - - -下载并解压后`/root/projects/fluid_inference`目录包含内容为: -``` -fluid_inference -├── paddle # paddle核心库和头文件 -| -├── third_party # 第三方依赖库和头文件 -| -└── version.txt # 版本和编译信息 -``` - -### Step3: 安装配置OpenCV - -```shell -# 0. 切换到/root/projects目录 -cd /root/projects -# 1. 下载OpenCV3.4.6版本源代码 -wget -c https://paddleseg.bj.bcebos.com/inference/opencv-3.4.6.zip -# 2. 解压 -unzip opencv-3.4.6.zip && cd opencv-3.4.6 -# 3. 创建build目录并编译, 这里安装到/usr/local/opencv3目录 -mkdir build && cd build -cmake .. -DCMAKE_INSTALL_PREFIX=/root/projects/opencv3 -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DWITH_IPP=OFF -DBUILD_IPP_IW=OFF -DWITH_LAPACK=OFF -DWITH_EIGEN=OFF -DCMAKE_INSTALL_LIBDIR=lib64 -DWITH_ZLIB=ON -DBUILD_ZLIB=ON -DWITH_JPEG=ON -DBUILD_JPEG=ON -DWITH_PNG=ON -DBUILD_PNG=ON -DWITH_TIFF=ON -DBUILD_TIFF=ON -make -j4 -make install -``` - -**注意:** 上述操作完成后,`opencv` 被安装在 `/root/projects/opencv3` 目录。 - -### Step4: 编译 - -`CMake`编译时,涉及到四个编译参数用于指定核心依赖库的路径, 他们的定义如下:(带*表示仅在使用**GPU版本**预测库时指定,其中CUDA库版本尽量对齐,**使用9.0、10.0版本,不使用9.2、10.1版本CUDA库**) - -| 参数名 | 含义 | -| ---- | ---- | -| * CUDA_LIB | CUDA的库路径 | -| * CUDNN_LIB | cudnn的库路径| -| OPENCV_DIR | OpenCV的安装路径 | -| PADDLE_DIR | Paddle预测库的路径 | - -在使用**GPU版本**预测库进行编译时,可执行下列操作。**注意**把对应的参数改为你的上述依赖库实际路径: - -```shell -cd /root/projects/PaddleDetection/inference -mkdir build && cd build -cmake .. -DWITH_GPU=ON -DPADDLE_DIR=/root/projects/fluid_inference -DCUDA_LIB=/usr/local/cuda/lib64/ -DOPENCV_DIR=/root/projects/opencv3/ -DCUDNN_LIB=/usr/local/cuda/lib64/ -DWITH_STATIC_LIB=OFF -make -``` - -在使用**CPU版本**预测库进行编译时,可执行下列操作: - -```shell -cd /root/projects/PaddleDetection/inference - -mkdir build && cd build -cmake .. 
-DWITH_GPU=OFF -DPADDLE_DIR=/root/projects/fluid_inference -DOPENCV_DIR=/root/projects/opencv3/ -DWITH_STATIC_LIB=OFF -make -``` - -### Step5: 预测及可视化 - -执行命令: - -``` -./detection_demo --conf=/path/to/your/conf --input_dir=/path/to/your/input/data/directory -``` - -更详细说明请参考ReadMe文档: [预测和可视化部分](../README.md) diff --git a/docs/advanced_tutorials/inference/docs/linux_build.md b/docs/advanced_tutorials/inference/docs/linux_build.md new file mode 120000 index 0000000000000000000000000000000000000000..6a26e62001ff8072960f229a860a6bbc54acc37f --- /dev/null +++ b/docs/advanced_tutorials/inference/docs/linux_build.md @@ -0,0 +1 @@ +../../../../deploy/cpp/docs/linux_build.md \ No newline at end of file diff --git a/docs/advanced_tutorials/inference/docs/windows_vs2019_build.md b/docs/advanced_tutorials/inference/docs/windows_vs2019_build.md deleted file mode 100644 index f831aba90ed398f920bd1dc53b6620b05d188e8a..0000000000000000000000000000000000000000 --- a/docs/advanced_tutorials/inference/docs/windows_vs2019_build.md +++ /dev/null @@ -1,102 +0,0 @@ -# Visual Studio 2019 Community CMake 编译指南 - -Windows 平台下,我们使用`Visual Studio 2015` 和 `Visual Studio 2019 Community` 进行了测试。微软从`Visual Studio 2017`开始即支持直接管理`CMake`跨平台编译项目,但是直到`2019`才提供了稳定和完全的支持,所以如果你想使用CMake管理项目编译构建,我们推荐你使用`Visual Studio 2019`环境下构建。 - -你也可以使用和`VS2015`一样,通过把`CMake`项目转化成`VS`项目来编译,其中**有差别的部分**在文档中我们有说明,请参考:[使用Visual Studio 2015 编译指南](./windows_vs2015_build.md) - -## 前置条件 -* Visual Studio 2019 -* CUDA 9.0 / CUDA 10.0,cudnn 7+ (仅在使用GPU版本的预测库时需要) -* CMake 3.0+ - -请确保系统已经安装好上述基本软件,我们使用的是`VS2019`的社区版。 - -**下面所有示例以工作目录为 `D:\projects`演示**。 - -### Step1: 下载代码 - -1. 点击下载源代码:[下载地址](https://github.com/PaddlePaddle/PaddleDetection/archive/master.zip) -2. 解压,解压后目录重命名为`PaddleDetection` - -以下代码目录路径为`D:\projects\PaddleDetection` 为例。 - - -### Step2: 下载PaddlePaddle C++ 预测库 fluid_inference - -PaddlePaddle C++ 预测库主要分为两大版本:CPU版本和GPU版本。其中,针对不同的CUDA版本,GPU版本预测库又分为三个版本预测库:CUDA 9.0和CUDA 10.0版本预测库。根据Windows环境,下载相应版本的PaddlePaddle预测库,并解压到`D:\projects\`目录。以下为各版本C++预测库的下载链接: - -| 版本 | 链接 | -| ---- | ---- | -| CPU版本 | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_install_dir_win_cpu_1.6.zip) | -| CUDA 9.0版本 | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda9_1.6.1.zip) | -| CUDA 10.0版本 | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda10_1.6.1.zip) | - -解压后`D:\projects\fluid_inference`目录包含内容为: -``` -fluid_inference -├── paddle # paddle核心库和头文件 -| -├── third_party # 第三方依赖库和头文件 -| -└── version.txt # 版本和编译信息 -``` - -### Step3: 安装配置OpenCV - -1. 在OpenCV官网下载适用于Windows平台的3.4.6版本, [下载地址](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download) -2. 运行下载的可执行文件,将OpenCV解压至指定目录,如`D:\projects\opencv` -3. 配置环境变量,如下流程所示 - - 我的电脑->属性->高级系统设置->环境变量 - - 在系统变量中找到Path(如没有,自行创建),并双击编辑 - - 新建,将opencv路径填入并保存,如`D:\projects\opencv\build\x64\vc14\bin` - -### Step4: 使用Visual Studio 2019直接编译CMake - -1. 打开Visual Studio 2019 Community,点击`继续但无需代码` -![step2](https://paddleseg.bj.bcebos.com/inference/vs2019_step1.png) -2. 点击: `文件`->`打开`->`CMake` -![step2.1](https://paddleseg.bj.bcebos.com/inference/vs2019_step2.png) - -选择项目代码所在路径,并打开`CMakeList.txt`: - -![step2.2](https://paddleseg.bj.bcebos.com/inference/vs2019_step3.png) - -3. 点击:`项目`->`cpp_inference_demo的CMake设置` - -![step3](https://paddleseg.bj.bcebos.com/inference/vs2019_step4.png) - -4. 
点击`浏览`,分别设置编译选项指定`CUDA`、`OpenCV`、`Paddle预测库`的路径 - -三个编译参数的含义说明如下(带*表示仅在使用**GPU版本**预测库时指定, 其中CUDA库版本尽量对齐,**使用9.0、10.0版本,不使用9.2、10.1等版本CUDA库**): - -| 参数名 | 含义 | -| ---- | ---- | -| *CUDA_LIB | CUDA的库路径 | -| OPENCV_DIR | OpenCV的安装路径, | -| PADDLE_DIR | Paddle预测库的路径 | - -**注意**在使用CPU版本预测库时,需要把CUDA_LIB的勾去掉。 -![step4](https://paddleseg.bj.bcebos.com/inference/vs2019_step5.png) - -**设置完成后**, 点击上图中`保存并生成CMake缓存以加载变量`。 - -5. 点击`生成`->`全部生成` - -![step6](https://paddleseg.bj.bcebos.com/inference/vs2019_step6.png) - - -### Step5: 预测及可视化 - -上述`Visual Studio 2019`编译产出的可执行文件在`out\build\x64-Release`目录下,打开`cmd`,并切换到该目录: - -``` -cd D:\projects\PaddleDetection\inference\out\build\x64-Release -``` - -之后执行命令: - -``` -detection_demo.exe --conf=/path/to/your/conf --input_dir=/path/to/your/input/data/directory -``` - -更详细说明请参考ReadMe文档: [预测和可视化部分](../README.md) diff --git a/docs/advanced_tutorials/inference/docs/windows_vs2019_build.md b/docs/advanced_tutorials/inference/docs/windows_vs2019_build.md new file mode 120000 index 0000000000000000000000000000000000000000..a39dd3610577892e7d1ff414345334ed291d8ce0 --- /dev/null +++ b/docs/advanced_tutorials/inference/docs/windows_vs2019_build.md @@ -0,0 +1 @@ +../../../../deploy/cpp/docs/windows_vs2019_build.md \ No newline at end of file diff --git a/inference/LICENSE b/inference/LICENSE deleted file mode 100644 index 261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64..0000000000000000000000000000000000000000 --- a/inference/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/inference/README.md b/inference/README.md deleted file mode 100644 index 46de1b7caf3eb3e616dc5c6e047edd889dd01675..0000000000000000000000000000000000000000 --- a/inference/README.md +++ /dev/null @@ -1,2 +0,0 @@ -**文档教程请参考:** [PaddleDetection C++预测部署方案](../docs/advanced_tutorials/inference/DEPLOYMENT.md)
-**English document please refer:** [PaddleDetection C++ deployment](../docs/advanced_tutorials/inference/DEPLOYMENT.md) diff --git a/inference/conf/detection_rcnn.yaml b/inference/conf/detection_rcnn.yaml deleted file mode 100644 index a53698d7fedaaaec790318dade1621ea578eb0b6..0000000000000000000000000000000000000000 --- a/inference/conf/detection_rcnn.yaml +++ /dev/null @@ -1,18 +0,0 @@ -DEPLOY: - USE_GPU: 1 - MODEL_PATH: "/root/projects/models/faster_rcnn_pp50" - MODEL_FILENAME: "__model__" - PARAMS_FILENAME: "__params__" - EVAL_CROP_SIZE: (608, 608) - RESIZE_TYPE: "RANGE_SCALING" - TARGET_SHORT_SIZE : 800 - MEAN: [0.485, 0.456, 0.406] - STD: [0.229, 0.224, 0.225] - IMAGE_TYPE: "rgb" - NUM_CLASSES: 1 - CHANNELS : 3 - PRE_PROCESSOR: "DetectionPreProcessor" - PREDICTOR_MODE: "ANALYSIS" - BATCH_SIZE : 1 - RESIZE_MAX_SIZE: 1333 - FEEDS_SIZE: 3 diff --git a/inference/conf/detection_rcnn_fpn.yaml b/inference/conf/detection_rcnn_fpn.yaml deleted file mode 100644 index 9d6635ef8c2b29fb0ca9318d1ec08f1f7be037f7..0000000000000000000000000000000000000000 --- a/inference/conf/detection_rcnn_fpn.yaml +++ /dev/null @@ -1,19 +0,0 @@ -DEPLOY: - USE_GPU: 1 - MODEL_PATH: "/root/projects/models/faster_rcnn_pp50_fpn" - MODEL_FILENAME: "__model__" - PARAMS_FILENAME: "__params__" - EVAL_CROP_SIZE: (608, 608) - RESIZE_TYPE: "RANGE_SCALING" - TARGET_SHORT_SIZE : 800 - MEAN: [0.485, 0.456, 0.406] - STD: [0.229, 0.224, 0.225] - IMAGE_TYPE: "rgb" - NUM_CLASSES: 1 - CHANNELS : 3 - PRE_PROCESSOR: "DetectionPreProcessor" - PREDICTOR_MODE: "ANALYSIS" - BATCH_SIZE : 1 - RESIZE_MAX_SIZE: 1333 - FEEDS_SIZE: 3 - COARSEST_STRIDE: 32 diff --git a/inference/detection_demo.cpp b/inference/detection_demo.cpp deleted file mode 100644 index 32921fbd1cb3bdef4f55f8d5c9d5687780e4929b..0000000000000000000000000000000000000000 --- a/inference/detection_demo.cpp +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include - -DEFINE_string(conf, "", "Configuration File Path"); -DEFINE_string(input_dir, "", "Directory of Input Images"); - -int main(int argc, char** argv) { - // 0. parse args - google::ParseCommandLineFlags(&argc, &argv, true); - if (FLAGS_conf.empty() || FLAGS_input_dir.empty()) { - std::cout << "Usage: ./predictor --conf=/config/path/to/your/model " - << "--input_dir=/directory/of/your/input/images" << std::endl; - return -1; - } - // 1. create a predictor and init it with conf - PaddleSolution::DetectionPredictor predictor; - if (predictor.init(FLAGS_conf) != 0) { - #ifdef _WIN32 - std::cerr << "Fail to init predictor" << std::endl; - #else - LOG(FATAL) << "Fail to init predictor"; - #endif - return -1; - } - - // 2. get all the images with extension '.jpeg' at input_dir - auto imgs = PaddleSolution::utils::get_directory_images(FLAGS_input_dir, - ".jpeg|.jpg|.JPEG|.JPG|.bmp|.BMP|.png|.PNG"); - - // 3. 
predict - predictor.predict(imgs); - return 0; -} diff --git a/inference/images/detection_rcnn/000000014439.jpg b/inference/images/detection_rcnn/000000014439.jpg deleted file mode 100644 index 0abbdab06eb5950b93908cc91adfa640e8a3ac78..0000000000000000000000000000000000000000 Binary files a/inference/images/detection_rcnn/000000014439.jpg and /dev/null differ diff --git a/inference/images/detection_rcnn/000000087038.jpg b/inference/images/detection_rcnn/000000087038.jpg deleted file mode 100644 index 9f77f5d5f057b6f92dc096da704ecb8dee99bdf5..0000000000000000000000000000000000000000 Binary files a/inference/images/detection_rcnn/000000087038.jpg and /dev/null differ diff --git a/inference/images/detection_rcnn/000000570688.jpg b/inference/images/detection_rcnn/000000570688.jpg deleted file mode 100644 index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000 Binary files a/inference/images/detection_rcnn/000000570688.jpg and /dev/null differ diff --git a/inference/predictor/detection_predictor.cpp b/inference/predictor/detection_predictor.cpp deleted file mode 100644 index e47125c5e3eff159ea6f43740906f58e08dc78aa..0000000000000000000000000000000000000000 --- a/inference/predictor/detection_predictor.cpp +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "detection_predictor.h" -#include -#include -#include -#include "utils/detection_result.pb.h" -#undef min - -namespace PaddleSolution { - /* lod_buffer: every item in lod_buffer is an image matrix after preprocessing - * input_buffer: same data with lod_buffer after flattening to 1-D vector and padding, needed to be empty before using this function - */ - void padding_minibatch(const std::vector> &lod_buffer, - std::vector &input_buffer, - std::vector &resize_heights, - std::vector &resize_widths, - int channels, int coarsest_stride = 1) { - int batch_size = lod_buffer.size(); - int max_h = -1; - int max_w = -1; - for (int i = 0; i < batch_size; ++i) { - max_h = (max_h > resize_heights[i])? max_h:resize_heights[i]; - max_w = (max_w > resize_widths[i])? 
max_w:resize_widths[i]; - } - max_h = static_cast(ceil(static_cast(max_h) - / static_cast(coarsest_stride)) * coarsest_stride); - max_w = static_cast(ceil(static_cast(max_w) - / static_cast(coarsest_stride)) * coarsest_stride); - std::cout << "max_w: " << max_w << " max_h: " << max_h << std::endl; - input_buffer.insert(input_buffer.end(), - batch_size * channels * max_h * max_w, 0); - // flatten tensor and padding - for (int i = 0; i < lod_buffer.size(); ++i) { - float *input_buffer_ptr = input_buffer.data() - + i * channels * max_h * max_w; - const float *lod_ptr = lod_buffer[i].data(); - for (int c = 0; c < channels; ++c) { - for (int h = 0; h < resize_heights[i]; ++h) { - memcpy(input_buffer_ptr, lod_ptr, - resize_widths[i] * sizeof(float)); - lod_ptr += resize_widths[i]; - input_buffer_ptr += max_w; - } - input_buffer_ptr += (max_h - resize_heights[i]) * max_w; - } - } - // change resize w, h - for (int i = 0; i < batch_size; ++i) { - resize_widths[i] = max_w; - resize_heights[i] = max_h; - } - } - - void output_detection_result(const float* out_addr, - const std::vector> &lod_vector, - const std::vector &imgs_batch) { - for (int i = 0; i < lod_vector[0].size() - 1; ++i) { - DetectionResult detection_result; - detection_result.set_filename(imgs_batch[i]); - std::cout << imgs_batch[i] << ":" << std::endl; - for (int j = lod_vector[0][i]; j < lod_vector[0][i+1]; ++j) { - DetectionBox *box_ptr = detection_result.add_detection_boxes(); - box_ptr->set_class_( - static_cast(round(out_addr[0 + j * 6]))); - box_ptr->set_score(out_addr[1 + j * 6]); - box_ptr->set_left_top_x(out_addr[2 + j * 6]); - box_ptr->set_left_top_y(out_addr[3 + j * 6]); - box_ptr->set_right_bottom_x(out_addr[4 + j * 6]); - box_ptr->set_right_bottom_y(out_addr[5 + j * 6]); - printf("Class %d, score = %f, left top = [%f, %f], right bottom = [%f, %f]\n", - static_cast(round(out_addr[0 + j * 6])), - out_addr[1 + j * 6], - out_addr[2 + j * 6], - out_addr[3 + j * 6], - out_addr[4 + j * 6], - out_addr[5 + j * 6]); - } - printf("\n"); - std::ofstream output(imgs_batch[i] + ".pb", - std::ios::out | std::ios::trunc | std::ios::binary); - detection_result.SerializeToOstream(&output); - output.close(); - } - } - - int DetectionPredictor::init(const std::string& conf) { - if (!_model_config.load_config(conf)) { - #ifdef _WIN32 - std::cerr << "Fail to load config file: [" << conf << "], " - << "please check whether the config file path is correct" - << std::endl; - #else - LOG(FATAL) << "Fail to load config file: [" << conf << "], " - << "please check whether the config file path is correct"; - #endif - return -1; - } - _preprocessor = PaddleSolution::create_processor(conf); - if (_preprocessor == nullptr) { - #ifdef _WIN32 - std::cerr << "Failed to create_processor, please check whether you" - << " write a correct config file." 
<< std::endl; - #else - LOG(FATAL) << "Failed to create_processor, please check whether" - << " you write a correct config file."; - #endif - return -1; - } - - bool use_gpu = _model_config._use_gpu; - const auto& model_dir = _model_config._model_path; - const auto& model_filename = _model_config._model_file_name; - const auto& params_filename = _model_config._param_file_name; - - // load paddle model file - if (_model_config._predictor_mode == "NATIVE") { - paddle::NativeConfig config; - auto prog_file = utils::path_join(model_dir, model_filename); - auto param_file = utils::path_join(model_dir, params_filename); - config.prog_file = prog_file; - config.param_file = param_file; - config.fraction_of_gpu_memory = 0; - config.use_gpu = use_gpu; - config.device = 0; - _main_predictor = paddle::CreatePaddlePredictor(config); - } else if (_model_config._predictor_mode == "ANALYSIS") { - paddle::AnalysisConfig config; - if (use_gpu) { - config.EnableUseGpu(100, 0); - } - auto prog_file = utils::path_join(model_dir, model_filename); - auto param_file = utils::path_join(model_dir, params_filename); - config.SetModel(prog_file, param_file); - config.SwitchUseFeedFetchOps(false); - config.SwitchSpecifyInputNames(true); - config.EnableMemoryOptim(); - // config.SwitchIrOptim(true); - // config.EnableTensorRtEngine(1<<4, 30, 3); - _main_predictor = paddle::CreatePaddlePredictor(config); - } else { - return -1; - } - return 0; - } - - int DetectionPredictor::predict(const std::vector& imgs) { - if (imgs.size() == 0) { - #ifdef _WIN32 - std::cerr << "No image found! Please check whether the images path" - << " is correct or the format of images is correct\n" - << "Supporting format: [.jpeg|.jpg|.JPEG|.JPG|.bmp|.BMP|.png|.PNG]" << std::endl; - #else - LOG(ERROR) << "No image found! 
Please check whether the images path" - << " is correct or the format of images is correct\n" - << "Supporting format: [.jpeg|.jpg|.JPEG|.JPG|.bmp|.BMP|.png|.PNG]"; - #endif - return -1; - } - if (_model_config._predictor_mode == "NATIVE") { - return native_predict(imgs); - } else if (_model_config._predictor_mode == "ANALYSIS") { - return analysis_predict(imgs); - } - return -1; - } - - int DetectionPredictor::native_predict(const std::vector& imgs) { - int config_batch_size = _model_config._batch_size; - - int channels = _model_config._channels; - int eval_width = _model_config._resize[0]; - int eval_height = _model_config._resize[1]; - std::size_t total_size = imgs.size(); - int default_batch_size = std::min(config_batch_size, - static_cast(total_size)); - int batch = total_size / default_batch_size + - ((total_size % default_batch_size) != 0); - int batch_buffer_size = default_batch_size * channels - * eval_width * eval_height; - - auto& input_buffer = _buffer; - auto& imgs_batch = _imgs_batch; - float sr; - for (int u = 0; u < batch; ++u) { - int batch_size = default_batch_size; - if (u == (batch - 1) && (total_size % default_batch_size)) { - batch_size = total_size % default_batch_size; - } - - int real_buffer_size = batch_size * channels - * eval_width * eval_height; - std::vector feeds; - input_buffer.clear(); - imgs_batch.clear(); - for (int i = 0; i < batch_size; ++i) { - int idx = u * default_batch_size + i; - imgs_batch.push_back(imgs[idx]); - } - std::vector ori_widths; - std::vector ori_heights; - std::vector resize_widths; - std::vector resize_heights; - std::vector scale_ratios; - ori_widths.resize(batch_size); - ori_heights.resize(batch_size); - resize_widths.resize(batch_size); - resize_heights.resize(batch_size); - scale_ratios.resize(batch_size); - std::vector> lod_buffer(batch_size); - if (!_preprocessor->batch_process(imgs_batch, lod_buffer, - ori_widths.data(), - ori_heights.data(), - resize_widths.data(), - resize_heights.data(), - scale_ratios.data())) { - return -1; - } - // flatten and padding - padding_minibatch(lod_buffer, input_buffer, resize_heights, - resize_widths, channels, - _model_config._coarsest_stride); - paddle::PaddleTensor im_tensor, im_size_tensor, im_info_tensor; - - im_tensor.name = "image"; - im_tensor.shape = std::vector({ batch_size, - channels, - resize_heights[0], - resize_widths[0] }); - im_tensor.data.Reset(input_buffer.data(), - input_buffer.size() * sizeof(float)); - im_tensor.dtype = paddle::PaddleDType::FLOAT32; - - std::vector image_infos; - for (int i = 0; i < batch_size; ++i) { - image_infos.push_back(resize_heights[i]); - image_infos.push_back(resize_widths[i]); - image_infos.push_back(scale_ratios[i]); - } - im_info_tensor.name = "info"; - im_info_tensor.shape = std::vector({batch_size, 3}); - im_info_tensor.data.Reset(image_infos.data(), - batch_size * 3 * sizeof(float)); - im_info_tensor.dtype = paddle::PaddleDType::FLOAT32; - - std::vector image_size; - for (int i = 0; i < batch_size; ++i) { - image_size.push_back(ori_heights[i]); - image_size.push_back(ori_widths[i]); - } - - std::vector image_size_f; - for (int i = 0; i < batch_size; ++i) { - image_size_f.push_back(ori_heights[i]); - image_size_f.push_back(ori_widths[i]); - image_size_f.push_back(1.0); - } - - int feeds_size = _model_config._feeds_size; - im_size_tensor.name = "im_size"; - if (feeds_size == 2) { - im_size_tensor.shape = std::vector({ batch_size, 2}); - im_size_tensor.data.Reset(image_size.data(), - batch_size * 2 * sizeof(int)); - im_size_tensor.dtype = 
paddle::PaddleDType::INT32; - } else if (feeds_size == 3) { - im_size_tensor.shape = std::vector({ batch_size, 3}); - im_size_tensor.data.Reset(image_size_f.data(), - batch_size * 3 * sizeof(float)); - im_size_tensor.dtype = paddle::PaddleDType::FLOAT32; - } - std::cout << "Feed size = " << feeds_size << std::endl; - feeds.push_back(im_tensor); - if (_model_config._feeds_size > 2) { - feeds.push_back(im_info_tensor); - } - feeds.push_back(im_size_tensor); - _outputs.clear(); - - auto t1 = std::chrono::high_resolution_clock::now(); - if (!_main_predictor->Run(feeds, &_outputs, batch_size)) { - #ifdef _WIN32 - std::cerr << "Failed: NativePredictor->Run() return false at batch: " << u; - #else - LOG(ERROR) << "Failed: NativePredictor->Run() return false at batch: " << u; - #endif - continue; - } - auto t2 = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(t2 - t1).count(); - std::cout << "runtime = " << duration << std::endl; - std::cout << "Number of outputs:" << _outputs.size() << std::endl; - int out_num = 1; - // print shape of first output tensor for debugging - std::cout << "size of outputs[" << 0 << "]: ("; - for (int j = 0; j < _outputs[0].shape.size(); ++j) { - out_num *= _outputs[0].shape[j]; - std::cout << _outputs[0].shape[j] << ","; - } - std::cout << ")" << std::endl; - - // const size_t nums = _outputs.front().data.length() / sizeof(float); - // if (out_num % batch_size != 0 || out_num != nums) { - // LOG(ERROR) << "outputs data size mismatch with shape size."; - // return -1; - // } - float* out_addr = reinterpret_cast(_outputs[0].data.data()); - output_detection_result(out_addr, _outputs[0].lod, imgs_batch); - } - return 0; - } - - int DetectionPredictor::analysis_predict( - const std::vector& imgs) { - int config_batch_size = _model_config._batch_size; - int channels = _model_config._channels; - int eval_width = _model_config._resize[0]; - int eval_height = _model_config._resize[1]; - auto total_size = imgs.size(); - int default_batch_size = std::min(config_batch_size, - static_cast(total_size)); - int batch = total_size / default_batch_size - + ((total_size % default_batch_size) != 0); - int batch_buffer_size = default_batch_size * channels - * eval_width * eval_height; - - auto& input_buffer = _buffer; - auto& imgs_batch = _imgs_batch; - for (int u = 0; u < batch; ++u) { - int batch_size = default_batch_size; - if (u == (batch - 1) && (total_size % default_batch_size)) { - batch_size = total_size % default_batch_size; - } - - int real_buffer_size = batch_size * channels * - eval_width * eval_height; - std::vector feeds; - // input_buffer.resize(real_buffer_size); - input_buffer.clear(); - imgs_batch.clear(); - for (int i = 0; i < batch_size; ++i) { - int idx = u * default_batch_size + i; - imgs_batch.push_back(imgs[idx]); - } - - std::vector ori_widths; - std::vector ori_heights; - std::vector resize_widths; - std::vector resize_heights; - std::vector scale_ratios; - ori_widths.resize(batch_size); - ori_heights.resize(batch_size); - resize_widths.resize(batch_size); - resize_heights.resize(batch_size); - scale_ratios.resize(batch_size); - - std::vector> lod_buffer(batch_size); - if (!_preprocessor->batch_process(imgs_batch, lod_buffer, - ori_widths.data(), - ori_heights.data(), - resize_widths.data(), - resize_heights.data(), - scale_ratios.data())) { - std::cout << "Failed to preprocess!" 
<< std::endl; - return -1; - } - - // flatten tensor - padding_minibatch(lod_buffer, input_buffer, resize_heights, - resize_widths, channels, - _model_config._coarsest_stride); - - std::vector input_names = _main_predictor->GetInputNames(); - auto im_tensor = _main_predictor->GetInputTensor( - input_names.front()); - im_tensor->Reshape({ batch_size, channels, - resize_heights[0], resize_widths[0] }); - im_tensor->copy_from_cpu(input_buffer.data()); - - if (input_names.size() > 2) { - std::vector image_infos; - for (int i = 0; i < batch_size; ++i) { - image_infos.push_back(resize_heights[i]); - image_infos.push_back(resize_widths[i]); - image_infos.push_back(scale_ratios[i]); - } - auto im_info_tensor = _main_predictor->GetInputTensor( - input_names[1]); - im_info_tensor->Reshape({batch_size, 3}); - im_info_tensor->copy_from_cpu(image_infos.data()); - } - - std::vector image_size; - for (int i = 0; i < batch_size; ++i) { - image_size.push_back(ori_heights[i]); - image_size.push_back(ori_widths[i]); - } - std::vector image_size_f; - for (int i = 0; i < batch_size; ++i) { - image_size_f.push_back(static_cast(ori_heights[i])); - image_size_f.push_back(static_cast(ori_widths[i])); - image_size_f.push_back(1.0); - } - - auto im_size_tensor = _main_predictor->GetInputTensor( - input_names.back()); - if (input_names.size() > 2) { - im_size_tensor->Reshape({batch_size, 3}); - im_size_tensor->copy_from_cpu(image_size_f.data()); - } else { - im_size_tensor->Reshape({batch_size, 2}); - im_size_tensor->copy_from_cpu(image_size.data()); - } - - auto t1 = std::chrono::high_resolution_clock::now(); - _main_predictor->ZeroCopyRun(); - auto t2 = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(t2 - t1).count(); - std::cout << "runtime = " << duration << std::endl; - - auto output_names = _main_predictor->GetOutputNames(); - auto output_t = _main_predictor->GetOutputTensor(output_names[0]); - std::vector out_data; - std::vector output_shape = output_t->shape(); - - int out_num = 1; - std::cout << "size of outputs[" << 0 << "]: ("; - for (int j = 0; j < output_shape.size(); ++j) { - out_num *= output_shape[j]; - std::cout << output_shape[j] << ","; - } - std::cout << ")" << std::endl; - - out_data.resize(out_num); - output_t->copy_to_cpu(out_data.data()); - - float* out_addr = reinterpret_cast(out_data.data()); - auto lod_vector = output_t->lod(); - output_detection_result(out_addr, lod_vector, imgs_batch); - } - return 0; - } -} // namespace PaddleSolution diff --git a/inference/predictor/detection_predictor.h b/inference/predictor/detection_predictor.h deleted file mode 100644 index 2ce6f45202019f54468a9bc33f8a5bce0e7f2c20..0000000000000000000000000000000000000000 --- a/inference/predictor/detection_predictor.h +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
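
> Editor's note: the sketch below is not part of the removed sources. It is a minimal, self-contained illustration of the coarsest-stride padding that `padding_minibatch()` above performs: every image in a batch is copied into a zero-filled NCHW buffer whose height and width are the batch maxima rounded up to a multiple of the stride (32 for FPN models, per `DEPLOY.COARSEST_STRIDE`). All names and shapes are illustrative.

```cpp
#include <algorithm>
#include <cmath>
#include <cstring>
#include <iostream>
#include <vector>

// Round v up to the next multiple of stride.
static int align_up(int v, int stride) {
  return static_cast<int>(std::ceil(static_cast<float>(v) / stride)) * stride;
}

int main() {
  const int stride = 32;   // DEPLOY.COARSEST_STRIDE used by FPN models
  const int channels = 3;
  // Resized (height, width) of each image in the batch (illustrative values).
  std::vector<std::pair<int, int>> shapes = {{416, 608}, {375, 500}};

  int max_h = 0, max_w = 0;
  for (const auto& s : shapes) {
    max_h = std::max(max_h, s.first);
    max_w = std::max(max_w, s.second);
  }
  max_h = align_up(max_h, stride);
  max_w = align_up(max_w, stride);

  // One zero-initialised buffer for the whole batch, so the right and bottom
  // borders of each slot stay 0, matching the behaviour of padding_minibatch().
  std::vector<float> batch(shapes.size() * channels * max_h * max_w, 0.f);

  for (size_t i = 0; i < shapes.size(); ++i) {
    const int h = shapes[i].first, w = shapes[i].second;
    std::vector<float> image(channels * h * w, 1.f);  // stand-in pixel data
    float* dst = batch.data() + i * channels * max_h * max_w;
    const float* src = image.data();
    for (int c = 0; c < channels; ++c) {
      for (int row = 0; row < h; ++row) {
        // Copy one row into the top-left corner of the padded plane.
        std::memcpy(dst + row * max_w, src, w * sizeof(float));
        src += w;
      }
      dst += max_h * max_w;  // advance to the next channel plane of this slot
    }
  }
  std::cout << "padded batch: [" << shapes.size() << ", " << channels << ", "
            << max_h << ", " << max_w << "]" << std::endl;
  return 0;
}
```
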
- -#pragma once - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "utils/conf_parser.h" -#include "utils/utils.h" -#include "preprocessor/preprocessor.h" - -namespace PaddleSolution { -class DetectionPredictor { - public: - // init a predictor with a yaml config file - int init(const std::string& conf); - // predict api - int predict(const std::vector& imgs); - - private: - int native_predict(const std::vector& imgs); - int analysis_predict(const std::vector& imgs); - private: - std::vector _buffer; - std::vector _imgs_batch; - std::vector _outputs; - - PaddleSolution::PaddleModelConfigPaser _model_config; - std::shared_ptr _preprocessor; - std::unique_ptr _main_predictor; -}; -} // namespace PaddleSolution diff --git a/inference/preprocessor/preprocessor.cpp b/inference/preprocessor/preprocessor.cpp deleted file mode 100644 index 941416a044116b51155e7e71fe9ef53066602ddf..0000000000000000000000000000000000000000 --- a/inference/preprocessor/preprocessor.cpp +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include - -#include "preprocessor.h" -#include "preprocessor_detection.h" -#include - -namespace PaddleSolution { - - std::shared_ptr create_processor(const std::string& conf_file) { - auto config = std::make_shared(); - if (!config->load_config(conf_file)) { - #ifdef _WIN32 - std::cerr << "fail to load conf file [" << conf_file << "]" << std::endl; - #else - LOG(FATAL) << "fail to load conf file [" << conf_file << "]"; - #endif - return nullptr; - } - - if (config->_pre_processor == "DetectionPreProcessor") { - auto p = std::make_shared(); - if (!p->init(config)) { - return nullptr; - } - return p; - } - #ifdef _WIN32 - std::cerr << "unknown processor_name [" << config->_pre_processor << "]," - << "please check whether PRE_PROCESSOR is set correctly" << std::endl; - #else - LOG(FATAL) << "unknown processor_name [" << config->_pre_processor << "]," - << "please check whether PRE_PROCESSOR is set correctly"; - #endif - return nullptr; - } -} // namespace PaddleSolution diff --git a/inference/preprocessor/preprocessor.h b/inference/preprocessor/preprocessor.h deleted file mode 100644 index b5cfa467f9af391ffe456bc0e7a4fa7ea6792c49..0000000000000000000000000000000000000000 --- a/inference/preprocessor/preprocessor.h +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once -#include -#include -#include - -#include -#include -#include - -#include "utils/conf_parser.h" - -namespace PaddleSolution { - -class ImagePreProcessor { - protected: - ImagePreProcessor() {} - - public: - virtual ~ImagePreProcessor() {} - - virtual bool single_process(const std::string& fname, - float* data, - int* ori_w, - int* ori_h) { - return true; - } - - virtual bool batch_process(const std::vector& imgs, - float* data, - int* ori_w, - int* ori_h) { - return true; - } - - virtual bool single_process(const std::string& fname, float* data) { - return true; - } - - virtual bool batch_process(const std::vector& imgs, - float* data) { - return true; - } - - virtual bool single_process(const std::string& fname, - std::vector &data, - int* ori_w, int* ori_h, - int* resize_w, int* resize_h, - float* scale_ratio) { - return true; - } - - virtual bool batch_process(const std::vector& imgs, - std::vector> &data, - int* ori_w, int* ori_h, int* resize_w, - int* resize_h, float* scale_ratio) { - return true; - } -}; // end of class ImagePreProcessor - -std::shared_ptr - create_processor(const std::string &config_file); -} // namespace PaddleSolution - diff --git a/inference/preprocessor/preprocessor_detection.cpp b/inference/preprocessor/preprocessor_detection.cpp deleted file mode 100644 index 15d2dc3c4b6255b8639d3296da7be4d22764f475..0000000000000000000000000000000000000000 --- a/inference/preprocessor/preprocessor_detection.cpp +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include - -#include "preprocessor_detection.h" -#include "utils/utils.h" - -namespace PaddleSolution { -bool DetectionPreProcessor::single_process(const std::string& fname, - std::vector &vec_data, - int* ori_w, int* ori_h, - int* resize_w, int* resize_h, - float* scale_ratio) { - cv::Mat im1 = cv::imread(fname, -1); - cv::Mat im; - if (_config->_feeds_size == 3) { // faster rcnn - im1.convertTo(im, CV_32FC3, 1/255.0); - } else if (_config->_feeds_size == 2) { // yolo v3 - im = im1; - } - if (im.data == nullptr || im.empty()) { - #ifdef _WIN32 - std::cerr << "Failed to open image: " << fname << std::endl; - #else - LOG(ERROR) << "Failed to open image: " << fname; - #endif - return false; - } - int channels = im.channels(); - if (channels == 1) { - cv::cvtColor(im, im, cv::COLOR_GRAY2BGR); - } - channels = im.channels(); - if (channels != 3 && channels != 4) { - #ifdef _WIN32 - std::cerr << "Only support rgb(gray) and rgba image." 
<< std::endl; - #else - LOG(ERROR) << "Only support rgb(gray) and rgba image."; - #endif - return false; - } - *ori_w = im.cols; - *ori_h = im.rows; - cv::cvtColor(im, im, cv::COLOR_BGR2RGB); - // channels = im.channels(); - - // resize - int rw = im.cols; - int rh = im.rows; - float im_scale_ratio; - utils::scaling(_config->_resize_type, rw, rh, _config->_resize[0], - _config->_resize[1], _config->_target_short_size, - _config->_resize_max_size, im_scale_ratio); - cv::Size resize_size(rw, rh); - *resize_w = rw; - *resize_h = rh; - *scale_ratio = im_scale_ratio; - if (*ori_h != rh || *ori_w != rw) { - cv::Mat im_temp; - if (_config->_resize_type == utils::SCALE_TYPE::UNPADDING) { - cv::resize(im, im_temp, resize_size, 0, 0, cv::INTER_LINEAR); - } else if (_config->_resize_type == utils::SCALE_TYPE::RANGE_SCALING) { - cv::resize(im, im_temp, cv::Size(), im_scale_ratio, - im_scale_ratio, cv::INTER_LINEAR); - } - im = im_temp; - } - - vec_data.resize(channels * rw * rh); - float *data = vec_data.data(); - - float* pmean = _config->_mean.data(); - float* pscale = _config->_std.data(); - for (int h = 0; h < rh; ++h) { - const uchar* uptr = im.ptr(h); - const float* fptr = im.ptr(h); - int im_index = 0; - for (int w = 0; w < rw; ++w) { - for (int c = 0; c < channels; ++c) { - int top_index = (c * rh + h) * rw + w; - float pixel; - if (_config->_feeds_size == 2) { // yolo v3 - pixel = static_cast(uptr[im_index++]) / 255.0; - } else if (_config->_feeds_size == 3) { - pixel = fptr[im_index++]; - } - pixel = (pixel - pmean[c]) / pscale[c]; - data[top_index] = pixel; - } - } - } - return true; -} - -bool DetectionPreProcessor::batch_process(const std::vector& imgs, - std::vector> &data, - int* ori_w, int* ori_h, int* resize_w, - int* resize_h, float* scale_ratio) { - auto ic = _config->_channels; - auto iw = _config->_resize[0]; - auto ih = _config->_resize[1]; - std::vector threads; - for (int i = 0; i < imgs.size(); ++i) { - std::string path = imgs[i]; - int* width = &ori_w[i]; - int* height = &ori_h[i]; - int* resize_width = &resize_w[i]; - int* resize_height = &resize_h[i]; - float* sr = &scale_ratio[i]; - threads.emplace_back([this, &data, i, path, width, height, - resize_width, resize_height, sr] { - std::vector buffer; - single_process(path, buffer, width, height, resize_width, - resize_height, sr); - data[i] = buffer; - }); - } - for (auto& t : threads) { - if (t.joinable()) { - t.join(); - } - } - return true; -} - -bool DetectionPreProcessor::init(std::shared_ptr config) { - _config = config; - return true; -} -} // namespace PaddleSolution diff --git a/inference/preprocessor/preprocessor_detection.h b/inference/preprocessor/preprocessor_detection.h deleted file mode 100644 index 7ee1678e3bb3710986500a92a71d444dfd32a0f2..0000000000000000000000000000000000000000 --- a/inference/preprocessor/preprocessor_detection.h +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
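
> Editor's note: the sketch below is not part of the removed sources. It shows, in a standalone form, the per-pixel transform applied by `DetectionPreProcessor::single_process()` above for the YOLOv3-style input path: scale to [0, 1], subtract the per-channel mean, divide by the per-channel std, and write the result in CHW order. A plain `uint8` HWC buffer stands in for the `cv::Mat`; all names and sizes are illustrative.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const int h = 2, w = 3, channels = 3;
  // Interleaved HWC uint8 image (as decoded by OpenCV after BGR->RGB).
  std::vector<std::uint8_t> hwc(h * w * channels, 128);
  const float mean[3]   = {0.485f, 0.456f, 0.406f};
  const float stddev[3] = {0.229f, 0.224f, 0.225f};

  // Planar CHW float output, the layout expected by the "image" input tensor.
  std::vector<float> chw(channels * h * w);
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      for (int c = 0; c < channels; ++c) {
        float pixel = hwc[(y * w + x) * channels + c] / 255.0f;
        chw[(c * h + y) * w + x] = (pixel - mean[c]) / stddev[c];
      }
    }
  }
  std::cout << "first normalized value: " << chw[0] << std::endl;
  return 0;
}
```
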
- -#pragma once - -#include "preprocessor.h" - -namespace PaddleSolution { - -class DetectionPreProcessor : public ImagePreProcessor { - public: - DetectionPreProcessor() : _config(nullptr) { - } - - bool init(std::shared_ptr config); - - bool single_process(const std::string& fname, std::vector &data, - int* ori_w, int* ori_h, int* resize_w, - int* resize_h, float* scale_ratio); - - bool batch_process(const std::vector& imgs, - std::vector> &data, - int* ori_w, int* ori_h, int* resize_w, - int* resize_h, float* scale_ratio); - private: - std::shared_ptr _config; -}; - -} // namespace PaddleSolution diff --git a/inference/tools/coco17.json b/inference/tools/coco17.json deleted file mode 100644 index d3bbbaad038534baacf6f86f78db5d32bce16238..0000000000000000000000000000000000000000 --- a/inference/tools/coco17.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "0" : "background", - "1" : "person", - "2" : "bicycle", - "3" : "car", - "4" : "motorcycle", - "5" : "airplane", - "6" : "bus", - "7" : "train", - "8" : "truck", - "9" : "boat", - "10" : "traffic light", - "11" : "fire hydrant", - "12" : "stop sign", - "13" : "parking meter", - "14" : "bench", - "15" : "bird", - "16" : "cat", - "17" : "dog", - "18" : "horse", - "19" : "sheep", - "20" : "cow", - "21" : "elephant", - "22" : "bear", - "23" : "zebra", - "24" : "giraffe", - "25" : "backpack", - "26" : "umbrella", - "27" : "handbag", - "28" : "tie", - "29" : "suitcase", - "30" : "frisbee", - "31" : "skis", - "32" : "snowboard", - "33" : "sports ball", - "34" : "kite", - "35" : "baseball bat", - "36" : "baseball glove", - "37" : "skateboard", - "38" : "surfboard", - "39" : "tennis racket", - "40" : "bottle", - "41" : "wine glass", - "42" : "cup", - "43" : "fork", - "44" : "knife", - "45" : "spoon", - "46" : "bowl", - "47" : "banana", - "48" : "apple", - "49" : "sandwich", - "50" : "orange", - "51" : "broccoli", - "52" : "carrot", - "53" : "hot dog", - "54" : "pizza", - "55" : "donut", - "56" : "cake", - "57" : "chair", - "58" : "couch", - "59" : "potted plant", - "60" : "bed", - "61" : "dining table", - "62" : "toilet", - "63" : "tv", - "64" : "laptop", - "65" : "mouse", - "66" : "remote", - "67" : "keyboard", - "68" : "cell phone", - "69" : "microwave", - "70" : "oven", - "71" : "toaster", - "72" : "sink", - "73" : "refrigerator", - "74" : "book", - "75" : "clock", - "76" : "vase", - "77" : "scissors", - "78" : "teddy bear", - "79" : "hair drier", - "80" : "toothbrush" -} diff --git a/inference/tools/detection_result_pb2.py b/inference/tools/detection_result_pb2.py deleted file mode 100644 index bb7acae60f143d4056ff8d5cd0222105f42f3eba..0000000000000000000000000000000000000000 --- a/inference/tools/detection_result_pb2.py +++ /dev/null @@ -1,214 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: detection_result.proto - -import sys -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - -DESCRIPTOR = _descriptor.FileDescriptor( - name='detection_result.proto', - package='PaddleSolution', - syntax='proto2', - serialized_pb=_b( - '\n\x16\x64\x65tection_result.proto\x12\x0ePaddleSolution\"\x84\x01\n\x0c\x44\x65tectionBox\x12\r\n\x05\x63lass\x18\x01 \x01(\x05\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x12\n\nleft_top_x\x18\x03 \x01(\x02\x12\x12\n\nleft_top_y\x18\x04 \x01(\x02\x12\x16\n\x0eright_bottom_x\x18\x05 \x01(\x02\x12\x16\n\x0eright_bottom_y\x18\x06 \x01(\x02\"Z\n\x0f\x44\x65tectionResult\x12\x10\n\x08\x66ilename\x18\x01 \x01(\t\x12\x35\n\x0f\x64\x65tection_boxes\x18\x02 \x03(\x0b\x32\x1c.PaddleSolution.DetectionBox' - )) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -_DETECTIONBOX = _descriptor.Descriptor( - name='DetectionBox', - full_name='PaddleSolution.DetectionBox', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='class', - full_name='PaddleSolution.DetectionBox.class', - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='score', - full_name='PaddleSolution.DetectionBox.score', - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='left_top_x', - full_name='PaddleSolution.DetectionBox.left_top_x', - index=2, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='left_top_y', - full_name='PaddleSolution.DetectionBox.left_top_y', - index=3, - number=4, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='right_bottom_x', - full_name='PaddleSolution.DetectionBox.right_bottom_x', - index=4, - number=5, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='right_bottom_y', - full_name='PaddleSolution.DetectionBox.right_bottom_y', - index=5, - number=6, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - 
serialized_start=43, - serialized_end=175) - -_DETECTIONRESULT = _descriptor.Descriptor( - name='DetectionResult', - full_name='PaddleSolution.DetectionResult', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='filename', - full_name='PaddleSolution.DetectionResult.filename', - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='detection_boxes', - full_name='PaddleSolution.DetectionResult.detection_boxes', - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[], - serialized_start=177, - serialized_end=267) - -_DETECTIONRESULT.fields_by_name['detection_boxes'].message_type = _DETECTIONBOX -DESCRIPTOR.message_types_by_name['DetectionBox'] = _DETECTIONBOX -DESCRIPTOR.message_types_by_name['DetectionResult'] = _DETECTIONRESULT - -DetectionBox = _reflection.GeneratedProtocolMessageType( - 'DetectionBox', - (_message.Message, ), - dict( - DESCRIPTOR=_DETECTIONBOX, - __module__='detection_result_pb2' - # @@protoc_insertion_point(class_scope:PaddleSolution.DetectionBox) - )) -_sym_db.RegisterMessage(DetectionBox) - -DetectionResult = _reflection.GeneratedProtocolMessageType( - 'DetectionResult', - (_message.Message, ), - dict( - DESCRIPTOR=_DETECTIONRESULT, - __module__='detection_result_pb2' - # @@protoc_insertion_point(class_scope:PaddleSolution.DetectionResult) - )) -_sym_db.RegisterMessage(DetectionResult) - -# @@protoc_insertion_point(module_scope) diff --git a/inference/tools/vis.py b/inference/tools/vis.py deleted file mode 100644 index a4f7c6b413299d446d7ebc81990162fe209f7645..0000000000000000000000000000000000000000 --- a/inference/tools/vis.py +++ /dev/null @@ -1,118 +0,0 @@ -# coding: utf-8 -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
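
> Editor's note: the sketch below is not part of the removed sources. It reads back one of the `<image>.pb` files written by `output_detection_result()` and prints the boxes whose score passes a threshold, i.e. the same thing `vis.py` below does in Python before drawing. It assumes the generated `utils/detection_result.pb.h` from this (removed) tree is on the include path; the accessors mirror the setters used in `detection_predictor.cpp` (`class` is renamed to `class_()` by protoc).

```cpp
#include <fstream>
#include <iostream>

#include "utils/detection_result.pb.h"

int main(int argc, char** argv) {
  if (argc < 2) {
    std::cerr << "usage: ./read_result /path/to/image.jpg.pb" << std::endl;
    return 1;
  }
  const float threshold = 0.5f;  // illustrative score cut-off

  std::ifstream input(argv[1], std::ios::in | std::ios::binary);
  PaddleSolution::DetectionResult result;
  if (!input || !result.ParseFromIstream(&input)) {
    std::cerr << "failed to parse " << argv[1] << std::endl;
    return 1;
  }

  std::cout << result.filename() << std::endl;
  for (const auto& box : result.detection_boxes()) {
    if (box.score() < threshold) continue;
    std::cout << "class " << box.class_() << ", score " << box.score()
              << ", left top [" << box.left_top_x() << ", " << box.left_top_y()
              << "], right bottom [" << box.right_bottom_x() << ", "
              << box.right_bottom_y() << "]" << std::endl;
  }
  return 0;
}
```
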
- -import detection_result_pb2 -import cv2 -import sys -import gflags -import numpy as np -import json -from PIL import Image, ImageDraw, ImageFont -import io - -Flags = gflags.FLAGS -gflags.DEFINE_string('img_path', 'abc', 'image path') -gflags.DEFINE_string('img_result_path', 'def', 'image result path') -gflags.DEFINE_float('threshold', 0.0, 'threshold of score') -gflags.DEFINE_string('c2l_path', 'ghk', 'class to label path') - - -def colormap(rgb=False): - """ - Get colormap - """ - color_list = np.array([ - 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, - 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, - 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, - 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, - 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, - 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, - 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, - 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000, - 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, - 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, - 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, - 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, - 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, - 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, - 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, - 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167, - 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, - 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, - 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, - 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, - 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, - 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286, - 0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714, - 0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000 - ]).astype(np.float32) - color_list = color_list.reshape((-1, 3)) * 255 - if not rgb: - color_list = color_list[:, ::-1] - return color_list - - -if __name__ == "__main__": - if len(sys.argv) != 5: - print( - "Usage: python vis.py --img_path=/path/to/image --img_result_path=/path/to/image_result.pb --threshold=0.1 --c2l_path=/path/to/class2label.json" - ) - else: - Flags(sys.argv) - color_list = colormap(rgb=True) - text_thickness = 1 - text_scale = 0.3 - with open(Flags.img_result_path, "rb") as f: - detection_result = detection_result_pb2.DetectionResult() - detection_result.ParseFromString(f.read()) - img = cv2.imread(Flags.img_path) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - class2LabelMap = dict() - with io.open(Flags.c2l_path, "r", encoding="utf-8") as json_f: - class2LabelMap = json.load(json_f) - for box in detection_result.detection_boxes: - if box.score >= Flags.threshold: - box_class = getattr(box, 'class') - text_class_score_str = "%s %.2f" % ( - class2LabelMap.get(str(box_class)), box.score) - text_point = (int(box.left_top_x), int(box.left_top_y)) - - ptLeftTop = (int(box.left_top_x), int(box.left_top_y)) - ptRightBottom = (int(box.right_bottom_x), - int(box.right_bottom_y)) - box_thickness = 1 - color = tuple([int(c) for c in color_list[box_class]]) - cv2.rectangle(img, ptLeftTop, 
ptRightBottom, color, - box_thickness, 8) - if text_point[1] < 0: - text_point = (int(box.left_top_x), - int(box.right_bottom_y)) - WHITE = (255, 255, 255) - font = cv2.FONT_HERSHEY_SIMPLEX - text_size = cv2.getTextSize(text_class_score_str, font, - text_scale, text_thickness) - - text_box_left_top = (text_point[0], - text_point[1] - text_size[0][1]) - text_box_right_bottom = ( - text_point[0] + text_size[0][0], text_point[1]) - - cv2.rectangle(img, text_box_left_top, - text_box_right_bottom, color, -1, 8) - cv2.putText(img, text_class_score_str, text_point, font, - text_scale, WHITE, text_thickness) - img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) - cv2.imwrite(Flags.img_path + ".png", img) diff --git a/inference/utils/conf_parser.h b/inference/utils/conf_parser.h deleted file mode 100644 index 97461a6e92eee1373cec0ff22e9f9387b77766d3..0000000000000000000000000000000000000000 --- a/inference/utils/conf_parser.h +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once -#include -#include -#include -#include -#include - -namespace PaddleSolution { - -class PaddleModelConfigPaser { - std::map _scaling_map; - - public: - PaddleModelConfigPaser() - :_class_num(0), - _channels(0), - _use_gpu(0), - _batch_size(1), - _target_short_size(0), - _model_file_name("__model__"), - _param_file_name("__params__"), - _scaling_map{{"UNPADDING", 0}, - {"RANGE_SCALING", 1}}, - _feeds_size(1), - _coarsest_stride(1) {} - - ~PaddleModelConfigPaser() {} - - void reset() { - _crop_size.clear(); - _resize.clear(); - _mean.clear(); - _std.clear(); - _img_type.clear(); - _class_num = 0; - _channels = 0; - _use_gpu = 0; - _target_short_size = 0; - _batch_size = 1; - _model_file_name = "__model__"; - _model_path = "./"; - _param_file_name = "__params__"; - _resize_type = 0; - _resize_max_size = 0; - _feeds_size = 1; - _coarsest_stride = 1; - } - - std::string process_parenthesis(const std::string& str) { - if (str.size() < 2) { - return str; - } - std::string nstr(str); - if (str[0] == '(' && str.back() == ')') { - nstr[0] = '['; - nstr[str.size() - 1] = ']'; - } - return nstr; - } - - template - std::vector parse_str_to_vec(const std::string& str) { - std::vector data; - auto node = YAML::Load(str); - for (const auto& item : node) { - data.push_back(item.as()); - } - return data; - } - - bool load_config(const std::string& conf_file) { - reset(); - YAML::Node config; - try { - config = YAML::LoadFile(conf_file); - } catch(...) { - return false; - } - // 1. get resize - if (config["DEPLOY"]["EVAL_CROP_SIZE"].IsDefined()) { - auto str = config["DEPLOY"]["EVAL_CROP_SIZE"].as(); - _resize = parse_str_to_vec(process_parenthesis(str)); - } else { - std::cerr << "Please set EVAL_CROP_SIZE: (xx, xx)" << std::endl; - return false; - } - // 0. 
get crop_size - if (config["DEPLOY"]["CROP_SIZE"].IsDefined()) { - auto crop_str = config["DEPLOY"]["CROP_SIZE"].as<std::string>(); - _crop_size = parse_str_to_vec<int>(process_parenthesis(crop_str)); - } else { - _crop_size = _resize; - } - - // 2. get mean - if (config["DEPLOY"]["MEAN"].IsDefined()) { - for (const auto& item : config["DEPLOY"]["MEAN"]) { - _mean.push_back(item.as<float>()); - } - } else { - std::cerr << "Please set MEAN: [xx, xx, xx]" << std::endl; - return false; - } - // 3. get std - if(config["DEPLOY"]["STD"].IsDefined()) { - for (const auto& item : config["DEPLOY"]["STD"]) { - _std.push_back(item.as<float>()); - } - } else { - std::cerr << "Please set STD: [xx, xx, xx]" << std::endl; - return false; - } - // 4. get image type - if (config["DEPLOY"]["IMAGE_TYPE"].IsDefined()) { - _img_type = config["DEPLOY"]["IMAGE_TYPE"].as<std::string>(); - } else { - std::cerr << "Please set IMAGE_TYPE: \"rgb\" or \"rgba\"" << std::endl; - return false; - } - // 5. get class number - if (config["DEPLOY"]["NUM_CLASSES"].IsDefined()) { - _class_num = config["DEPLOY"]["NUM_CLASSES"].as<int>(); - } else { - std::cerr << "Please set NUM_CLASSES: x" << std::endl; - return false; - } - // 7. set model path - if (config["DEPLOY"]["MODEL_PATH"].IsDefined()) { - _model_path = config["DEPLOY"]["MODEL_PATH"].as<std::string>(); - } else { - std::cerr << "Please set MODEL_PATH: \"/path/to/model_dir\"" << std::endl; - return false; - } - // 8. get model file_name - if (config["DEPLOY"]["MODEL_FILENAME"].IsDefined()) { - _model_file_name = config["DEPLOY"]["MODEL_FILENAME"].as<std::string>(); - } else { - _model_file_name = "__model__"; - } - // 9. get model param file name - if (config["DEPLOY"]["PARAMS_FILENAME"].IsDefined()) { - _param_file_name - = config["DEPLOY"]["PARAMS_FILENAME"].as<std::string>(); - } else { - _param_file_name = "__params__"; - } - // 10. get pre_processor - if (config["DEPLOY"]["PRE_PROCESSOR"].IsDefined()) { - _pre_processor = config["DEPLOY"]["PRE_PROCESSOR"].as<std::string>(); - } else { - std::cerr << "Please set PRE_PROCESSOR: \"DetectionPreProcessor\"" << std::endl; - return false; - } - // 11. use_gpu - if (config["DEPLOY"]["USE_GPU"].IsDefined()) { - _use_gpu = config["DEPLOY"]["USE_GPU"].as<int>(); - } else { - _use_gpu = 0; - } - // 12. predictor_mode - if (config["DEPLOY"]["PREDICTOR_MODE"].IsDefined()) { - _predictor_mode = config["DEPLOY"]["PREDICTOR_MODE"].as<std::string>(); - } else { - std::cerr << "Please set PREDICTOR_MODE: \"NATIVE\" or \"ANALYSIS\"" << std::endl; - return false; - } - // 13. batch_size - if (config["DEPLOY"]["BATCH_SIZE"].IsDefined()) { - _batch_size = config["DEPLOY"]["BATCH_SIZE"].as<int>(); - } else { - _batch_size = 1; - } - // 14. channels - if (config["DEPLOY"]["CHANNELS"].IsDefined()) { - _channels = config["DEPLOY"]["CHANNELS"].as<int>(); - } else { - std::cerr << "Please set CHANNELS: x" << std::endl; - return false; - } - // 15. target_short_size - if (config["DEPLOY"]["TARGET_SHORT_SIZE"].IsDefined()) { - _target_short_size = config["DEPLOY"]["TARGET_SHORT_SIZE"].as<int>(); - } - // 16.resize_type - if (config["DEPLOY"]["RESIZE_TYPE"].IsDefined() && - _scaling_map.find(config["DEPLOY"]["RESIZE_TYPE"].as<std::string>()) != _scaling_map.end()) { - _resize_type = _scaling_map[config["DEPLOY"]["RESIZE_TYPE"].as<std::string>()]; - } else { - _resize_type = 0; - } - // 17.resize_max_size - if (config["DEPLOY"]["RESIZE_MAX_SIZE"].IsDefined()) { - _resize_max_size = config["DEPLOY"]["RESIZE_MAX_SIZE"].as<int>(); - } - // 18.feeds_size - if (config["DEPLOY"]["FEEDS_SIZE"].IsDefined()) { - _feeds_size = config["DEPLOY"]["FEEDS_SIZE"].as<int>(); - } - // 19.
coarsest_stride - if (config["DEPLOY"]["COARSEST_STRIDE"].IsDefined()) { - _coarsest_stride = config["DEPLOY"]["COARSEST_STRIDE"].as<int>(); - } - return true; - } - - void debug() const { - std::cout << "SCALE_RESIZE: (" << _resize[0] << ", " - << _resize[1] << ")" << std::endl; - - std::cout << "MEAN: ["; - for (int i = 0; i < _mean.size(); ++i) { - if (i != _mean.size() - 1) { - std::cout << _mean[i] << ", "; - } else { - std::cout << _mean[i]; - } - } - std::cout << "]" << std::endl; - - std::cout << "STD: ["; - for (int i = 0; i < _std.size(); ++i) { - if (i != _std.size() - 1) { - std::cout << _std[i] << ", "; - } else { - std::cout << _std[i]; - } - } - std::cout << "]" << std::endl; - std::cout << "DEPLOY.TARGET_SHORT_SIZE: " << _target_short_size - << std::endl; - std::cout << "DEPLOY.IMAGE_TYPE: " << _img_type << std::endl; - std::cout << "DEPLOY.NUM_CLASSES: " << _class_num << std::endl; - std::cout << "DEPLOY.CHANNELS: " << _channels << std::endl; - std::cout << "DEPLOY.MODEL_PATH: " << _model_path << std::endl; - std::cout << "DEPLOY.MODEL_FILENAME: " << _model_file_name - << std::endl; - std::cout << "DEPLOY.PARAMS_FILENAME: " << _param_file_name - << std::endl; - std::cout << "DEPLOY.PRE_PROCESSOR: " << _pre_processor << std::endl; - std::cout << "DEPLOY.USE_GPU: " << _use_gpu << std::endl; - std::cout << "DEPLOY.PREDICTOR_MODE: " << _predictor_mode << std::endl; - std::cout << "DEPLOY.BATCH_SIZE: " << _batch_size << std::endl; - } - // DEPLOY.COARSEST_STRIDE - int _coarsest_stride; - // DEPLOY.FEEDS_SIZE - int _feeds_size; - // DEPLOY.RESIZE_TYPE 0:unpadding 1:rangescaling Default:0 - int _resize_type; - // DEPLOY.RESIZE_MAX_SIZE - int _resize_max_size; - // DEPLOY.CROP_SIZE - std::vector<int> _crop_size; - // DEPLOY.SCALE_RESIZE - std::vector<int> _resize; - // DEPLOY.MEAN - std::vector<float> _mean; - // DEPLOY.STD - std::vector<float> _std; - // DEPLOY.IMAGE_TYPE - std::string _img_type; - // DEPLOY.TARGET_SHORT_SIZE - int _target_short_size; - // DEPLOY.NUM_CLASSES - int _class_num; - // DEPLOY.CHANNELS - int _channels; - // DEPLOY.MODEL_PATH - std::string _model_path; - // DEPLOY.MODEL_FILENAME - std::string _model_file_name; - // DEPLOY.PARAMS_FILENAME - std::string _param_file_name; - // DEPLOY.PRE_PROCESSOR - std::string _pre_processor; - // DEPLOY.USE_GPU - int _use_gpu; - // DEPLOY.PREDICTOR_MODE - std::string _predictor_mode; - // DEPLOY.BATCH_SIZE - int _batch_size; -}; -} // namespace PaddleSolution diff --git a/inference/utils/detection_result.pb.cc b/inference/utils/detection_result.pb.cc deleted file mode 100644 index b5cce7317914cf93f99d0d4efa3aee763972cc4e..0000000000000000000000000000000000000000 --- a/inference/utils/detection_result.pb.cc +++ /dev/null @@ -1,1159 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: detection_result.proto - -#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION -#include "detection_result.pb.h" - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -// @@protoc_insertion_point(includes) - -namespace PaddleSolution { - -namespace { - -const ::google::protobuf::Descriptor* DetectionBox_descriptor_ = NULL; -const ::google::protobuf::internal::GeneratedMessageReflection* - DetectionBox_reflection_ = NULL; -const ::google::protobuf::Descriptor* DetectionResult_descriptor_ = NULL; -const ::google::protobuf::internal::GeneratedMessageReflection* - DetectionResult_reflection_ = NULL; - -} // namespace - - -void protobuf_AssignDesc_detection_5fresult_2eproto() GOOGLE_ATTRIBUTE_COLD; -void protobuf_AssignDesc_detection_5fresult_2eproto() { - protobuf_AddDesc_detection_5fresult_2eproto(); - const ::google::protobuf::FileDescriptor* file = - ::google::protobuf::DescriptorPool::generated_pool()->FindFileByName( - "detection_result.proto"); - GOOGLE_CHECK(file != NULL); - DetectionBox_descriptor_ = file->message_type(0); - static const int DetectionBox_offsets_[6] = { - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionBox, class__), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionBox, score_), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionBox, left_top_x_), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionBox, left_top_y_), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionBox, right_bottom_x_), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionBox, right_bottom_y_), - }; - DetectionBox_reflection_ = - ::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection( - DetectionBox_descriptor_, - DetectionBox::internal_default_instance(), - DetectionBox_offsets_, - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionBox, _has_bits_), - -1, - -1, - sizeof(DetectionBox), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionBox, _internal_metadata_)); - DetectionResult_descriptor_ = file->message_type(1); - static const int DetectionResult_offsets_[2] = { - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionResult, filename_), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionResult, detection_boxes_), - }; - DetectionResult_reflection_ = - ::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection( - DetectionResult_descriptor_, - DetectionResult::internal_default_instance(), - DetectionResult_offsets_, - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionResult, _has_bits_), - -1, - -1, - sizeof(DetectionResult), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DetectionResult, _internal_metadata_)); -} - -namespace { - -GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_); -void protobuf_AssignDescriptorsOnce() { - ::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_, - &protobuf_AssignDesc_detection_5fresult_2eproto); -} - -void protobuf_RegisterTypes(const ::std::string&) GOOGLE_ATTRIBUTE_COLD; -void protobuf_RegisterTypes(const ::std::string&) { - protobuf_AssignDescriptorsOnce(); - ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( - DetectionBox_descriptor_, DetectionBox::internal_default_instance()); - ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( - DetectionResult_descriptor_, DetectionResult::internal_default_instance()); -} - -} // namespace - -void protobuf_ShutdownFile_detection_5fresult_2eproto() { - 
DetectionBox_default_instance_.Shutdown(); - delete DetectionBox_reflection_; - DetectionResult_default_instance_.Shutdown(); - delete DetectionResult_reflection_; -} - -void protobuf_InitDefaults_detection_5fresult_2eproto_impl() { - GOOGLE_PROTOBUF_VERIFY_VERSION; - - DetectionBox_default_instance_.DefaultConstruct(); - ::google::protobuf::internal::GetEmptyString(); - DetectionResult_default_instance_.DefaultConstruct(); - DetectionBox_default_instance_.get_mutable()->InitAsDefaultInstance(); - DetectionResult_default_instance_.get_mutable()->InitAsDefaultInstance(); -} - -GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_InitDefaults_detection_5fresult_2eproto_once_); -void protobuf_InitDefaults_detection_5fresult_2eproto() { - ::google::protobuf::GoogleOnceInit(&protobuf_InitDefaults_detection_5fresult_2eproto_once_, - &protobuf_InitDefaults_detection_5fresult_2eproto_impl); -} -void protobuf_AddDesc_detection_5fresult_2eproto_impl() { - GOOGLE_PROTOBUF_VERIFY_VERSION; - - protobuf_InitDefaults_detection_5fresult_2eproto(); - ::google::protobuf::DescriptorPool::InternalAddGeneratedFile( - "\n\026detection_result.proto\022\016PaddleSolution" - "\"\204\001\n\014DetectionBox\022\r\n\005class\030\001 \001(\005\022\r\n\005scor" - "e\030\002 \001(\002\022\022\n\nleft_top_x\030\003 \001(\002\022\022\n\nleft_top_" - "y\030\004 \001(\002\022\026\n\016right_bottom_x\030\005 \001(\002\022\026\n\016right" - "_bottom_y\030\006 \001(\002\"Z\n\017DetectionResult\022\020\n\010fi" - "lename\030\001 \001(\t\0225\n\017detection_boxes\030\002 \003(\0132\034." - "PaddleSolution.DetectionBox", 267); - ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile( - "detection_result.proto", &protobuf_RegisterTypes); - ::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_detection_5fresult_2eproto); -} - -GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AddDesc_detection_5fresult_2eproto_once_); -void protobuf_AddDesc_detection_5fresult_2eproto() { - ::google::protobuf::GoogleOnceInit(&protobuf_AddDesc_detection_5fresult_2eproto_once_, - &protobuf_AddDesc_detection_5fresult_2eproto_impl); -} -// Force AddDescriptors() to be called at static initialization time. 
-struct StaticDescriptorInitializer_detection_5fresult_2eproto { - StaticDescriptorInitializer_detection_5fresult_2eproto() { - protobuf_AddDesc_detection_5fresult_2eproto(); - } -} static_descriptor_initializer_detection_5fresult_2eproto_; - -namespace { - -static void MergeFromFail(int line) GOOGLE_ATTRIBUTE_COLD GOOGLE_ATTRIBUTE_NORETURN; -static void MergeFromFail(int line) { - ::google::protobuf::internal::MergeFromFail(__FILE__, line); -} - -} // namespace - - -// =================================================================== - -#if !defined(_MSC_VER) || _MSC_VER >= 1900 -const int DetectionBox::kClassFieldNumber; -const int DetectionBox::kScoreFieldNumber; -const int DetectionBox::kLeftTopXFieldNumber; -const int DetectionBox::kLeftTopYFieldNumber; -const int DetectionBox::kRightBottomXFieldNumber; -const int DetectionBox::kRightBottomYFieldNumber; -#endif // !defined(_MSC_VER) || _MSC_VER >= 1900 - -DetectionBox::DetectionBox() - : ::google::protobuf::Message(), _internal_metadata_(NULL) { - if (this != internal_default_instance()) protobuf_InitDefaults_detection_5fresult_2eproto(); - SharedCtor(); - // @@protoc_insertion_point(constructor:PaddleSolution.DetectionBox) -} - -void DetectionBox::InitAsDefaultInstance() { -} - -DetectionBox::DetectionBox(const DetectionBox& from) - : ::google::protobuf::Message(), - _internal_metadata_(NULL) { - SharedCtor(); - UnsafeMergeFrom(from); - // @@protoc_insertion_point(copy_constructor:PaddleSolution.DetectionBox) -} - -void DetectionBox::SharedCtor() { - _cached_size_ = 0; - ::memset(&class__, 0, reinterpret_cast(&right_bottom_y_) - - reinterpret_cast(&class__) + sizeof(right_bottom_y_)); -} - -DetectionBox::~DetectionBox() { - // @@protoc_insertion_point(destructor:PaddleSolution.DetectionBox) - SharedDtor(); -} - -void DetectionBox::SharedDtor() { -} - -void DetectionBox::SetCachedSize(int size) const { - GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); - _cached_size_ = size; - GOOGLE_SAFE_CONCURRENT_WRITES_END(); -} -const ::google::protobuf::Descriptor* DetectionBox::descriptor() { - protobuf_AssignDescriptorsOnce(); - return DetectionBox_descriptor_; -} - -const DetectionBox& DetectionBox::default_instance() { - protobuf_InitDefaults_detection_5fresult_2eproto(); - return *internal_default_instance(); -} - -::google::protobuf::internal::ExplicitlyConstructed DetectionBox_default_instance_; - -DetectionBox* DetectionBox::New(::google::protobuf::Arena* arena) const { - DetectionBox* n = new DetectionBox; - if (arena != NULL) { - arena->Own(n); - } - return n; -} - -void DetectionBox::Clear() { -// @@protoc_insertion_point(message_clear_start:PaddleSolution.DetectionBox) -#if defined(__clang__) -#define ZR_HELPER_(f) \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Winvalid-offsetof\"") \ - __builtin_offsetof(DetectionBox, f) \ - _Pragma("clang diagnostic pop") -#else -#define ZR_HELPER_(f) reinterpret_cast(\ - &reinterpret_cast(16)->f) -#endif - -#define ZR_(first, last) do {\ - ::memset(&(first), 0,\ - ZR_HELPER_(last) - ZR_HELPER_(first) + sizeof(last));\ -} while (0) - - ZR_(class__, right_bottom_y_); - -#undef ZR_HELPER_ -#undef ZR_ - - _has_bits_.Clear(); - if (_internal_metadata_.have_unknown_fields()) { - mutable_unknown_fields()->Clear(); - } -} - -bool DetectionBox::MergePartialFromCodedStream( - ::google::protobuf::io::CodedInputStream* input) { -#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure - ::google::protobuf::uint32 tag; - // 
@@protoc_insertion_point(parse_start:PaddleSolution.DetectionBox) - for (;;) { - ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); - tag = p.first; - if (!p.second) goto handle_unusual; - switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { - // optional int32 class = 1; - case 1: { - if (tag == 8) { - set_has_class_(); - DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< - ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>( - input, &class__))); - } else { - goto handle_unusual; - } - if (input->ExpectTag(21)) goto parse_score; - break; - } - - // optional float score = 2; - case 2: { - if (tag == 21) { - parse_score: - set_has_score(); - DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< - float, ::google::protobuf::internal::WireFormatLite::TYPE_FLOAT>( - input, &score_))); - } else { - goto handle_unusual; - } - if (input->ExpectTag(29)) goto parse_left_top_x; - break; - } - - // optional float left_top_x = 3; - case 3: { - if (tag == 29) { - parse_left_top_x: - set_has_left_top_x(); - DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< - float, ::google::protobuf::internal::WireFormatLite::TYPE_FLOAT>( - input, &left_top_x_))); - } else { - goto handle_unusual; - } - if (input->ExpectTag(37)) goto parse_left_top_y; - break; - } - - // optional float left_top_y = 4; - case 4: { - if (tag == 37) { - parse_left_top_y: - set_has_left_top_y(); - DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< - float, ::google::protobuf::internal::WireFormatLite::TYPE_FLOAT>( - input, &left_top_y_))); - } else { - goto handle_unusual; - } - if (input->ExpectTag(45)) goto parse_right_bottom_x; - break; - } - - // optional float right_bottom_x = 5; - case 5: { - if (tag == 45) { - parse_right_bottom_x: - set_has_right_bottom_x(); - DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< - float, ::google::protobuf::internal::WireFormatLite::TYPE_FLOAT>( - input, &right_bottom_x_))); - } else { - goto handle_unusual; - } - if (input->ExpectTag(53)) goto parse_right_bottom_y; - break; - } - - // optional float right_bottom_y = 6; - case 6: { - if (tag == 53) { - parse_right_bottom_y: - set_has_right_bottom_y(); - DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< - float, ::google::protobuf::internal::WireFormatLite::TYPE_FLOAT>( - input, &right_bottom_y_))); - } else { - goto handle_unusual; - } - if (input->ExpectAtEnd()) goto success; - break; - } - - default: { - handle_unusual: - if (tag == 0 || - ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == - ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { - goto success; - } - DO_(::google::protobuf::internal::WireFormat::SkipField( - input, tag, mutable_unknown_fields())); - break; - } - } - } -success: - // @@protoc_insertion_point(parse_success:PaddleSolution.DetectionBox) - return true; -failure: - // @@protoc_insertion_point(parse_failure:PaddleSolution.DetectionBox) - return false; -#undef DO_ -} - -void DetectionBox::SerializeWithCachedSizes( - ::google::protobuf::io::CodedOutputStream* output) const { - // @@protoc_insertion_point(serialize_start:PaddleSolution.DetectionBox) - // optional int32 class = 1; - if (has_class_()) { - ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->class_(), output); - } - - // optional float score = 2; - if (has_score()) { - ::google::protobuf::internal::WireFormatLite::WriteFloat(2, 
this->score(), output); - } - - // optional float left_top_x = 3; - if (has_left_top_x()) { - ::google::protobuf::internal::WireFormatLite::WriteFloat(3, this->left_top_x(), output); - } - - // optional float left_top_y = 4; - if (has_left_top_y()) { - ::google::protobuf::internal::WireFormatLite::WriteFloat(4, this->left_top_y(), output); - } - - // optional float right_bottom_x = 5; - if (has_right_bottom_x()) { - ::google::protobuf::internal::WireFormatLite::WriteFloat(5, this->right_bottom_x(), output); - } - - // optional float right_bottom_y = 6; - if (has_right_bottom_y()) { - ::google::protobuf::internal::WireFormatLite::WriteFloat(6, this->right_bottom_y(), output); - } - - if (_internal_metadata_.have_unknown_fields()) { - ::google::protobuf::internal::WireFormat::SerializeUnknownFields( - unknown_fields(), output); - } - // @@protoc_insertion_point(serialize_end:PaddleSolution.DetectionBox) -} - -::google::protobuf::uint8* DetectionBox::InternalSerializeWithCachedSizesToArray( - bool deterministic, ::google::protobuf::uint8* target) const { - (void)deterministic; // Unused - // @@protoc_insertion_point(serialize_to_array_start:PaddleSolution.DetectionBox) - // optional int32 class = 1; - if (has_class_()) { - target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->class_(), target); - } - - // optional float score = 2; - if (has_score()) { - target = ::google::protobuf::internal::WireFormatLite::WriteFloatToArray(2, this->score(), target); - } - - // optional float left_top_x = 3; - if (has_left_top_x()) { - target = ::google::protobuf::internal::WireFormatLite::WriteFloatToArray(3, this->left_top_x(), target); - } - - // optional float left_top_y = 4; - if (has_left_top_y()) { - target = ::google::protobuf::internal::WireFormatLite::WriteFloatToArray(4, this->left_top_y(), target); - } - - // optional float right_bottom_x = 5; - if (has_right_bottom_x()) { - target = ::google::protobuf::internal::WireFormatLite::WriteFloatToArray(5, this->right_bottom_x(), target); - } - - // optional float right_bottom_y = 6; - if (has_right_bottom_y()) { - target = ::google::protobuf::internal::WireFormatLite::WriteFloatToArray(6, this->right_bottom_y(), target); - } - - if (_internal_metadata_.have_unknown_fields()) { - target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( - unknown_fields(), target); - } - // @@protoc_insertion_point(serialize_to_array_end:PaddleSolution.DetectionBox) - return target; -} - -size_t DetectionBox::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:PaddleSolution.DetectionBox) - size_t total_size = 0; - - if (_has_bits_[0 / 32] & 63u) { - // optional int32 class = 1; - if (has_class_()) { - total_size += 1 + - ::google::protobuf::internal::WireFormatLite::Int32Size( - this->class_()); - } - - // optional float score = 2; - if (has_score()) { - total_size += 1 + 4; - } - - // optional float left_top_x = 3; - if (has_left_top_x()) { - total_size += 1 + 4; - } - - // optional float left_top_y = 4; - if (has_left_top_y()) { - total_size += 1 + 4; - } - - // optional float right_bottom_x = 5; - if (has_right_bottom_x()) { - total_size += 1 + 4; - } - - // optional float right_bottom_y = 6; - if (has_right_bottom_y()) { - total_size += 1 + 4; - } - - } - if (_internal_metadata_.have_unknown_fields()) { - total_size += - ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( - unknown_fields()); - } - int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); - 
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); - _cached_size_ = cached_size; - GOOGLE_SAFE_CONCURRENT_WRITES_END(); - return total_size; -} - -void DetectionBox::MergeFrom(const ::google::protobuf::Message& from) { -// @@protoc_insertion_point(generalized_merge_from_start:PaddleSolution.DetectionBox) - if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__); - const DetectionBox* source = - ::google::protobuf::internal::DynamicCastToGenerated( - &from); - if (source == NULL) { - // @@protoc_insertion_point(generalized_merge_from_cast_fail:PaddleSolution.DetectionBox) - ::google::protobuf::internal::ReflectionOps::Merge(from, this); - } else { - // @@protoc_insertion_point(generalized_merge_from_cast_success:PaddleSolution.DetectionBox) - UnsafeMergeFrom(*source); - } -} - -void DetectionBox::MergeFrom(const DetectionBox& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:PaddleSolution.DetectionBox) - if (GOOGLE_PREDICT_TRUE(&from != this)) { - UnsafeMergeFrom(from); - } else { - MergeFromFail(__LINE__); - } -} - -void DetectionBox::UnsafeMergeFrom(const DetectionBox& from) { - GOOGLE_DCHECK(&from != this); - if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { - if (from.has_class_()) { - set_class_(from.class_()); - } - if (from.has_score()) { - set_score(from.score()); - } - if (from.has_left_top_x()) { - set_left_top_x(from.left_top_x()); - } - if (from.has_left_top_y()) { - set_left_top_y(from.left_top_y()); - } - if (from.has_right_bottom_x()) { - set_right_bottom_x(from.right_bottom_x()); - } - if (from.has_right_bottom_y()) { - set_right_bottom_y(from.right_bottom_y()); - } - } - if (from._internal_metadata_.have_unknown_fields()) { - ::google::protobuf::UnknownFieldSet::MergeToInternalMetdata( - from.unknown_fields(), &_internal_metadata_); - } -} - -void DetectionBox::CopyFrom(const ::google::protobuf::Message& from) { -// @@protoc_insertion_point(generalized_copy_from_start:PaddleSolution.DetectionBox) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -void DetectionBox::CopyFrom(const DetectionBox& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:PaddleSolution.DetectionBox) - if (&from == this) return; - Clear(); - UnsafeMergeFrom(from); -} - -bool DetectionBox::IsInitialized() const { - - return true; -} - -void DetectionBox::Swap(DetectionBox* other) { - if (other == this) return; - InternalSwap(other); -} -void DetectionBox::InternalSwap(DetectionBox* other) { - std::swap(class__, other->class__); - std::swap(score_, other->score_); - std::swap(left_top_x_, other->left_top_x_); - std::swap(left_top_y_, other->left_top_y_); - std::swap(right_bottom_x_, other->right_bottom_x_); - std::swap(right_bottom_y_, other->right_bottom_y_); - std::swap(_has_bits_[0], other->_has_bits_[0]); - _internal_metadata_.Swap(&other->_internal_metadata_); - std::swap(_cached_size_, other->_cached_size_); -} - -::google::protobuf::Metadata DetectionBox::GetMetadata() const { - protobuf_AssignDescriptorsOnce(); - ::google::protobuf::Metadata metadata; - metadata.descriptor = DetectionBox_descriptor_; - metadata.reflection = DetectionBox_reflection_; - return metadata; -} - -#if PROTOBUF_INLINE_NOT_IN_HEADERS -// DetectionBox - -// optional int32 class = 1; -bool DetectionBox::has_class_() const { - return (_has_bits_[0] & 0x00000001u) != 0; -} -void DetectionBox::set_has_class_() { - _has_bits_[0] |= 0x00000001u; -} -void DetectionBox::clear_has_class_() { - _has_bits_[0] &= ~0x00000001u; -} -void DetectionBox::clear_class_() { - class__ = 0; - 
clear_has_class_(); -} -::google::protobuf::int32 DetectionBox::class_() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.class) - return class__; -} -void DetectionBox::set_class_(::google::protobuf::int32 value) { - set_has_class_(); - class__ = value; - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.class) -} - -// optional float score = 2; -bool DetectionBox::has_score() const { - return (_has_bits_[0] & 0x00000002u) != 0; -} -void DetectionBox::set_has_score() { - _has_bits_[0] |= 0x00000002u; -} -void DetectionBox::clear_has_score() { - _has_bits_[0] &= ~0x00000002u; -} -void DetectionBox::clear_score() { - score_ = 0; - clear_has_score(); -} -float DetectionBox::score() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.score) - return score_; -} -void DetectionBox::set_score(float value) { - set_has_score(); - score_ = value; - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.score) -} - -// optional float left_top_x = 3; -bool DetectionBox::has_left_top_x() const { - return (_has_bits_[0] & 0x00000004u) != 0; -} -void DetectionBox::set_has_left_top_x() { - _has_bits_[0] |= 0x00000004u; -} -void DetectionBox::clear_has_left_top_x() { - _has_bits_[0] &= ~0x00000004u; -} -void DetectionBox::clear_left_top_x() { - left_top_x_ = 0; - clear_has_left_top_x(); -} -float DetectionBox::left_top_x() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.left_top_x) - return left_top_x_; -} -void DetectionBox::set_left_top_x(float value) { - set_has_left_top_x(); - left_top_x_ = value; - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.left_top_x) -} - -// optional float left_top_y = 4; -bool DetectionBox::has_left_top_y() const { - return (_has_bits_[0] & 0x00000008u) != 0; -} -void DetectionBox::set_has_left_top_y() { - _has_bits_[0] |= 0x00000008u; -} -void DetectionBox::clear_has_left_top_y() { - _has_bits_[0] &= ~0x00000008u; -} -void DetectionBox::clear_left_top_y() { - left_top_y_ = 0; - clear_has_left_top_y(); -} -float DetectionBox::left_top_y() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.left_top_y) - return left_top_y_; -} -void DetectionBox::set_left_top_y(float value) { - set_has_left_top_y(); - left_top_y_ = value; - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.left_top_y) -} - -// optional float right_bottom_x = 5; -bool DetectionBox::has_right_bottom_x() const { - return (_has_bits_[0] & 0x00000010u) != 0; -} -void DetectionBox::set_has_right_bottom_x() { - _has_bits_[0] |= 0x00000010u; -} -void DetectionBox::clear_has_right_bottom_x() { - _has_bits_[0] &= ~0x00000010u; -} -void DetectionBox::clear_right_bottom_x() { - right_bottom_x_ = 0; - clear_has_right_bottom_x(); -} -float DetectionBox::right_bottom_x() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.right_bottom_x) - return right_bottom_x_; -} -void DetectionBox::set_right_bottom_x(float value) { - set_has_right_bottom_x(); - right_bottom_x_ = value; - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.right_bottom_x) -} - -// optional float right_bottom_y = 6; -bool DetectionBox::has_right_bottom_y() const { - return (_has_bits_[0] & 0x00000020u) != 0; -} -void DetectionBox::set_has_right_bottom_y() { - _has_bits_[0] |= 0x00000020u; -} -void DetectionBox::clear_has_right_bottom_y() { - _has_bits_[0] &= ~0x00000020u; -} -void DetectionBox::clear_right_bottom_y() { - right_bottom_y_ = 0; - 
clear_has_right_bottom_y(); -} -float DetectionBox::right_bottom_y() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.right_bottom_y) - return right_bottom_y_; -} -void DetectionBox::set_right_bottom_y(float value) { - set_has_right_bottom_y(); - right_bottom_y_ = value; - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.right_bottom_y) -} - -inline const DetectionBox* DetectionBox::internal_default_instance() { - return &DetectionBox_default_instance_.get(); -} -#endif // PROTOBUF_INLINE_NOT_IN_HEADERS - -// =================================================================== - -#if !defined(_MSC_VER) || _MSC_VER >= 1900 -const int DetectionResult::kFilenameFieldNumber; -const int DetectionResult::kDetectionBoxesFieldNumber; -#endif // !defined(_MSC_VER) || _MSC_VER >= 1900 - -DetectionResult::DetectionResult() - : ::google::protobuf::Message(), _internal_metadata_(NULL) { - if (this != internal_default_instance()) protobuf_InitDefaults_detection_5fresult_2eproto(); - SharedCtor(); - // @@protoc_insertion_point(constructor:PaddleSolution.DetectionResult) -} - -void DetectionResult::InitAsDefaultInstance() { -} - -DetectionResult::DetectionResult(const DetectionResult& from) - : ::google::protobuf::Message(), - _internal_metadata_(NULL) { - SharedCtor(); - UnsafeMergeFrom(from); - // @@protoc_insertion_point(copy_constructor:PaddleSolution.DetectionResult) -} - -void DetectionResult::SharedCtor() { - _cached_size_ = 0; - filename_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); -} - -DetectionResult::~DetectionResult() { - // @@protoc_insertion_point(destructor:PaddleSolution.DetectionResult) - SharedDtor(); -} - -void DetectionResult::SharedDtor() { - filename_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); -} - -void DetectionResult::SetCachedSize(int size) const { - GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); - _cached_size_ = size; - GOOGLE_SAFE_CONCURRENT_WRITES_END(); -} -const ::google::protobuf::Descriptor* DetectionResult::descriptor() { - protobuf_AssignDescriptorsOnce(); - return DetectionResult_descriptor_; -} - -const DetectionResult& DetectionResult::default_instance() { - protobuf_InitDefaults_detection_5fresult_2eproto(); - return *internal_default_instance(); -} - -::google::protobuf::internal::ExplicitlyConstructed DetectionResult_default_instance_; - -DetectionResult* DetectionResult::New(::google::protobuf::Arena* arena) const { - DetectionResult* n = new DetectionResult; - if (arena != NULL) { - arena->Own(n); - } - return n; -} - -void DetectionResult::Clear() { -// @@protoc_insertion_point(message_clear_start:PaddleSolution.DetectionResult) - if (has_filename()) { - filename_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - } - detection_boxes_.Clear(); - _has_bits_.Clear(); - if (_internal_metadata_.have_unknown_fields()) { - mutable_unknown_fields()->Clear(); - } -} - -bool DetectionResult::MergePartialFromCodedStream( - ::google::protobuf::io::CodedInputStream* input) { -#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure - ::google::protobuf::uint32 tag; - // @@protoc_insertion_point(parse_start:PaddleSolution.DetectionResult) - for (;;) { - ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); - tag = p.first; - if (!p.second) goto handle_unusual; - switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { - // optional string filename = 1; - case 1: { - 
if (tag == 10) { - DO_(::google::protobuf::internal::WireFormatLite::ReadString( - input, this->mutable_filename())); - ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( - this->filename().data(), this->filename().length(), - ::google::protobuf::internal::WireFormat::PARSE, - "PaddleSolution.DetectionResult.filename"); - } else { - goto handle_unusual; - } - if (input->ExpectTag(18)) goto parse_detection_boxes; - break; - } - - // repeated .PaddleSolution.DetectionBox detection_boxes = 2; - case 2: { - if (tag == 18) { - parse_detection_boxes: - DO_(input->IncrementRecursionDepth()); - parse_loop_detection_boxes: - DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtualNoRecursionDepth( - input, add_detection_boxes())); - } else { - goto handle_unusual; - } - if (input->ExpectTag(18)) goto parse_loop_detection_boxes; - input->UnsafeDecrementRecursionDepth(); - if (input->ExpectAtEnd()) goto success; - break; - } - - default: { - handle_unusual: - if (tag == 0 || - ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == - ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { - goto success; - } - DO_(::google::protobuf::internal::WireFormat::SkipField( - input, tag, mutable_unknown_fields())); - break; - } - } - } -success: - // @@protoc_insertion_point(parse_success:PaddleSolution.DetectionResult) - return true; -failure: - // @@protoc_insertion_point(parse_failure:PaddleSolution.DetectionResult) - return false; -#undef DO_ -} - -void DetectionResult::SerializeWithCachedSizes( - ::google::protobuf::io::CodedOutputStream* output) const { - // @@protoc_insertion_point(serialize_start:PaddleSolution.DetectionResult) - // optional string filename = 1; - if (has_filename()) { - ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( - this->filename().data(), this->filename().length(), - ::google::protobuf::internal::WireFormat::SERIALIZE, - "PaddleSolution.DetectionResult.filename"); - ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased( - 1, this->filename(), output); - } - - // repeated .PaddleSolution.DetectionBox detection_boxes = 2; - for (unsigned int i = 0, n = this->detection_boxes_size(); i < n; i++) { - ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( - 2, this->detection_boxes(i), output); - } - - if (_internal_metadata_.have_unknown_fields()) { - ::google::protobuf::internal::WireFormat::SerializeUnknownFields( - unknown_fields(), output); - } - // @@protoc_insertion_point(serialize_end:PaddleSolution.DetectionResult) -} - -::google::protobuf::uint8* DetectionResult::InternalSerializeWithCachedSizesToArray( - bool deterministic, ::google::protobuf::uint8* target) const { - (void)deterministic; // Unused - // @@protoc_insertion_point(serialize_to_array_start:PaddleSolution.DetectionResult) - // optional string filename = 1; - if (has_filename()) { - ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( - this->filename().data(), this->filename().length(), - ::google::protobuf::internal::WireFormat::SERIALIZE, - "PaddleSolution.DetectionResult.filename"); - target = - ::google::protobuf::internal::WireFormatLite::WriteStringToArray( - 1, this->filename(), target); - } - - // repeated .PaddleSolution.DetectionBox detection_boxes = 2; - for (unsigned int i = 0, n = this->detection_boxes_size(); i < n; i++) { - target = ::google::protobuf::internal::WireFormatLite:: - InternalWriteMessageNoVirtualToArray( - 2, this->detection_boxes(i), false, target); - } 
- - if (_internal_metadata_.have_unknown_fields()) { - target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( - unknown_fields(), target); - } - // @@protoc_insertion_point(serialize_to_array_end:PaddleSolution.DetectionResult) - return target; -} - -size_t DetectionResult::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:PaddleSolution.DetectionResult) - size_t total_size = 0; - - // optional string filename = 1; - if (has_filename()) { - total_size += 1 + - ::google::protobuf::internal::WireFormatLite::StringSize( - this->filename()); - } - - // repeated .PaddleSolution.DetectionBox detection_boxes = 2; - { - unsigned int count = this->detection_boxes_size(); - total_size += 1UL * count; - for (unsigned int i = 0; i < count; i++) { - total_size += - ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual( - this->detection_boxes(i)); - } - } - - if (_internal_metadata_.have_unknown_fields()) { - total_size += - ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( - unknown_fields()); - } - int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); - GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); - _cached_size_ = cached_size; - GOOGLE_SAFE_CONCURRENT_WRITES_END(); - return total_size; -} - -void DetectionResult::MergeFrom(const ::google::protobuf::Message& from) { -// @@protoc_insertion_point(generalized_merge_from_start:PaddleSolution.DetectionResult) - if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__); - const DetectionResult* source = - ::google::protobuf::internal::DynamicCastToGenerated( - &from); - if (source == NULL) { - // @@protoc_insertion_point(generalized_merge_from_cast_fail:PaddleSolution.DetectionResult) - ::google::protobuf::internal::ReflectionOps::Merge(from, this); - } else { - // @@protoc_insertion_point(generalized_merge_from_cast_success:PaddleSolution.DetectionResult) - UnsafeMergeFrom(*source); - } -} - -void DetectionResult::MergeFrom(const DetectionResult& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:PaddleSolution.DetectionResult) - if (GOOGLE_PREDICT_TRUE(&from != this)) { - UnsafeMergeFrom(from); - } else { - MergeFromFail(__LINE__); - } -} - -void DetectionResult::UnsafeMergeFrom(const DetectionResult& from) { - GOOGLE_DCHECK(&from != this); - detection_boxes_.MergeFrom(from.detection_boxes_); - if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { - if (from.has_filename()) { - set_has_filename(); - filename_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.filename_); - } - } - if (from._internal_metadata_.have_unknown_fields()) { - ::google::protobuf::UnknownFieldSet::MergeToInternalMetdata( - from.unknown_fields(), &_internal_metadata_); - } -} - -void DetectionResult::CopyFrom(const ::google::protobuf::Message& from) { -// @@protoc_insertion_point(generalized_copy_from_start:PaddleSolution.DetectionResult) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -void DetectionResult::CopyFrom(const DetectionResult& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:PaddleSolution.DetectionResult) - if (&from == this) return; - Clear(); - UnsafeMergeFrom(from); -} - -bool DetectionResult::IsInitialized() const { - - return true; -} - -void DetectionResult::Swap(DetectionResult* other) { - if (other == this) return; - InternalSwap(other); -} -void DetectionResult::InternalSwap(DetectionResult* other) { - filename_.Swap(&other->filename_); - 
detection_boxes_.UnsafeArenaSwap(&other->detection_boxes_); - std::swap(_has_bits_[0], other->_has_bits_[0]); - _internal_metadata_.Swap(&other->_internal_metadata_); - std::swap(_cached_size_, other->_cached_size_); -} - -::google::protobuf::Metadata DetectionResult::GetMetadata() const { - protobuf_AssignDescriptorsOnce(); - ::google::protobuf::Metadata metadata; - metadata.descriptor = DetectionResult_descriptor_; - metadata.reflection = DetectionResult_reflection_; - return metadata; -} - -#if PROTOBUF_INLINE_NOT_IN_HEADERS -// DetectionResult - -// optional string filename = 1; -bool DetectionResult::has_filename() const { - return (_has_bits_[0] & 0x00000001u) != 0; -} -void DetectionResult::set_has_filename() { - _has_bits_[0] |= 0x00000001u; -} -void DetectionResult::clear_has_filename() { - _has_bits_[0] &= ~0x00000001u; -} -void DetectionResult::clear_filename() { - filename_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - clear_has_filename(); -} -const ::std::string& DetectionResult::filename() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionResult.filename) - return filename_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); -} -void DetectionResult::set_filename(const ::std::string& value) { - set_has_filename(); - filename_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionResult.filename) -} -void DetectionResult::set_filename(const char* value) { - set_has_filename(); - filename_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); - // @@protoc_insertion_point(field_set_char:PaddleSolution.DetectionResult.filename) -} -void DetectionResult::set_filename(const char* value, size_t size) { - set_has_filename(); - filename_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), - ::std::string(reinterpret_cast(value), size)); - // @@protoc_insertion_point(field_set_pointer:PaddleSolution.DetectionResult.filename) -} -::std::string* DetectionResult::mutable_filename() { - set_has_filename(); - // @@protoc_insertion_point(field_mutable:PaddleSolution.DetectionResult.filename) - return filename_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); -} -::std::string* DetectionResult::release_filename() { - // @@protoc_insertion_point(field_release:PaddleSolution.DetectionResult.filename) - clear_has_filename(); - return filename_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); -} -void DetectionResult::set_allocated_filename(::std::string* filename) { - if (filename != NULL) { - set_has_filename(); - } else { - clear_has_filename(); - } - filename_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), filename); - // @@protoc_insertion_point(field_set_allocated:PaddleSolution.DetectionResult.filename) -} - -// repeated .PaddleSolution.DetectionBox detection_boxes = 2; -int DetectionResult::detection_boxes_size() const { - return detection_boxes_.size(); -} -void DetectionResult::clear_detection_boxes() { - detection_boxes_.Clear(); -} -const ::PaddleSolution::DetectionBox& DetectionResult::detection_boxes(int index) const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionResult.detection_boxes) - return detection_boxes_.Get(index); -} -::PaddleSolution::DetectionBox* DetectionResult::mutable_detection_boxes(int index) { - // 
@@protoc_insertion_point(field_mutable:PaddleSolution.DetectionResult.detection_boxes) - return detection_boxes_.Mutable(index); -} -::PaddleSolution::DetectionBox* DetectionResult::add_detection_boxes() { - // @@protoc_insertion_point(field_add:PaddleSolution.DetectionResult.detection_boxes) - return detection_boxes_.Add(); -} -::google::protobuf::RepeatedPtrField< ::PaddleSolution::DetectionBox >* -DetectionResult::mutable_detection_boxes() { - // @@protoc_insertion_point(field_mutable_list:PaddleSolution.DetectionResult.detection_boxes) - return &detection_boxes_; -} -const ::google::protobuf::RepeatedPtrField< ::PaddleSolution::DetectionBox >& -DetectionResult::detection_boxes() const { - // @@protoc_insertion_point(field_list:PaddleSolution.DetectionResult.detection_boxes) - return detection_boxes_; -} - -inline const DetectionResult* DetectionResult::internal_default_instance() { - return &DetectionResult_default_instance_.get(); -} -#endif // PROTOBUF_INLINE_NOT_IN_HEADERS - -// @@protoc_insertion_point(namespace_scope) - -} // namespace PaddleSolution - -// @@protoc_insertion_point(global_scope) diff --git a/inference/utils/detection_result.pb.h b/inference/utils/detection_result.pb.h deleted file mode 100644 index 1b2f89ea9ca13f3f949bd19b097bb514a4afc525..0000000000000000000000000000000000000000 --- a/inference/utils/detection_result.pb.h +++ /dev/null @@ -1,563 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: detection_result.proto - -#ifndef PROTOBUF_detection_5fresult_2eproto__INCLUDED -#define PROTOBUF_detection_5fresult_2eproto__INCLUDED - -#include - -#include - -#if GOOGLE_PROTOBUF_VERSION < 3001000 -#error This file was generated by a newer version of protoc which is -#error incompatible with your Protocol Buffer headers. Please update -#error your headers. -#endif -#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION -#error This file was generated by an older version of protoc which is -#error incompatible with your Protocol Buffer headers. Please -#error regenerate this file with a newer version of protoc. -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -// @@protoc_insertion_point(includes) - -namespace PaddleSolution { - -// Internal implementation detail -- do not call these. 
-void protobuf_AddDesc_detection_5fresult_2eproto(); -void protobuf_InitDefaults_detection_5fresult_2eproto(); -void protobuf_AssignDesc_detection_5fresult_2eproto(); -void protobuf_ShutdownFile_detection_5fresult_2eproto(); - -class DetectionBox; -class DetectionResult; - -// =================================================================== - -class DetectionBox : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:PaddleSolution.DetectionBox) */ { - public: - DetectionBox(); - virtual ~DetectionBox(); - - DetectionBox(const DetectionBox& from); - - inline DetectionBox& operator=(const DetectionBox& from) { - CopyFrom(from); - return *this; - } - - inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { - return _internal_metadata_.unknown_fields(); - } - - inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { - return _internal_metadata_.mutable_unknown_fields(); - } - - static const ::google::protobuf::Descriptor* descriptor(); - static const DetectionBox& default_instance(); - - static const DetectionBox* internal_default_instance(); - - void Swap(DetectionBox* other); - - // implements Message ---------------------------------------------- - - inline DetectionBox* New() const { return New(NULL); } - - DetectionBox* New(::google::protobuf::Arena* arena) const; - void CopyFrom(const ::google::protobuf::Message& from); - void MergeFrom(const ::google::protobuf::Message& from); - void CopyFrom(const DetectionBox& from); - void MergeFrom(const DetectionBox& from); - void Clear(); - bool IsInitialized() const; - - size_t ByteSizeLong() const; - bool MergePartialFromCodedStream( - ::google::protobuf::io::CodedInputStream* input); - void SerializeWithCachedSizes( - ::google::protobuf::io::CodedOutputStream* output) const; - ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( - bool deterministic, ::google::protobuf::uint8* output) const; - ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { - return InternalSerializeWithCachedSizesToArray(false, output); - } - int GetCachedSize() const { return _cached_size_; } - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const; - void InternalSwap(DetectionBox* other); - void UnsafeMergeFrom(const DetectionBox& from); - private: - inline ::google::protobuf::Arena* GetArenaNoVirtual() const { - return _internal_metadata_.arena(); - } - inline void* MaybeArenaPtr() const { - return _internal_metadata_.raw_arena_ptr(); - } - public: - - ::google::protobuf::Metadata GetMetadata() const; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - // optional int32 class = 1; - bool has_class_() const; - void clear_class_(); - static const int kClassFieldNumber = 1; - ::google::protobuf::int32 class_() const; - void set_class_(::google::protobuf::int32 value); - - // optional float score = 2; - bool has_score() const; - void clear_score(); - static const int kScoreFieldNumber = 2; - float score() const; - void set_score(float value); - - // optional float left_top_x = 3; - bool has_left_top_x() const; - void clear_left_top_x(); - static const int kLeftTopXFieldNumber = 3; - float left_top_x() const; - void set_left_top_x(float value); - - // optional float left_top_y = 4; - bool has_left_top_y() const; - void clear_left_top_y(); - static const int kLeftTopYFieldNumber = 4; - float left_top_y() const; - void 
set_left_top_y(float value); - - // optional float right_bottom_x = 5; - bool has_right_bottom_x() const; - void clear_right_bottom_x(); - static const int kRightBottomXFieldNumber = 5; - float right_bottom_x() const; - void set_right_bottom_x(float value); - - // optional float right_bottom_y = 6; - bool has_right_bottom_y() const; - void clear_right_bottom_y(); - static const int kRightBottomYFieldNumber = 6; - float right_bottom_y() const; - void set_right_bottom_y(float value); - - // @@protoc_insertion_point(class_scope:PaddleSolution.DetectionBox) - private: - inline void set_has_class_(); - inline void clear_has_class_(); - inline void set_has_score(); - inline void clear_has_score(); - inline void set_has_left_top_x(); - inline void clear_has_left_top_x(); - inline void set_has_left_top_y(); - inline void clear_has_left_top_y(); - inline void set_has_right_bottom_x(); - inline void clear_has_right_bottom_x(); - inline void set_has_right_bottom_y(); - inline void clear_has_right_bottom_y(); - - ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; - ::google::protobuf::internal::HasBits<1> _has_bits_; - mutable int _cached_size_; - ::google::protobuf::int32 class__; - float score_; - float left_top_x_; - float left_top_y_; - float right_bottom_x_; - float right_bottom_y_; - friend void protobuf_InitDefaults_detection_5fresult_2eproto_impl(); - friend void protobuf_AddDesc_detection_5fresult_2eproto_impl(); - friend void protobuf_AssignDesc_detection_5fresult_2eproto(); - friend void protobuf_ShutdownFile_detection_5fresult_2eproto(); - - void InitAsDefaultInstance(); -}; -extern ::google::protobuf::internal::ExplicitlyConstructed DetectionBox_default_instance_; - -// ------------------------------------------------------------------- - -class DetectionResult : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:PaddleSolution.DetectionResult) */ { - public: - DetectionResult(); - virtual ~DetectionResult(); - - DetectionResult(const DetectionResult& from); - - inline DetectionResult& operator=(const DetectionResult& from) { - CopyFrom(from); - return *this; - } - - inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { - return _internal_metadata_.unknown_fields(); - } - - inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { - return _internal_metadata_.mutable_unknown_fields(); - } - - static const ::google::protobuf::Descriptor* descriptor(); - static const DetectionResult& default_instance(); - - static const DetectionResult* internal_default_instance(); - - void Swap(DetectionResult* other); - - // implements Message ---------------------------------------------- - - inline DetectionResult* New() const { return New(NULL); } - - DetectionResult* New(::google::protobuf::Arena* arena) const; - void CopyFrom(const ::google::protobuf::Message& from); - void MergeFrom(const ::google::protobuf::Message& from); - void CopyFrom(const DetectionResult& from); - void MergeFrom(const DetectionResult& from); - void Clear(); - bool IsInitialized() const; - - size_t ByteSizeLong() const; - bool MergePartialFromCodedStream( - ::google::protobuf::io::CodedInputStream* input); - void SerializeWithCachedSizes( - ::google::protobuf::io::CodedOutputStream* output) const; - ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( - bool deterministic, ::google::protobuf::uint8* output) const; - ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { - return 
InternalSerializeWithCachedSizesToArray(false, output); - } - int GetCachedSize() const { return _cached_size_; } - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const; - void InternalSwap(DetectionResult* other); - void UnsafeMergeFrom(const DetectionResult& from); - private: - inline ::google::protobuf::Arena* GetArenaNoVirtual() const { - return _internal_metadata_.arena(); - } - inline void* MaybeArenaPtr() const { - return _internal_metadata_.raw_arena_ptr(); - } - public: - - ::google::protobuf::Metadata GetMetadata() const; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - // optional string filename = 1; - bool has_filename() const; - void clear_filename(); - static const int kFilenameFieldNumber = 1; - const ::std::string& filename() const; - void set_filename(const ::std::string& value); - void set_filename(const char* value); - void set_filename(const char* value, size_t size); - ::std::string* mutable_filename(); - ::std::string* release_filename(); - void set_allocated_filename(::std::string* filename); - - // repeated .PaddleSolution.DetectionBox detection_boxes = 2; - int detection_boxes_size() const; - void clear_detection_boxes(); - static const int kDetectionBoxesFieldNumber = 2; - const ::PaddleSolution::DetectionBox& detection_boxes(int index) const; - ::PaddleSolution::DetectionBox* mutable_detection_boxes(int index); - ::PaddleSolution::DetectionBox* add_detection_boxes(); - ::google::protobuf::RepeatedPtrField< ::PaddleSolution::DetectionBox >* - mutable_detection_boxes(); - const ::google::protobuf::RepeatedPtrField< ::PaddleSolution::DetectionBox >& - detection_boxes() const; - - // @@protoc_insertion_point(class_scope:PaddleSolution.DetectionResult) - private: - inline void set_has_filename(); - inline void clear_has_filename(); - - ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; - ::google::protobuf::internal::HasBits<1> _has_bits_; - mutable int _cached_size_; - ::google::protobuf::RepeatedPtrField< ::PaddleSolution::DetectionBox > detection_boxes_; - ::google::protobuf::internal::ArenaStringPtr filename_; - friend void protobuf_InitDefaults_detection_5fresult_2eproto_impl(); - friend void protobuf_AddDesc_detection_5fresult_2eproto_impl(); - friend void protobuf_AssignDesc_detection_5fresult_2eproto(); - friend void protobuf_ShutdownFile_detection_5fresult_2eproto(); - - void InitAsDefaultInstance(); -}; -extern ::google::protobuf::internal::ExplicitlyConstructed DetectionResult_default_instance_; - -// =================================================================== - - -// =================================================================== - -#if !PROTOBUF_INLINE_NOT_IN_HEADERS -// DetectionBox - -// optional int32 class = 1; -inline bool DetectionBox::has_class_() const { - return (_has_bits_[0] & 0x00000001u) != 0; -} -inline void DetectionBox::set_has_class_() { - _has_bits_[0] |= 0x00000001u; -} -inline void DetectionBox::clear_has_class_() { - _has_bits_[0] &= ~0x00000001u; -} -inline void DetectionBox::clear_class_() { - class__ = 0; - clear_has_class_(); -} -inline ::google::protobuf::int32 DetectionBox::class_() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.class) - return class__; -} -inline void DetectionBox::set_class_(::google::protobuf::int32 value) { - set_has_class_(); - class__ = value; - // 
@@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.class) -} - -// optional float score = 2; -inline bool DetectionBox::has_score() const { - return (_has_bits_[0] & 0x00000002u) != 0; -} -inline void DetectionBox::set_has_score() { - _has_bits_[0] |= 0x00000002u; -} -inline void DetectionBox::clear_has_score() { - _has_bits_[0] &= ~0x00000002u; -} -inline void DetectionBox::clear_score() { - score_ = 0; - clear_has_score(); -} -inline float DetectionBox::score() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.score) - return score_; -} -inline void DetectionBox::set_score(float value) { - set_has_score(); - score_ = value; - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.score) -} - -// optional float left_top_x = 3; -inline bool DetectionBox::has_left_top_x() const { - return (_has_bits_[0] & 0x00000004u) != 0; -} -inline void DetectionBox::set_has_left_top_x() { - _has_bits_[0] |= 0x00000004u; -} -inline void DetectionBox::clear_has_left_top_x() { - _has_bits_[0] &= ~0x00000004u; -} -inline void DetectionBox::clear_left_top_x() { - left_top_x_ = 0; - clear_has_left_top_x(); -} -inline float DetectionBox::left_top_x() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.left_top_x) - return left_top_x_; -} -inline void DetectionBox::set_left_top_x(float value) { - set_has_left_top_x(); - left_top_x_ = value; - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.left_top_x) -} - -// optional float left_top_y = 4; -inline bool DetectionBox::has_left_top_y() const { - return (_has_bits_[0] & 0x00000008u) != 0; -} -inline void DetectionBox::set_has_left_top_y() { - _has_bits_[0] |= 0x00000008u; -} -inline void DetectionBox::clear_has_left_top_y() { - _has_bits_[0] &= ~0x00000008u; -} -inline void DetectionBox::clear_left_top_y() { - left_top_y_ = 0; - clear_has_left_top_y(); -} -inline float DetectionBox::left_top_y() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.left_top_y) - return left_top_y_; -} -inline void DetectionBox::set_left_top_y(float value) { - set_has_left_top_y(); - left_top_y_ = value; - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.left_top_y) -} - -// optional float right_bottom_x = 5; -inline bool DetectionBox::has_right_bottom_x() const { - return (_has_bits_[0] & 0x00000010u) != 0; -} -inline void DetectionBox::set_has_right_bottom_x() { - _has_bits_[0] |= 0x00000010u; -} -inline void DetectionBox::clear_has_right_bottom_x() { - _has_bits_[0] &= ~0x00000010u; -} -inline void DetectionBox::clear_right_bottom_x() { - right_bottom_x_ = 0; - clear_has_right_bottom_x(); -} -inline float DetectionBox::right_bottom_x() const { - // @@protoc_insertion_point(field_get:PaddleSolution.DetectionBox.right_bottom_x) - return right_bottom_x_; -} -inline void DetectionBox::set_right_bottom_x(float value) { - set_has_right_bottom_x(); - right_bottom_x_ = value; - // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.right_bottom_x) -} - -// optional float right_bottom_y = 6; -inline bool DetectionBox::has_right_bottom_y() const { - return (_has_bits_[0] & 0x00000020u) != 0; -} -inline void DetectionBox::set_has_right_bottom_y() { - _has_bits_[0] |= 0x00000020u; -} -inline void DetectionBox::clear_has_right_bottom_y() { - _has_bits_[0] &= ~0x00000020u; -} -inline void DetectionBox::clear_right_bottom_y() { - right_bottom_y_ = 0; - clear_has_right_bottom_y(); -} -inline float DetectionBox::right_bottom_y() const { - // 
-  return right_bottom_y_;
-}
-inline void DetectionBox::set_right_bottom_y(float value) {
-  set_has_right_bottom_y();
-  right_bottom_y_ = value;
-  // @@protoc_insertion_point(field_set:PaddleSolution.DetectionBox.right_bottom_y)
-}
-
-inline const DetectionBox* DetectionBox::internal_default_instance() {
-  return &DetectionBox_default_instance_.get();
-}
-// -------------------------------------------------------------------
-
-// DetectionResult
-
-// optional string filename = 1;
-inline bool DetectionResult::has_filename() const {
-  return (_has_bits_[0] & 0x00000001u) != 0;
-}
-inline void DetectionResult::set_has_filename() {
-  _has_bits_[0] |= 0x00000001u;
-}
-inline void DetectionResult::clear_has_filename() {
-  _has_bits_[0] &= ~0x00000001u;
-}
-inline void DetectionResult::clear_filename() {
-  filename_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
-  clear_has_filename();
-}
-inline const ::std::string& DetectionResult::filename() const {
-  // @@protoc_insertion_point(field_get:PaddleSolution.DetectionResult.filename)
-  return filename_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
-}
-inline void DetectionResult::set_filename(const ::std::string& value) {
-  set_has_filename();
-  filename_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
-  // @@protoc_insertion_point(field_set:PaddleSolution.DetectionResult.filename)
-}
-inline void DetectionResult::set_filename(const char* value) {
-  set_has_filename();
-  filename_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
-  // @@protoc_insertion_point(field_set_char:PaddleSolution.DetectionResult.filename)
-}
-inline void DetectionResult::set_filename(const char* value, size_t size) {
-  set_has_filename();
-  filename_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
-      ::std::string(reinterpret_cast<const char*>(value), size));
-  // @@protoc_insertion_point(field_set_pointer:PaddleSolution.DetectionResult.filename)
-}
-inline ::std::string* DetectionResult::mutable_filename() {
-  set_has_filename();
-  // @@protoc_insertion_point(field_mutable:PaddleSolution.DetectionResult.filename)
-  return filename_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
-}
-inline ::std::string* DetectionResult::release_filename() {
-  // @@protoc_insertion_point(field_release:PaddleSolution.DetectionResult.filename)
-  clear_has_filename();
-  return filename_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
-}
-inline void DetectionResult::set_allocated_filename(::std::string* filename) {
-  if (filename != NULL) {
-    set_has_filename();
-  } else {
-    clear_has_filename();
-  }
-  filename_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), filename);
-  // @@protoc_insertion_point(field_set_allocated:PaddleSolution.DetectionResult.filename)
-}
-
-// repeated .PaddleSolution.DetectionBox detection_boxes = 2;
-inline int DetectionResult::detection_boxes_size() const {
-  return detection_boxes_.size();
-}
-inline void DetectionResult::clear_detection_boxes() {
-  detection_boxes_.Clear();
-}
-inline const ::PaddleSolution::DetectionBox& DetectionResult::detection_boxes(int index) const {
-  // @@protoc_insertion_point(field_get:PaddleSolution.DetectionResult.detection_boxes)
-  return detection_boxes_.Get(index);
-}
-inline ::PaddleSolution::DetectionBox* DetectionResult::mutable_detection_boxes(int index) {
-  // @@protoc_insertion_point(field_mutable:PaddleSolution.DetectionResult.detection_boxes)
-  return detection_boxes_.Mutable(index);
-}
-inline ::PaddleSolution::DetectionBox* DetectionResult::add_detection_boxes() {
-  // @@protoc_insertion_point(field_add:PaddleSolution.DetectionResult.detection_boxes)
-  return detection_boxes_.Add();
-}
-inline ::google::protobuf::RepeatedPtrField< ::PaddleSolution::DetectionBox >*
-DetectionResult::mutable_detection_boxes() {
-  // @@protoc_insertion_point(field_mutable_list:PaddleSolution.DetectionResult.detection_boxes)
-  return &detection_boxes_;
-}
-inline const ::google::protobuf::RepeatedPtrField< ::PaddleSolution::DetectionBox >&
-DetectionResult::detection_boxes() const {
-  // @@protoc_insertion_point(field_list:PaddleSolution.DetectionResult.detection_boxes)
-  return detection_boxes_;
-}
-
-inline const DetectionResult* DetectionResult::internal_default_instance() {
-  return &DetectionResult_default_instance_.get();
-}
-#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
-// -------------------------------------------------------------------
-
-
-// @@protoc_insertion_point(namespace_scope)
-
-} // namespace PaddleSolution
-
-// @@protoc_insertion_point(global_scope)
-
-#endif // PROTOBUF_detection_5fresult_2eproto__INCLUDED
diff --git a/inference/utils/detection_result.proto b/inference/utils/detection_result.proto
deleted file mode 100644
index 2d1cbb2464ac09b0dcea01f8331da5ee7894a4d5..0000000000000000000000000000000000000000
--- a/inference/utils/detection_result.proto
+++ /dev/null
@@ -1,21 +0,0 @@
-syntax = "proto2";
-package PaddleSolution;
-
-message DetectionBox {
-    optional int32 class = 1;
-    optional float score = 2;
-    optional float left_top_x = 3;
-    optional float left_top_y = 4;
-    optional float right_bottom_x = 5;
-    optional float right_bottom_y = 6;
-}
-
-message DetectionResult {
-    optional string filename = 1;
-    repeated DetectionBox detection_boxes = 2;
-}
-
-//message DetectionResultsContainer {
-//    repeated DetectionResult result = 1;
-//}
-
diff --git a/inference/utils/utils.h b/inference/utils/utils.h
deleted file mode 100644
index 11301cedc24c405de332711d77230e59d0137fdd..0000000000000000000000000000000000000000
--- a/inference/utils/utils.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include <iostream>
-#include <vector>
-#include <string>
-#include <algorithm>
-#include <cstring>
-#include <cmath>
-#ifdef _WIN32
-#include <experimental/filesystem>
-#else
-#include <dirent.h>
-#include <sys/types.h>
-#endif
-
-namespace PaddleSolution {
-namespace utils {
-
-enum SCALE_TYPE{
-    UNPADDING,
-    RANGE_SCALING
-};
-
-inline std::string path_join(const std::string& dir, const std::string& path) {
-    std::string seperator = "/";
-    #ifdef _WIN32
-    seperator = "\\";
-    #endif
-    return dir + seperator + path;
-}
-#ifndef _WIN32
-// scan a directory and get all files with input extensions
-inline std::vector<std::string> get_directory_images(const std::string& path,
-                                                     const std::string& exts) {
-    std::vector<std::string> imgs;
-    struct dirent *entry;
-    DIR *dir = opendir(path.c_str());
-    if (dir == NULL) {
-        closedir(dir);
-        return imgs;
-    }
-    while ((entry = readdir(dir)) != NULL) {
-        std::string item = entry->d_name;
-        auto ext = strrchr(entry->d_name, '.');
-        if (!ext || std::string(ext) == "." || std::string(ext) == "..") {
-            continue;
-        }
-        if (exts.find(ext) != std::string::npos) {
-            imgs.push_back(path_join(path, entry->d_name));
-        }
-    }
-    sort(imgs.begin(), imgs.end());
-    return imgs;
-}
-#else
-// scan a directory and get all files with input extensions
-inline std::vector<std::string> get_directory_images(const std::string& path,
-                                                     const std::string& exts) {
-    std::vector<std::string> imgs;
-    for (const auto& item :
-            std::experimental::filesystem::directory_iterator(path)) {
-        auto suffix = item.path().extension().string();
-        if (exts.find(suffix) != std::string::npos && suffix.size() > 0) {
-            auto fullname = path_join(path, item.path().filename().string());
-            imgs.push_back(item.path().string());
-        }
-    }
-    sort(imgs.begin(), imgs.end());
-    return imgs;
-}
-#endif
-
-inline int scaling(int resize_type, int &w, int &h, int new_w, int new_h,
-                   int target_size, int max_size, float &im_scale_ratio) {
-    if (w <= 0 || h <= 0 || new_w <= 0 || new_h <= 0) {
-        return -1;
-    }
-    switch (resize_type) {
-    case SCALE_TYPE::UNPADDING:
-        {
-            w = new_w;
-            h = new_h;
-            im_scale_ratio = 0;
-        }
-        break;
-    case SCALE_TYPE::RANGE_SCALING:
-        {
-            int im_max_size = std::max(w, h);
-            int im_min_size = std::min(w, h);
-            float scale_ratio = static_cast<float>(target_size)
-                              / static_cast<float>(im_min_size);
-            if (max_size > 0) {
-                if (round(scale_ratio * im_max_size) > max_size) {
-                    scale_ratio = static_cast<float>(max_size)
-                                / static_cast<float>(im_max_size);
-                }
-            }
-            w = round(scale_ratio * static_cast<float>(w));
-            h = round(scale_ratio * static_cast<float>(h));
-            im_scale_ratio = scale_ratio;
-        }
-        break;
-    default :
-        {
-            std::cout << "Can't support this type of scaling strategy."
-                      << std::endl;
-            std::cout << "Throw exception at file " << __FILE__
-                      << " on line " << __LINE__ << std::endl;
-            throw 0;
-        }
-        break;
-    }
-    return 0;
-}
-} // namespace utils
-} // namespace PaddleSolution