diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..c796dfd89f473684cce1584ebb28f2850063a78f
Binary files /dev/null and b/.DS_Store differ
diff --git a/.github/ISSUE_TEMPLATE/1_data.md b/.github/ISSUE_TEMPLATE/1_data.md
index 05627aa353d1cf06074445d2bb5344d94727fedf..674da8e938470dd8da8a5d7c6cbf946c58e3eca4 100644
--- a/.github/ISSUE_TEMPLATE/1_data.md
+++ b/.github/ISSUE_TEMPLATE/1_data.md
@@ -2,5 +2,4 @@
name: 1. 数据类问题
about: 数据标注、格式转换等问题
---
-
-说明数据类型(图像分类、目标检测、实例分割或语义分割)
+数据类型:请说明你的数据类型,如图像分类、目标检测、实例分割或语义分割
diff --git a/.github/ISSUE_TEMPLATE/2_train.md b/.github/ISSUE_TEMPLATE/2_train.md
index 489159731bfef42773dffa15cd30582d5c53f992..51edf12bdacd66cea3347cb174c08ab2aee56a33 100644
--- a/.github/ISSUE_TEMPLATE/2_train.md
+++ b/.github/ISSUE_TEMPLATE/2_train.md
@@ -3,4 +3,8 @@ name: 2. 模型训练
about: 模型训练中的问题
---
-如模型训练出错,建议贴上模型训练代码,以便开发人员分析,并快速响应
+问题类型:模型训练
+**问题描述**
+
+====================
+请在这里描述您在使用过程中的问题,如模型训练出错,建议贴上模型训练代码,以便开发人员分析,并快速响应
diff --git a/.github/ISSUE_TEMPLATE/3_deploy.md b/.github/ISSUE_TEMPLATE/3_deploy.md
index d012d10125c957e702f3877dc087b7331baceb0a..fc74abd33050ba2ee9b27a09ec0f5bb638ebc139 100644
--- a/.github/ISSUE_TEMPLATE/3_deploy.md
+++ b/.github/ISSUE_TEMPLATE/3_deploy.md
@@ -3,4 +3,9 @@ name: 3. 模型部署
about: 模型部署相关问题,包括C++、Python、Paddle Lite等
---
-说明您的部署环境,部署需求,模型类型和应用场景等,便于开发人员快速响应。
+问题类型:模型部署
+**问题描述**
+
+========================
+
+请在这里描述您在使用过程中的问题,说明您的部署环境,部署需求,模型类型和应用场景等,便于开发人员快速响应。
diff --git a/.github/ISSUE_TEMPLATE/4_gui.md b/.github/ISSUE_TEMPLATE/4_gui.md
index 780c8b903b9137f72037e311213443c8678f61d9..7ba7fe77ff08cd18d4ccd81c686a2c03968b40f6 100644
--- a/.github/ISSUE_TEMPLATE/4_gui.md
+++ b/.github/ISSUE_TEMPLATE/4_gui.md
@@ -2,5 +2,8 @@
name: 4. PaddleX GUI使用问题
about: Paddle GUI客户端使用问题
---
+问题类型:PaddleX GUI
+**问题描述**
-PaddleX GUI: https://www.paddlepaddle.org.cn/paddle/paddleX (请在ISSUE内容中保留此行内容)
+===================================
+请在这里描述您在使用GUI过程中的问题
diff --git a/.github/ISSUE_TEMPLATE/5_other.md b/.github/ISSUE_TEMPLATE/5_other.md
index 8ddfe49b544621918355f5c114c1124bdecc8ef3..f347d4f5cdc87b51ec37742a28a836eb8943bd71 100644
--- a/.github/ISSUE_TEMPLATE/5_other.md
+++ b/.github/ISSUE_TEMPLATE/5_other.md
@@ -2,3 +2,10 @@
name: 5. 其它类型问题
about: 所有问题都可以在这里提
---
+
+问题类型:其它
+**问题描述**
+
+========================
+
+请在这里描述您的问题
diff --git a/README.md b/README.md
index add63566f2632a0e535504a94da0605ce0618bc7..22392ad80dc3950a6d815cf8cc176b2e2f13e901 100644
--- a/README.md
+++ b/README.md
@@ -14,10 +14,13 @@


+[完整PaddleX在线使用文档目录](https://paddlex.readthedocs.io/zh_CN/develop/index.html)
+
集成飞桨智能视觉领域**图像分类**、**目标检测**、**语义分割**、**实例分割**任务能力,将深度学习开发全流程从**数据准备**、**模型训练与优化**到**多端部署**端到端打通,并提供**统一任务API接口**及**图形化开发界面Demo**。开发者无需分别安装不同套件,以**低代码**的形式即可快速完成飞桨全流程开发。
**PaddleX** 经过**质检**、**安防**、**巡检**、**遥感**、**零售**、**医疗**等十多个行业实际应用场景验证,沉淀产业实际经验,**并提供丰富的案例实践教程**,全程助力开发者产业实践落地。
+
## 安装
@@ -29,7 +32,7 @@
通过简洁易懂的Python API,在兼顾功能全面性、开发灵活性、集成方便性的基础上,给开发者最流畅的深度学习开发体验。
**前置依赖**
-> - paddlepaddle >= 1.8.0
+> - paddlepaddle >= 1.8.4
> - python >= 3.6
> - cython
> - pycocotools
@@ -44,10 +47,11 @@ pip install paddlex -i https://mirror.baidu.com/pypi/simple
无代码开发的可视化客户端,应用Paddle API实现,使开发者快速进行产业项目验证,并为用户开发自有深度学习软件/应用提供参照。
-- 前往[PaddleX官网](https://www.paddlepaddle.org.cn/paddle/paddlex),申请下载Paddle X GUI一键绿色安装包。
+- 前往[PaddleX官网](https://www.paddlepaddle.org.cn/paddle/paddlex),申请下载PaddleX GUI一键绿色安装包。
- 前往[PaddleX GUI使用教程](./docs/gui/how_to_use.md)了解PaddleX GUI使用详情。
+- [PaddleX GUI安装环境说明](./docs/gui/download.md)
## 产品模块说明
@@ -104,15 +108,15 @@ pip install paddlex -i https://mirror.baidu.com/pypi/simple
## 交流与反馈
- 项目官网:https://www.paddlepaddle.org.cn/paddle/paddlex
-- PaddleX用户交流群:1045148026 (手机QQ扫描如下二维码快速加入)
- 
+- PaddleX用户交流群:957286141 (手机QQ扫描如下二维码快速加入)
+ 
## 更新日志
> [历史版本及更新内容](https://paddlex.readthedocs.io/zh_CN/develop/change_log.html)
-
+- 2020.09.05 v1.2.0
- 2020.07.13 v1.1.0
- 2020.07.12 v1.0.8
- 2020.05.20 v1.0.0
diff --git a/deploy/README.md b/deploy/README.md
index 7fe3219882c3c8d863824829baf6742b74759d2f..15fbe898d3a4ebbf488b5c0fc1f665bf847f3aa9 100644
--- a/deploy/README.md
+++ b/deploy/README.md
@@ -14,3 +14,5 @@
- [模型量化](../docs/deploy/paddlelite/slim/quant.md)
- [模型裁剪](../docs/deploy/paddlelite/slim/prune.md)
- [Android平台](../docs/deploy/paddlelite/android.md)
+- [OpenVINO部署](../docs/deploy/openvino/introduction.md)
+- [树莓派部署](../docs/deploy/raspberry/Raspberry.md)
\ No newline at end of file
diff --git a/deploy/cpp/CMakeLists.txt b/deploy/cpp/CMakeLists.txt
index 349afa2cae5bf40721cafdf38bbf28ddd621beeb..a54979683cd14d2a352cb789b9d6dc7bd26d0a46 100644
--- a/deploy/cpp/CMakeLists.txt
+++ b/deploy/cpp/CMakeLists.txt
@@ -320,46 +320,34 @@ target_link_libraries(video_segmenter ${DEPS})
if (WIN32 AND WITH_MKL)
add_custom_command(TARGET classifier POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/mklml.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/mkldnn.dll
)
add_custom_command(TARGET detector POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/mklml.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/mkldnn.dll
)
add_custom_command(TARGET segmenter POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/mklml.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/mkldnn.dll
)
add_custom_command(TARGET video_classifier POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/mklml.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/mkldnn.dll
)
add_custom_command(TARGET video_detector POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/mklml.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/libiomp5md.dll
+          COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/mkldnn.dll
)
add_custom_command(TARGET video_segmenter POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/mklml.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/mkldnn.dll
)
# for encryption
if (EXISTS "${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll")
diff --git a/deploy/cpp/demo/classifier.cpp b/deploy/cpp/demo/classifier.cpp
index cf3bb5ccf64c43ec42d59a9b73fdced6b50b8dc5..548eaff411a737ea0ffcfca63d36a7f18cd9d994 100644
--- a/deploy/cpp/demo/classifier.cpp
+++ b/deploy/cpp/demo/classifier.cpp
@@ -29,6 +29,10 @@ using namespace std::chrono; // NOLINT
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
DEFINE_bool(use_trt, false, "Infering with TensorRT");
+DEFINE_bool(use_mkl, true, "Infering with MKL");
+DEFINE_int32(mkl_thread_num,
+ omp_get_num_procs(),
+ "Number of mkl threads");
DEFINE_int32(gpu_id, 0, "GPU card id");
DEFINE_string(key, "", "key of encryption");
DEFINE_string(image, "", "Path of test image file");
@@ -56,6 +60,8 @@ int main(int argc, char** argv) {
model.Init(FLAGS_model_dir,
FLAGS_use_gpu,
FLAGS_use_trt,
+ FLAGS_use_mkl,
+ FLAGS_mkl_thread_num,
FLAGS_gpu_id,
FLAGS_key);
diff --git a/deploy/cpp/demo/detector.cpp b/deploy/cpp/demo/detector.cpp
index ef7fd782715bef5d9cc1dae43c87ceaa123e914f..f5fefc05d0bbc4bbd482c23f0db8c066b7d1013b 100644
--- a/deploy/cpp/demo/detector.cpp
+++ b/deploy/cpp/demo/detector.cpp
@@ -31,6 +31,10 @@ using namespace std::chrono; // NOLINT
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
DEFINE_bool(use_trt, false, "Infering with TensorRT");
+DEFINE_bool(use_mkl, true, "Infering with MKL");
+DEFINE_int32(mkl_thread_num,
+ omp_get_num_procs(),
+ "Number of mkl threads");
DEFINE_int32(gpu_id, 0, "GPU card id");
DEFINE_string(key, "", "key of encryption");
DEFINE_string(image, "", "Path of test image file");
@@ -61,6 +65,8 @@ int main(int argc, char** argv) {
model.Init(FLAGS_model_dir,
FLAGS_use_gpu,
FLAGS_use_trt,
+ FLAGS_use_mkl,
+ FLAGS_mkl_thread_num,
FLAGS_gpu_id,
FLAGS_key);
int imgs = 1;
diff --git a/deploy/cpp/demo/segmenter.cpp b/deploy/cpp/demo/segmenter.cpp
index d13a328f5beecc90fe9257a4f32ee63a8fe609a5..0d888001490759f65790d51837e2e69a6f448c4b 100644
--- a/deploy/cpp/demo/segmenter.cpp
+++ b/deploy/cpp/demo/segmenter.cpp
@@ -30,6 +30,10 @@ using namespace std::chrono; // NOLINT
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
DEFINE_bool(use_trt, false, "Infering with TensorRT");
+DEFINE_bool(use_mkl, true, "Infering with MKL");
+DEFINE_int32(mkl_thread_num,
+ omp_get_num_procs(),
+ "Number of mkl threads");
DEFINE_int32(gpu_id, 0, "GPU card id");
DEFINE_string(key, "", "key of encryption");
DEFINE_string(image, "", "Path of test image file");
@@ -58,6 +62,8 @@ int main(int argc, char** argv) {
model.Init(FLAGS_model_dir,
FLAGS_use_gpu,
FLAGS_use_trt,
+ FLAGS_use_mkl,
+ FLAGS_mkl_thread_num,
FLAGS_gpu_id,
FLAGS_key);
int imgs = 1;
diff --git a/deploy/cpp/demo/video_classifier.cpp b/deploy/cpp/demo/video_classifier.cpp
index 96be867d40800455184b7938dc829e8a0b8f8390..c0485791ccb42fc880ab384ae2cf5e1d9d48b1ae 100644
--- a/deploy/cpp/demo/video_classifier.cpp
+++ b/deploy/cpp/demo/video_classifier.cpp
@@ -35,8 +35,12 @@ using namespace std::chrono; // NOLINT
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
DEFINE_bool(use_trt, false, "Infering with TensorRT");
+DEFINE_bool(use_mkl, true, "Infering with MKL");
DEFINE_int32(gpu_id, 0, "GPU card id");
DEFINE_string(key, "", "key of encryption");
+DEFINE_int32(mkl_thread_num,
+ omp_get_num_procs(),
+ "Number of mkl threads");
DEFINE_bool(use_camera, false, "Infering with Camera");
DEFINE_int32(camera_id, 0, "Camera id");
DEFINE_string(video_path, "", "Path of input video");
@@ -62,6 +66,8 @@ int main(int argc, char** argv) {
model.Init(FLAGS_model_dir,
FLAGS_use_gpu,
FLAGS_use_trt,
+ FLAGS_use_mkl,
+ FLAGS_mkl_thread_num,
FLAGS_gpu_id,
FLAGS_key);
diff --git a/deploy/cpp/demo/video_detector.cpp b/deploy/cpp/demo/video_detector.cpp
index ee4d5bdb138d03020042e60d41ded0ca1efde46d..e617dbd1339b73676225a65a667a42a06abfa63e 100644
--- a/deploy/cpp/demo/video_detector.cpp
+++ b/deploy/cpp/demo/video_detector.cpp
@@ -35,6 +35,7 @@ using namespace std::chrono; // NOLINT
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
DEFINE_bool(use_trt, false, "Infering with TensorRT");
+DEFINE_bool(use_mkl, true, "Infering with MKL");
DEFINE_int32(gpu_id, 0, "GPU card id");
DEFINE_bool(use_camera, false, "Infering with Camera");
DEFINE_int32(camera_id, 0, "Camera id");
@@ -42,6 +43,9 @@ DEFINE_string(video_path, "", "Path of input video");
DEFINE_bool(show_result, false, "show the result of each frame with a window");
DEFINE_bool(save_result, true, "save the result of each frame to a video");
DEFINE_string(key, "", "key of encryption");
+DEFINE_int32(mkl_thread_num,
+ omp_get_num_procs(),
+ "Number of mkl threads");
DEFINE_string(save_dir, "output", "Path to save visualized image");
DEFINE_double(threshold,
0.5,
@@ -64,6 +68,8 @@ int main(int argc, char** argv) {
model.Init(FLAGS_model_dir,
FLAGS_use_gpu,
FLAGS_use_trt,
+ FLAGS_use_mkl,
+ FLAGS_mkl_thread_num,
FLAGS_gpu_id,
FLAGS_key);
// Open video
diff --git a/deploy/cpp/demo/video_segmenter.cpp b/deploy/cpp/demo/video_segmenter.cpp
index 6a835117cd1434b5f26e0fb660e6fe07ef56e607..35af64f4b00ea5983653bb135394da9389539604 100644
--- a/deploy/cpp/demo/video_segmenter.cpp
+++ b/deploy/cpp/demo/video_segmenter.cpp
@@ -35,8 +35,12 @@ using namespace std::chrono; // NOLINT
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
DEFINE_bool(use_trt, false, "Infering with TensorRT");
+DEFINE_bool(use_mkl, true, "Infering with MKL");
DEFINE_int32(gpu_id, 0, "GPU card id");
DEFINE_string(key, "", "key of encryption");
+DEFINE_int32(mkl_thread_num,
+ omp_get_num_procs(),
+ "Number of mkl threads");
DEFINE_bool(use_camera, false, "Infering with Camera");
DEFINE_int32(camera_id, 0, "Camera id");
DEFINE_string(video_path, "", "Path of input video");
@@ -62,6 +66,8 @@ int main(int argc, char** argv) {
model.Init(FLAGS_model_dir,
FLAGS_use_gpu,
FLAGS_use_trt,
+ FLAGS_use_mkl,
+ FLAGS_mkl_thread_num,
FLAGS_gpu_id,
FLAGS_key);
// Open video
diff --git a/deploy/cpp/include/paddlex/paddlex.h b/deploy/cpp/include/paddlex/paddlex.h
index 00b1a05ac8127d403dd7325f3357ece75ec23a58..327058e4bd3251f41be82309f154b41eae11027c 100644
--- a/deploy/cpp/include/paddlex/paddlex.h
+++ b/deploy/cpp/include/paddlex/paddlex.h
@@ -70,6 +70,8 @@ class Model {
* @param model_dir: the directory which contains model.yml
* @param use_gpu: use gpu or not when infering
* @param use_trt: use Tensor RT or not when infering
+ * @param use_mkl: use mkl or not when infering
+ * @param mkl_thread_num: number of threads for mkldnn when infering
* @param gpu_id: the id of gpu when infering with using gpu
* @param key: the key of encryption when using encrypted model
* @param use_ir_optim: use ir optimization when infering
@@ -77,15 +79,26 @@ class Model {
void Init(const std::string& model_dir,
bool use_gpu = false,
bool use_trt = false,
+ bool use_mkl = true,
+ int mkl_thread_num = 4,
int gpu_id = 0,
std::string key = "",
bool use_ir_optim = true) {
- create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, use_ir_optim);
+ create_predictor(
+ model_dir,
+ use_gpu,
+ use_trt,
+ use_mkl,
+ mkl_thread_num,
+ gpu_id,
+ key,
+ use_ir_optim);
}
-
void create_predictor(const std::string& model_dir,
bool use_gpu = false,
bool use_trt = false,
+ bool use_mkl = true,
+ int mkl_thread_num = 4,
int gpu_id = 0,
std::string key = "",
bool use_ir_optim = true);
@@ -219,5 +232,7 @@ class Model {
std::vector outputs_;
// a predictor which run the model predicting
std::unique_ptr predictor_;
+ // input channel
+ int input_channel_;
};
} // namespace PaddleX
diff --git a/deploy/cpp/include/paddlex/results.h b/deploy/cpp/include/paddlex/results.h
index 72caa1f5d4f78275ca9c4de55aa89bc22edd02e5..e3526bf69b854d19a99cc001df226c5d51c7094d 100644
--- a/deploy/cpp/include/paddlex/results.h
+++ b/deploy/cpp/include/paddlex/results.h
@@ -37,7 +37,7 @@ struct Mask {
};
/*
- * @brief
+ * @brief
* This class represents target box in detection or instance segmentation tasks.
* */
struct Box {
@@ -47,7 +47,7 @@ struct Box {
// confidence score
float score;
std::vector coordinate;
- Mask mask;
+ Mask mask;
};
/*
diff --git a/deploy/cpp/include/paddlex/transforms.h b/deploy/cpp/include/paddlex/transforms.h
index 7e936dc17f4b6e58cdb8cdc36639173ccc24177c..46d0768b1bc6bcb2f2d70b541dd29314653873ac 100644
--- a/deploy/cpp/include/paddlex/transforms.h
+++ b/deploy/cpp/include/paddlex/transforms.h
@@ -21,6 +21,7 @@
#include
#include
#include
+#include
#include
#include
@@ -81,6 +82,16 @@ class Normalize : public Transform {
virtual void Init(const YAML::Node& item) {
mean_ = item["mean"].as>();
std_ = item["std"].as>();
+ if (item["min_val"].IsDefined()) {
+ min_val_ = item["min_val"].as>();
+ } else {
+ min_val_ = std::vector(mean_.size(), 0.);
+ }
+ if (item["max_val"].IsDefined()) {
+ max_val_ = item["max_val"].as>();
+ } else {
+ max_val_ = std::vector(mean_.size(), 255.);
+ }
}
virtual bool Run(cv::Mat* im, ImageBlob* data);
@@ -88,6 +99,8 @@ class Normalize : public Transform {
private:
std::vector mean_;
std::vector std_;
+ std::vector min_val_;
+ std::vector max_val_;
};
/*
@@ -216,8 +229,7 @@ class Padding : public Transform {
}
if (item["im_padding_value"].IsDefined()) {
im_value_ = item["im_padding_value"].as>();
- }
- else {
+ } else {
im_value_ = {0, 0, 0};
}
}
@@ -229,6 +241,25 @@ class Padding : public Transform {
int height_ = 0;
std::vector im_value_;
};
+
+/*
+ * @brief
+ * This class execute clip operation on image matrix
+ * */
+class Clip : public Transform {
+ public:
+ virtual void Init(const YAML::Node& item) {
+ min_val_ = item["min_val"].as>();
+ max_val_ = item["max_val"].as>();
+ }
+
+ virtual bool Run(cv::Mat* im, ImageBlob* data);
+
+ private:
+ std::vector min_val_;
+ std::vector max_val_;
+};
+
/*
* @brief
* This class is transform operations manager. It stores all neccessary
diff --git a/deploy/cpp/scripts/bootstrap.sh b/deploy/cpp/scripts/bootstrap.sh
index bb9756204e9e610365f67aa37dc78d1b5eaf80b8..e2434d13277a0f058158ba3cfcc883430825c745 100644
--- a/deploy/cpp/scripts/bootstrap.sh
+++ b/deploy/cpp/scripts/bootstrap.sh
@@ -8,10 +8,37 @@ fi
# download pre-compiled opencv lib
OPENCV_URL=https://bj.bcebos.com/paddleseg/deploy/opencv3.4.6gcc4.8ffmpeg.tar.gz2
+{
+ system_name=`awk -F= '/^NAME/{print $2}' /etc/os-release `
+} || {
+    echo "[ERROR] There are some problems, maybe caused by your system not being Ubuntu; refer to this doc for more information: https://github.com/PaddlePaddle/PaddleX/tree/develop/docs/deploy/opencv.md"
+ exit -1
+}
+
+# download pre-compiled opencv lib
+OPENCV_URL=https://bj.bcebos.com/paddleseg/deploy/opencv3.4.6gcc4.8ffmpeg.tar.gz2
+if [ $system_name == '"Ubuntu"' ]
+then
+ system_version=`awk -F= '/^VERSION_ID/{print $2}' /etc/os-release `
+ if [ $system_version == '"18.04"' ]
+ then
+ OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/opencv3.4.6gcc4.8ffmpeg_ubuntu_18.04.tar.gz2
+ elif [ $system_version == '"16.04"' ]
+ then
+ OPENCV_URL=https://bj.bcebos.com/paddleseg/deploy/opencv3.4.6gcc4.8ffmpeg.tar.gz2
+ else
+        echo "[ERROR] Cannot find pre-compiled opencv lib for your system environment, refer to this doc for more information: https://github.com/PaddlePaddle/PaddleX/tree/develop/docs/deploy/opencv.md"
+ exit -1
+ fi
+else
+    echo "[ERROR] Cannot find pre-compiled opencv lib for your system environment, refer to this doc for more information: https://github.com/PaddlePaddle/PaddleX/tree/develop/docs/deploy/opencv.md"
+ exit -1
+fi
+
if [ ! -d "./deps/opencv3.4.6gcc4.8ffmpeg/" ]; then
mkdir -p deps
cd deps
- wget -c ${OPENCV_URL}
+ wget -c ${OPENCV_URL} -O opencv3.4.6gcc4.8ffmpeg.tar.gz2
tar xvfj opencv3.4.6gcc4.8ffmpeg.tar.gz2
rm -rf opencv3.4.6gcc4.8ffmpeg.tar.gz2
cd ..
diff --git a/deploy/cpp/scripts/build.sh b/deploy/cpp/scripts/build.sh
index 6d6ad25b24170a27639f9b1d651888c4027dbeed..790e2160194a3d5fc73f4c4c608ab31af0f6a5e7 100644
--- a/deploy/cpp/scripts/build.sh
+++ b/deploy/cpp/scripts/build.sh
@@ -5,9 +5,9 @@ WITH_MKL=ON
# 是否集成 TensorRT(仅WITH_GPU=ON 有效)
WITH_TENSORRT=OFF
# TensorRT 的路径,如果需要集成TensorRT,需修改为您实际安装的TensorRT路径
-TENSORRT_DIR=/root/projects/TensorRT/
+TENSORRT_DIR=$(pwd)/TensorRT/
# Paddle 预测库路径, 请修改为您实际安装的预测库路径
-PADDLE_DIR=/root/projects/fluid_inference
+PADDLE_DIR=$(pwd)/fluid_inference
# Paddle 的预测库是否使用静态库来编译
# 使用TensorRT时,Paddle的预测库通常为动态库
WITH_STATIC_LIB=OFF
@@ -16,14 +16,18 @@ CUDA_LIB=/usr/local/cuda/lib64
# CUDNN 的 lib 路径
CUDNN_LIB=/usr/local/cuda/lib64
+{
+ bash $(pwd)/scripts/bootstrap.sh # 下载预编译版本的加密工具和opencv依赖库
+} || {
+ echo "Fail to execute script/bootstrap.sh"
+ exit -1
+}
+
# 是否加载加密后的模型
WITH_ENCRYPTION=ON
# 加密工具的路径, 如果使用自带预编译版本可不修改
-sh $(pwd)/scripts/bootstrap.sh # 下载预编译版本的加密工具
ENCRYPTION_DIR=$(pwd)/paddlex-encryption
-
# OPENCV 路径, 如果使用自带预编译版本可不修改
-sh $(pwd)/scripts/bootstrap.sh # 下载预编译版本的opencv
OPENCV_DIR=$(pwd)/deps/opencv3.4.6gcc4.8ffmpeg/
# 以下无需改动
diff --git a/deploy/cpp/src/paddlex.cpp b/deploy/cpp/src/paddlex.cpp
index 47dc5b9e9e9104e2d4983a8ac077e5a0810610cf..6d3c23094c3944b9359c701c4c3359c26313d1e3 100644
--- a/deploy/cpp/src/paddlex.cpp
+++ b/deploy/cpp/src/paddlex.cpp
@@ -11,16 +11,25 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+
+#include
#include
#include
#include
#include
#include "include/paddlex/paddlex.h"
+
+#include
+#include
+#include
+
namespace PaddleX {
void Model::create_predictor(const std::string& model_dir,
bool use_gpu,
bool use_trt,
+ bool use_mkl,
+ int mkl_thread_num,
int gpu_id,
std::string key,
bool use_ir_optim) {
@@ -40,7 +49,7 @@ void Model::create_predictor(const std::string& model_dir,
}
#endif
if (yaml_input == "") {
- // 读取配置文件
+ // read yaml file
std::ifstream yaml_fin(yaml_file);
yaml_fin.seekg(0, std::ios::end);
size_t yaml_file_size = yaml_fin.tellg();
@@ -48,7 +57,7 @@ void Model::create_predictor(const std::string& model_dir,
yaml_fin.seekg(0);
yaml_fin.read(&yaml_input[0], yaml_file_size);
}
- // 读取配置文件内容
+ // load yaml file
if (!load_config(yaml_input)) {
std::cerr << "Parse file 'model.yml' failed!" << std::endl;
exit(-1);
@@ -57,6 +66,15 @@ void Model::create_predictor(const std::string& model_dir,
if (key == "") {
config.SetModel(model_file, params_file);
}
+ if (use_mkl && !use_gpu) {
+ if (name != "HRNet" && name != "DeepLabv3p" && name != "PPYOLO") {
+ config.EnableMKLDNN();
+ config.SetCpuMathLibraryNumThreads(mkl_thread_num);
+ } else {
+ std::cerr << "HRNet/DeepLabv3p/PPYOLO are not supported "
+ << "for the use of mkldnn" << std::endl;
+ }
+ }
if (use_gpu) {
config.EnableUseGpu(100, gpu_id);
} else {
@@ -64,15 +82,15 @@ void Model::create_predictor(const std::string& model_dir,
}
config.SwitchUseFeedFetchOps(false);
config.SwitchSpecifyInputNames(true);
- // 开启图优化
+ // enable graph Optim
#if defined(__arm__) || defined(__aarch64__)
config.SwitchIrOptim(false);
#else
config.SwitchIrOptim(use_ir_optim);
#endif
- // 开启内存优化
+ // enable Memory Optim
config.EnableMemoryOptim();
- if (use_trt) {
+ if (use_trt && use_gpu) {
config.EnableTensorRtEngine(
1 << 20 /* workspace_size*/,
32 /* max_batch_size*/,
@@ -108,14 +126,19 @@ bool Model::load_config(const std::string& yaml_input) {
return false;
}
}
- // 构建数据处理流
+ // build data preprocess stream
transforms_.Init(config["Transforms"], to_rgb);
- // 读入label list
+ // read label list
labels.clear();
for (const auto& item : config["_Attributes"]["labels"]) {
int index = labels.size();
labels[index] = item.as();
}
+ if (config["_init_params"]["input_channel"].IsDefined()) {
+ input_channel_ = config["_init_params"]["input_channel"].as();
+ } else {
+ input_channel_ = 3;
+ }
return true;
}
@@ -152,19 +175,19 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
"to function predict()!" << std::endl;
return false;
}
- // 处理输入图像
+ // im preprocess
if (!preprocess(im, &inputs_)) {
std::cerr << "Preprocess failed!" << std::endl;
return false;
}
- // 使用加载的模型进行预测
+ // predict
auto in_tensor = predictor_->GetInputTensor("image");
int h = inputs_.new_im_size_[0];
int w = inputs_.new_im_size_[1];
- in_tensor->Reshape({1, 3, h, w});
+ in_tensor->Reshape({1, input_channel_, h, w});
in_tensor->copy_from_cpu(inputs_.im_data_.data());
predictor_->ZeroCopyRun();
- // 取出模型的输出结果
+ // get result
auto output_names = predictor_->GetOutputNames();
auto output_tensor = predictor_->GetOutputTensor(output_names[0]);
std::vector output_shape = output_tensor->shape();
@@ -174,7 +197,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
}
outputs_.resize(size);
output_tensor->copy_to_cpu(outputs_.data());
- // 对模型输出结果进行后处理
+ // postprocess
auto ptr = std::max_element(std::begin(outputs_), std::end(outputs_));
result->category_id = std::distance(std::begin(outputs_), ptr);
result->score = *ptr;
@@ -198,27 +221,27 @@ bool Model::predict(const std::vector& im_batch,
return false;
}
inputs_batch_.assign(im_batch.size(), ImageBlob());
- // 处理输入图像
+ // preprocess
if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
std::cerr << "Preprocess failed!" << std::endl;
return false;
}
- // 使用加载的模型进行预测
+ // predict
int batch_size = im_batch.size();
auto in_tensor = predictor_->GetInputTensor("image");
int h = inputs_batch_[0].new_im_size_[0];
int w = inputs_batch_[0].new_im_size_[1];
- in_tensor->Reshape({batch_size, 3, h, w});
- std::vector inputs_data(batch_size * 3 * h * w);
+ in_tensor->Reshape({batch_size, input_channel_, h, w});
+ std::vector inputs_data(batch_size * input_channel_ * h * w);
for (int i = 0; i < batch_size; ++i) {
std::copy(inputs_batch_[i].im_data_.begin(),
inputs_batch_[i].im_data_.end(),
- inputs_data.begin() + i * 3 * h * w);
+ inputs_data.begin() + i * input_channel_ * h * w);
}
in_tensor->copy_from_cpu(inputs_data.data());
// in_tensor->copy_from_cpu(inputs_.im_data_.data());
predictor_->ZeroCopyRun();
- // 取出模型的输出结果
+ // get result
auto output_names = predictor_->GetOutputNames();
auto output_tensor = predictor_->GetOutputTensor(output_names[0]);
std::vector output_shape = output_tensor->shape();
@@ -228,7 +251,7 @@ bool Model::predict(const std::vector& im_batch,
}
outputs_.resize(size);
output_tensor->copy_to_cpu(outputs_.data());
- // 对模型输出结果进行后处理
+ // postprocess
(*results).clear();
(*results).resize(batch_size);
int single_batch_size = size / batch_size;
@@ -258,7 +281,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
return false;
}
- // 处理输入图像
+ // preprocess
if (!preprocess(im, &inputs_)) {
std::cerr << "Preprocess failed!" << std::endl;
return false;
@@ -267,10 +290,10 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
int h = inputs_.new_im_size_[0];
int w = inputs_.new_im_size_[1];
auto im_tensor = predictor_->GetInputTensor("image");
- im_tensor->Reshape({1, 3, h, w});
+ im_tensor->Reshape({1, input_channel_, h, w});
im_tensor->copy_from_cpu(inputs_.im_data_.data());
- if (name == "YOLOv3") {
+ if (name == "YOLOv3" || name == "PPYOLO") {
auto im_size_tensor = predictor_->GetInputTensor("im_size");
im_size_tensor->Reshape({1, 2});
im_size_tensor->copy_from_cpu(inputs_.ori_im_size_.data());
@@ -288,7 +311,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
im_info_tensor->copy_from_cpu(im_info);
im_shape_tensor->copy_from_cpu(im_shape);
}
- // 使用加载的模型进行预测
+ // predict
predictor_->ZeroCopyRun();
std::vector output_box;
@@ -306,7 +329,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
return true;
}
int num_boxes = size / 6;
- // 解析预测框box
+ // box postprocess
for (int i = 0; i < num_boxes; ++i) {
Box box;
box.category_id = static_cast(round(output_box[i * 6]));
@@ -321,7 +344,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
box.coordinate = {xmin, ymin, w, h};
result->boxes.push_back(std::move(box));
}
- // 实例分割需解析mask
+ // mask postprocess
if (name == "MaskRCNN") {
std::vector output_mask;
auto output_mask_tensor = predictor_->GetOutputTensor(output_names[1]);
@@ -337,12 +360,22 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
result->mask_resolution = output_mask_shape[2];
for (int i = 0; i < result->boxes.size(); ++i) {
Box* box = &result->boxes[i];
- auto begin_mask =
- output_mask.begin() + (i * classes + box->category_id) * mask_pixels;
- auto end_mask = begin_mask + mask_pixels;
- box->mask.data.assign(begin_mask, end_mask);
box->mask.shape = {static_cast(box->coordinate[2]),
static_cast(box->coordinate[3])};
+ auto begin_mask =
+ output_mask.data() + (i * classes + box->category_id) * mask_pixels;
+ cv::Mat bin_mask(result->mask_resolution,
+ result->mask_resolution,
+ CV_32FC1,
+ begin_mask);
+ cv::resize(bin_mask,
+ bin_mask,
+ cv::Size(box->mask.shape[0], box->mask.shape[1]));
+ cv::threshold(bin_mask, bin_mask, 0.5, 1, cv::THRESH_BINARY);
+ auto mask_int_begin = reinterpret_cast(bin_mask.data);
+ auto mask_int_end =
+ mask_int_begin + box->mask.shape[0] * box->mask.shape[1];
+ box->mask.data.assign(mask_int_begin, mask_int_end);
}
}
return true;
@@ -366,12 +399,12 @@ bool Model::predict(const std::vector& im_batch,
inputs_batch_.assign(im_batch.size(), ImageBlob());
int batch_size = im_batch.size();
- // 处理输入图像
+ // preprocess
if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
std::cerr << "Preprocess failed!" << std::endl;
return false;
}
- // 对RCNN类模型做批量padding
+ // RCNN model padding
if (batch_size > 1) {
if (name == "FasterRCNN" || name == "MaskRCNN") {
int max_h = -1;
@@ -411,15 +444,15 @@ bool Model::predict(const std::vector& im_batch,
int h = inputs_batch_[0].new_im_size_[0];
int w = inputs_batch_[0].new_im_size_[1];
auto im_tensor = predictor_->GetInputTensor("image");
- im_tensor->Reshape({batch_size, 3, h, w});
- std::vector inputs_data(batch_size * 3 * h * w);
+ im_tensor->Reshape({batch_size, input_channel_, h, w});
+ std::vector inputs_data(batch_size * input_channel_ * h * w);
for (int i = 0; i < batch_size; ++i) {
std::copy(inputs_batch_[i].im_data_.begin(),
inputs_batch_[i].im_data_.end(),
- inputs_data.begin() + i * 3 * h * w);
+ inputs_data.begin() + i * input_channel_ * h * w);
}
im_tensor->copy_from_cpu(inputs_data.data());
- if (name == "YOLOv3") {
+ if (name == "YOLOv3" || name == "PPYOLO") {
auto im_size_tensor = predictor_->GetInputTensor("im_size");
im_size_tensor->Reshape({batch_size, 2});
std::vector inputs_data_size(batch_size * 2);
@@ -452,10 +485,10 @@ bool Model::predict(const std::vector& im_batch,
im_info_tensor->copy_from_cpu(im_info.data());
im_shape_tensor->copy_from_cpu(im_shape.data());
}
- // 使用加载的模型进行预测
+ // predict
predictor_->ZeroCopyRun();
- // 读取所有box
+ // get all box
std::vector output_box;
auto output_names = predictor_->GetOutputNames();
auto output_box_tensor = predictor_->GetOutputTensor(output_names[0]);
@@ -472,7 +505,7 @@ bool Model::predict(const std::vector& im_batch,
}
auto lod_vector = output_box_tensor->lod();
int num_boxes = size / 6;
- // 解析预测框box
+ // box postprocess
(*results).clear();
(*results).resize(batch_size);
for (int i = 0; i < lod_vector[0].size() - 1; ++i) {
@@ -492,7 +525,7 @@ bool Model::predict(const std::vector& im_batch,
}
}
- // 实例分割需解析mask
+ // mask postprocess
if (name == "MaskRCNN") {
std::vector output_mask;
auto output_mask_tensor = predictor_->GetOutputTensor(output_names[1]);
@@ -509,14 +542,24 @@ bool Model::predict(const std::vector& im_batch,
for (int i = 0; i < lod_vector[0].size() - 1; ++i) {
(*results)[i].mask_resolution = output_mask_shape[2];
for (int j = 0; j < (*results)[i].boxes.size(); ++j) {
- Box* box = &(*results)[i].boxes[j];
+      Box* box = &(*results)[i].boxes[j];
int category_id = box->category_id;
- auto begin_mask = output_mask.begin() +
- (mask_idx * classes + category_id) * mask_pixels;
- auto end_mask = begin_mask + mask_pixels;
- box->mask.data.assign(begin_mask, end_mask);
box->mask.shape = {static_cast(box->coordinate[2]),
- static_cast(box->coordinate[3])};
+ static_cast(box->coordinate[3])};
+        auto begin_mask =
+            output_mask.data() + (mask_idx * classes + box->category_id) * mask_pixels;
+ cv::Mat bin_mask(output_mask_shape[2],
+ output_mask_shape[2],
+ CV_32FC1,
+ begin_mask);
+ cv::resize(bin_mask,
+ bin_mask,
+ cv::Size(box->mask.shape[0], box->mask.shape[1]));
+ cv::threshold(bin_mask, bin_mask, 0.5, 1, cv::THRESH_BINARY);
+ auto mask_int_begin = reinterpret_cast(bin_mask.data);
+ auto mask_int_end =
+ mask_int_begin + box->mask.shape[0] * box->mask.shape[1];
+ box->mask.data.assign(mask_int_begin, mask_int_end);
mask_idx++;
}
}
@@ -537,7 +580,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
return false;
}
- // 处理输入图像
+ // preprocess
if (!preprocess(im, &inputs_)) {
std::cerr << "Preprocess failed!" << std::endl;
return false;
@@ -546,13 +589,13 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
int h = inputs_.new_im_size_[0];
int w = inputs_.new_im_size_[1];
auto im_tensor = predictor_->GetInputTensor("image");
- im_tensor->Reshape({1, 3, h, w});
+ im_tensor->Reshape({1, input_channel_, h, w});
im_tensor->copy_from_cpu(inputs_.im_data_.data());
- // 使用加载的模型进行预测
+ // predict
predictor_->ZeroCopyRun();
- // 获取预测置信度,经过argmax后的labelmap
+ // get labelmap
auto output_names = predictor_->GetOutputNames();
auto output_label_tensor = predictor_->GetOutputTensor(output_names[0]);
std::vector output_label_shape = output_label_tensor->shape();
@@ -565,7 +608,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
result->label_map.data.resize(size);
output_label_tensor->copy_to_cpu(result->label_map.data.data());
- // 获取预测置信度scoremap
+ // get scoremap
auto output_score_tensor = predictor_->GetOutputTensor(output_names[1]);
std::vector output_score_shape = output_score_tensor->shape();
size = 1;
@@ -577,7 +620,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
result->score_map.data.resize(size);
output_score_tensor->copy_to_cpu(result->score_map.data.data());
- // 解析输出结果到原图大小
+ // get origin image result
std::vector label_map(result->label_map.data.begin(),
result->label_map.data.end());
cv::Mat mask_label(result->label_map.shape[1],
@@ -647,7 +690,7 @@ bool Model::predict(const std::vector& im_batch,
return false;
}
- // 处理输入图像
+ // preprocess
inputs_batch_.assign(im_batch.size(), ImageBlob());
if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
std::cerr << "Preprocess failed!" << std::endl;
@@ -660,20 +703,20 @@ bool Model::predict(const std::vector& im_batch,
int h = inputs_batch_[0].new_im_size_[0];
int w = inputs_batch_[0].new_im_size_[1];
auto im_tensor = predictor_->GetInputTensor("image");
- im_tensor->Reshape({batch_size, 3, h, w});
- std::vector inputs_data(batch_size * 3 * h * w);
+ im_tensor->Reshape({batch_size, input_channel_, h, w});
+ std::vector inputs_data(batch_size * input_channel_ * h * w);
for (int i = 0; i < batch_size; ++i) {
std::copy(inputs_batch_[i].im_data_.begin(),
inputs_batch_[i].im_data_.end(),
- inputs_data.begin() + i * 3 * h * w);
+ inputs_data.begin() + i * input_channel_ * h * w);
}
im_tensor->copy_from_cpu(inputs_data.data());
// im_tensor->copy_from_cpu(inputs_.im_data_.data());
- // 使用加载的模型进行预测
+ // predict
predictor_->ZeroCopyRun();
- // 获取预测置信度,经过argmax后的labelmap
+ // get labelmap
auto output_names = predictor_->GetOutputNames();
auto output_label_tensor = predictor_->GetOutputTensor(output_names[0]);
std::vector output_label_shape = output_label_tensor->shape();
@@ -698,7 +741,7 @@ bool Model::predict(const std::vector& im_batch,
(*results)[i].label_map.data.data());
}
- // 获取预测置信度scoremap
+ // get scoremap
auto output_score_tensor = predictor_->GetOutputTensor(output_names[1]);
std::vector output_score_shape = output_score_tensor->shape();
size = 1;
@@ -722,7 +765,7 @@ bool Model::predict(const std::vector& im_batch,
(*results)[i].score_map.data.data());
}
- // 解析输出结果到原图大小
+ // get origin image result
for (int i = 0; i < batch_size; ++i) {
std::vector label_map((*results)[i].label_map.data.begin(),
(*results)[i].label_map.data.end());
diff --git a/deploy/cpp/src/transforms.cpp b/deploy/cpp/src/transforms.cpp
index f623fc664e9d66002e0eb0065d034d90965eddf7..bf4fbb70a11c00b7a259824ed2544afef43e3631 100644
--- a/deploy/cpp/src/transforms.cpp
+++ b/deploy/cpp/src/transforms.cpp
@@ -12,12 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "include/paddlex/transforms.h"
+
+#include
+
#include
#include
#include
-#include
-
-#include "include/paddlex/transforms.h"
namespace PaddleX {
@@ -28,16 +29,20 @@ std::map interpolations = {{"LINEAR", cv::INTER_LINEAR},
{"LANCZOS4", cv::INTER_LANCZOS4}};
bool Normalize::Run(cv::Mat* im, ImageBlob* data) {
- for (int h = 0; h < im->rows; h++) {
- for (int w = 0; w < im->cols; w++) {
- im->at(h, w)[0] =
- (im->at(h, w)[0] / 255.0 - mean_[0]) / std_[0];
- im->at(h, w)[1] =
- (im->at(h, w)[1] / 255.0 - mean_[1]) / std_[1];
- im->at(h, w)[2] =
- (im->at(h, w)[2] / 255.0 - mean_[2]) / std_[2];
- }
+ std::vector range_val;
+ for (int c = 0; c < im->channels(); c++) {
+ range_val.push_back(max_val_[c] - min_val_[c]);
}
+
+ std::vector split_im;
+ cv::split(*im, split_im);
+ for (int c = 0; c < im->channels(); c++) {
+ cv::subtract(split_im[c], cv::Scalar(min_val_[c]), split_im[c]);
+ cv::divide(split_im[c], cv::Scalar(range_val[c]), split_im[c]);
+ cv::subtract(split_im[c], cv::Scalar(mean_[c]), split_im[c]);
+ cv::divide(split_im[c], cv::Scalar(std_[c]), split_im[c]);
+ }
+ cv::merge(split_im, *im);
return true;
}
@@ -111,11 +116,22 @@ bool Padding::Run(cv::Mat* im, ImageBlob* data) {
<< ", but they should be greater than 0." << std::endl;
return false;
}
- cv::Scalar value = cv::Scalar(im_value_[0], im_value_[1], im_value_[2]);
- cv::copyMakeBorder(
- *im, *im, 0, padding_h, 0, padding_w, cv::BORDER_CONSTANT, value);
+ std::vector padded_im_per_channel;
+ for (size_t i = 0; i < im->channels(); i++) {
+ const cv::Mat per_channel = cv::Mat(im->rows + padding_h,
+ im->cols + padding_w,
+ CV_32FC1,
+ cv::Scalar(im_value_[i]));
+ padded_im_per_channel.push_back(per_channel);
+ }
+ cv::Mat padded_im;
+ cv::merge(padded_im_per_channel, padded_im);
+ cv::Rect im_roi = cv::Rect(0, 0, im->cols, im->rows);
+ im->copyTo(padded_im(im_roi));
+ *im = padded_im;
data->new_im_size_[0] = im->rows;
data->new_im_size_[1] = im->cols;
+
return true;
}
@@ -161,12 +177,26 @@ bool Resize::Run(cv::Mat* im, ImageBlob* data) {
return true;
}
+bool Clip::Run(cv::Mat* im, ImageBlob* data) {
+ std::vector split_im;
+ cv::split(*im, split_im);
+ for (int c = 0; c < im->channels(); c++) {
+ cv::threshold(split_im[c], split_im[c], max_val_[c], max_val_[c],
+ cv::THRESH_TRUNC);
+ cv::subtract(cv::Scalar(0), split_im[c], split_im[c]);
+ cv::threshold(split_im[c], split_im[c], min_val_[c], min_val_[c],
+ cv::THRESH_TRUNC);
+ cv::divide(split_im[c], cv::Scalar(-1), split_im[c]);
+ }
+ cv::merge(split_im, *im);
+ return true;
+}
+
void Transforms::Init(const YAML::Node& transforms_node, bool to_rgb) {
transforms_.clear();
to_rgb_ = to_rgb;
for (const auto& item : transforms_node) {
std::string name = item.begin()->first.as();
- std::cout << "trans name: " << name << std::endl;
std::shared_ptr transform = CreateTransform(name);
transform->Init(item.begin()->second);
transforms_.push_back(transform);
@@ -187,6 +217,8 @@ std::shared_ptr Transforms::CreateTransform(
return std::make_shared();
} else if (transform_name == "ResizeByLong") {
return std::make_shared();
+ } else if (transform_name == "Clip") {
+ return std::make_shared();
} else {
std::cerr << "There's unexpected transform(name='" << transform_name
<< "')." << std::endl;
@@ -195,11 +227,11 @@ std::shared_ptr Transforms::CreateTransform(
}
bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
- // 按照transforms中预处理算子顺序处理图像
+ // do all preprocess ops by order
if (to_rgb_) {
cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
}
- (*im).convertTo(*im, CV_32FC3);
+ (*im).convertTo(*im, CV_32FC(im->channels()));
data->ori_im_size_[0] = im->rows;
data->ori_im_size_[1] = im->cols;
data->new_im_size_[0] = im->rows;
@@ -211,8 +243,8 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
}
}
- // 将图像由NHWC转为NCHW格式
- // 同时转为连续的内存块存储到ImageBlob
+ // data format NHWC to NCHW
+ // img data save to ImageBlob
int h = im->rows;
int w = im->cols;
int c = im->channels();
diff --git a/deploy/cpp/src/visualize.cpp b/deploy/cpp/src/visualize.cpp
index afc1733b497269b706bf4e07d82f3a7aa43087f5..d6efc7f9f5c19c436d9bc32a7a7330a0749b9dd5 100644
--- a/deploy/cpp/src/visualize.cpp
+++ b/deploy/cpp/src/visualize.cpp
@@ -47,7 +47,7 @@ cv::Mat Visualize(const cv::Mat& img,
boxes[i].coordinate[2],
boxes[i].coordinate[3]);
- // 生成预测框和标题
+ // draw box and title
std::string text = boxes[i].category;
int c1 = colormap[3 * boxes[i].category_id + 0];
int c2 = colormap[3 * boxes[i].category_id + 1];
@@ -63,13 +63,13 @@ cv::Mat Visualize(const cv::Mat& img,
origin.x = roi.x;
origin.y = roi.y;
- // 生成预测框标题的背景
+ // background
cv::Rect text_back = cv::Rect(boxes[i].coordinate[0],
boxes[i].coordinate[1] - text_size.height,
text_size.width,
text_size.height);
- // 绘图和文字
+ // draw
cv::rectangle(vis_img, roi, roi_color, 2);
cv::rectangle(vis_img, text_back, roi_color, -1);
cv::putText(vis_img,
@@ -80,18 +80,16 @@ cv::Mat Visualize(const cv::Mat& img,
cv::Scalar(255, 255, 255),
thickness);
- // 生成实例分割mask
+ // mask
if (boxes[i].mask.data.size() == 0) {
continue;
}
- cv::Mat bin_mask(result.mask_resolution,
- result.mask_resolution,
+ std::vector mask_data;
+ mask_data.assign(boxes[i].mask.data.begin(), boxes[i].mask.data.end());
+ cv::Mat bin_mask(boxes[i].mask.shape[1],
+ boxes[i].mask.shape[0],
CV_32FC1,
- boxes[i].mask.data.data());
- cv::resize(bin_mask,
- bin_mask,
- cv::Size(boxes[i].mask.shape[0], boxes[i].mask.shape[1]));
- cv::threshold(bin_mask, bin_mask, 0.5, 1, cv::THRESH_BINARY);
+ mask_data.data());
cv::Mat full_mask = cv::Mat::zeros(vis_img.size(), CV_8UC1);
bin_mask.copyTo(full_mask(roi));
cv::Mat mask_ch[3];
diff --git a/deploy/lite/android/sdk/src/main/java/com/baidu/paddlex/preprocess/Transforms.java b/deploy/lite/android/sdk/src/main/java/com/baidu/paddlex/preprocess/Transforms.java
index 940ebaa234db2e34faa2daaf74dfacc0e9d131fe..d88ec4bfa7017fede63ffccc154bcf4a34a8a878 100644
--- a/deploy/lite/android/sdk/src/main/java/com/baidu/paddlex/preprocess/Transforms.java
+++ b/deploy/lite/android/sdk/src/main/java/com/baidu/paddlex/preprocess/Transforms.java
@@ -23,6 +23,7 @@ import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import java.util.ArrayList;
+import java.util.Date;
import java.util.HashMap;
import java.util.List;
@@ -101,6 +102,15 @@ public class Transforms {
if (info.containsKey("coarsest_stride")) {
padding.coarsest_stride = (int) info.get("coarsest_stride");
}
+ if (info.containsKey("im_padding_value")) {
+ List im_padding_value = (List) info.get("im_padding_value");
+ if (im_padding_value.size()!=3){
+ Log.e(TAG, "len of im_padding_value in padding must == 3.");
+ }
+ for (int k =0; i> reverseReshapeInfo = new ArrayList>(imageBlob.getReshapeInfo().entrySet()).listIterator(imageBlob.getReshapeInfo().size());
while (reverseReshapeInfo.hasPrevious()) {
Map.Entry entry = reverseReshapeInfo.previous();
diff --git a/deploy/openvino/CMakeLists.txt b/deploy/openvino/CMakeLists.txt
old mode 100644
new mode 100755
index 8e32a9592fce38918e46ad9ab9e4b2d1fc97cd6e..e219c8537c40af153b48e5025d07f9292482686a
--- a/deploy/openvino/CMakeLists.txt
+++ b/deploy/openvino/CMakeLists.txt
@@ -8,7 +8,9 @@ SET(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
SET(OPENVINO_DIR "" CACHE PATH "Location of libraries")
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
SET(GFLAGS_DIR "" CACHE PATH "Location of libraries")
+SET(GLOG_DIR "" CACHE PATH "Location of libraries")
SET(NGRAPH_LIB "" CACHE PATH "Location of libraries")
+SET(ARCH "" CACHE PATH "Location of libraries")
include(cmake/yaml-cpp.cmake)
@@ -27,6 +29,12 @@ macro(safe_set_static_flag)
endforeach(flag_var)
endmacro()
+if(NOT WIN32)
+ if (NOT DEFINED ARCH OR ${ARCH} STREQUAL "")
+ message(FATAL_ERROR "please set ARCH with -DARCH=x86 OR armv7")
+ endif()
+endif()
+
if (NOT DEFINED OPENVINO_DIR OR ${OPENVINO_DIR} STREQUAL "")
message(FATAL_ERROR "please set OPENVINO_DIR with -DOPENVINO_DIR=/path/influence_engine")
endif()
@@ -39,19 +47,32 @@ if (NOT DEFINED GFLAGS_DIR OR ${GFLAGS_DIR} STREQUAL "")
message(FATAL_ERROR "please set GFLAGS_DIR with -DGFLAGS_DIR=/path/gflags")
endif()
+if (NOT DEFINED GLOG_DIR OR ${GLOG_DIR} STREQUAL "")
+    message(FATAL_ERROR "please set GLOG_DIR with -DGLOG_DIR=/path/glog")
+endif()
+
if (NOT DEFINED NGRAPH_LIB OR ${NGRAPH_LIB} STREQUAL "")
message(FATAL_ERROR "please set NGRAPH_DIR with -DNGRAPH_DIR=/path/ngraph")
endif()
include_directories("${OPENVINO_DIR}")
-link_directories("${OPENVINO_DIR}/lib")
include_directories("${OPENVINO_DIR}/include")
-link_directories("${OPENVINO_DIR}/external/tbb/lib")
include_directories("${OPENVINO_DIR}/external/tbb/include/tbb")
+link_directories("${OPENVINO_DIR}/lib")
+link_directories("${OPENVINO_DIR}/external/tbb/lib")
+if(WIN32)
+ link_directories("${OPENVINO_DIR}/lib/intel64/Release")
+ link_directories("${OPENVINO_DIR}/bin/intel64/Release")
+endif()
+
+
link_directories("${GFLAGS_DIR}/lib")
include_directories("${GFLAGS_DIR}/include")
+link_directories("${GLOG_DIR}/lib")
+include_directories("${GLOG_DIR}/include")
+
link_directories("${NGRAPH_LIB}")
link_directories("${NGRAPH_LIB}/lib")
@@ -79,14 +100,29 @@ else()
set(CMAKE_STATIC_LIBRARY_PREFIX "")
endif()
-
-if(WITH_STATIC_LIB)
- set(DEPS ${OPENVINO_DIR}/lib/intel64/libinference_engine${CMAKE_STATIC_LIBRARY_SUFFIX})
- set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/libinference_engine_legacy${CMAKE_STATIC_LIBRARY_SUFFIX})
+if(WIN32)
+ set(DEPS ${OPENVINO_DIR}/lib/intel64/Release/inference_engine${CMAKE_STATIC_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/Release/inference_engine_legacy${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
- set(DEPS ${OPENVINO_DIR}/lib/intel64/libinference_engine${CMAKE_SHARED_LIBRARY_SUFFIX})
- set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/libinference_engine_legacy${CMAKE_SHARED_LIBRARY_SUFFIX})
-endif()
+ if (ARCH STREQUAL "armv7")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv7-a")
+ if(WITH_STATIC_LIB)
+ set(DEPS ${OPENVINO_DIR}/lib/armv7l/libinference_engine${CMAKE_STATIC_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/armv7l/libinference_engine_legacy${CMAKE_STATIC_LIBRARY_SUFFIX})
+ else()
+ set(DEPS ${OPENVINO_DIR}/lib/armv7l/libinference_engine${CMAKE_SHARED_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/armv7l/libinference_engine_legacy${CMAKE_SHARED_LIBRARY_SUFFIX})
+ endif()
+ else()
+ if(WITH_STATIC_LIB)
+ set(DEPS ${OPENVINO_DIR}/lib/intel64/libinference_engine${CMAKE_STATIC_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/libinference_engine_legacy${CMAKE_STATIC_LIBRARY_SUFFIX})
+ else()
+ set(DEPS ${OPENVINO_DIR}/lib/intel64/libinference_engine${CMAKE_SHARED_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/libinference_engine_legacy${CMAKE_SHARED_LIBRARY_SUFFIX})
+ endif()
+ endif()
+endif(WIN32)
if (NOT WIN32)
set(DEPS ${DEPS}
@@ -94,7 +130,7 @@ if (NOT WIN32)
)
else()
set(DEPS ${DEPS}
- glog gflags_static libprotobuf zlibstatic xxhash libyaml-cppmt)
+ glog gflags_static libyaml-cppmt)
set(DEPS ${DEPS} libcmt shlwapi)
endif(NOT WIN32)
@@ -105,7 +141,14 @@ if (NOT WIN32)
endif()
set(DEPS ${DEPS} ${OpenCV_LIBS})
-add_executable(classifier src/classifier.cpp src/transforms.cpp src/paddlex.cpp)
+add_executable(classifier demo/classifier.cpp src/transforms.cpp src/paddlex.cpp)
ADD_DEPENDENCIES(classifier ext-yaml-cpp)
target_link_libraries(classifier ${DEPS})
+add_executable(segmenter demo/segmenter.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+ADD_DEPENDENCIES(segmenter ext-yaml-cpp)
+target_link_libraries(segmenter ${DEPS})
+
+add_executable(detector demo/detector.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+ADD_DEPENDENCIES(detector ext-yaml-cpp)
+target_link_libraries(detector ${DEPS})
diff --git a/deploy/openvino/CMakeSettings.json b/deploy/openvino/CMakeSettings.json
old mode 100644
new mode 100755
index 861839dbc67816aeb96ca1ab174d95ca7dd292ef..bb3873b6022deb06ccec99830ed4d0d89aa42f6b
--- a/deploy/openvino/CMakeSettings.json
+++ b/deploy/openvino/CMakeSettings.json
@@ -1,27 +1,47 @@
{
- "configurations": [
+ "configurations": [
+ {
+ "name": "x64-Release",
+ "generator": "Ninja",
+ "configurationType": "RelWithDebInfo",
+ "inheritEnvironments": [ "msvc_x64_x64" ],
+ "buildRoot": "${projectDir}\\out\\build\\${name}",
+ "installRoot": "${projectDir}\\out\\install\\${name}",
+ "cmakeCommandArgs": "",
+ "buildCommandArgs": "-v",
+ "ctestCommandArgs": "",
+ "variables": [
{
- "name": "x64-Release",
- "generator": "Ninja",
- "configurationType": "RelWithDebInfo",
- "inheritEnvironments": [ "msvc_x64_x64" ],
- "buildRoot": "${projectDir}\\out\\build\\${name}",
- "installRoot": "${projectDir}\\out\\install\\${name}",
- "cmakeCommandArgs": "",
- "buildCommandArgs": "-v",
- "ctestCommandArgs": "",
- "variables": [
- {
- "name": "OPENCV_DIR",
- "value": "C:/projects/opencv",
- "type": "PATH"
- },
- {
- "name": "OPENVINO_LIB",
- "value": "C:/projetcs/inference_engine",
- "type": "PATH"
- }
- ]
+ "name": "OPENCV_DIR",
+ "value": "/path/to/opencv",
+ "type": "PATH"
+ },
+ {
+ "name": "OPENVINO_DIR",
+ "value": "C:/Program Files (x86)/IntelSWTools/openvino/deployment_tools/inference_engine",
+ "type": "PATH"
+ },
+ {
+ "name": "NGRAPH_LIB",
+ "value": "C:/Program Files (x86)/IntelSWTools/openvino/deployment_tools/ngraph/lib",
+ "type": "PATH"
+ },
+ {
+ "name": "GFLAGS_DIR",
+ "value": "/path/to/gflags",
+ "type": "PATH"
+ },
+ {
+ "name": "WITH_STATIC_LIB",
+ "value": "True",
+ "type": "BOOL"
+ },
+ {
+ "name": "GLOG_DIR",
+ "value": "/path/to/glog",
+ "type": "PATH"
}
- ]
-}
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/deploy/openvino/cmake/yaml-cpp.cmake b/deploy/openvino/cmake/yaml-cpp.cmake
old mode 100644
new mode 100755
index 30d904dc76196cf106abccb47c003eed485691f1..726433d904908ce96c51442246fc884d0899de04
--- a/deploy/openvino/cmake/yaml-cpp.cmake
+++ b/deploy/openvino/cmake/yaml-cpp.cmake
@@ -1,4 +1,3 @@
-find_package(Git REQUIRED)
include(ExternalProject)
diff --git a/deploy/openvino/src/classifier.cpp b/deploy/openvino/demo/classifier.cpp
old mode 100644
new mode 100755
similarity index 87%
rename from deploy/openvino/src/classifier.cpp
rename to deploy/openvino/demo/classifier.cpp
index 38c0da9b86d8b6d9c7d248aeb8526dfe1deab148..2180cb40e390affa2dd1ddcd720d900c715aab75
--- a/deploy/openvino/src/classifier.cpp
+++ b/deploy/openvino/demo/classifier.cpp
@@ -22,7 +22,7 @@
#include "include/paddlex/paddlex.h"
DEFINE_string(model_dir, "", "Path of inference model");
-DEFINE_string(cfg_dir, "", "Path of inference model");
+DEFINE_string(cfg_file, "", "Path of PaddelX model yml file");
DEFINE_string(device, "CPU", "Device name");
DEFINE_string(image, "", "Path of test image file");
DEFINE_string(image_list, "", "Path of test image list file");
@@ -35,8 +35,8 @@ int main(int argc, char** argv) {
std::cerr << "--model_dir need to be defined" << std::endl;
return -1;
}
- if (FLAGS_cfg_dir == "") {
- std::cerr << "--cfg_dir need to be defined" << std::endl;
+ if (FLAGS_cfg_file == "") {
+ std::cerr << "--cfg_file need to be defined" << std::endl;
return -1;
}
if (FLAGS_image == "" & FLAGS_image_list == "") {
@@ -44,11 +44,11 @@ int main(int argc, char** argv) {
return -1;
}
- // 加载模型
+ // load model
PaddleX::Model model;
- model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_device);
+ model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device);
- // 进行预测
+ // predict
if (FLAGS_image_list != "") {
std::ifstream inf(FLAGS_image_list);
if (!inf) {
diff --git a/deploy/openvino/demo/detector.cpp b/deploy/openvino/demo/detector.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..66a31cefc0fa500ad77353e0f9bdd43e4564cc81
--- /dev/null
+++ b/deploy/openvino/demo/detector.cpp
@@ -0,0 +1,110 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include
+#include
+
+#include
+#include // NOLINT
+#include
+#include
+#include
+#include
+#include
+
+#include "include/paddlex/paddlex.h"
+#include "include/paddlex/visualize.h"
+
+using namespace std::chrono; // NOLINT
+
+DEFINE_string(model_dir, "", "Path of openvino model xml file");
+DEFINE_string(cfg_file, "", "Path of PaddleX model yaml file");
+DEFINE_string(image, "", "Path of test image file");
+DEFINE_string(image_list, "", "Path of test image list file");
+DEFINE_string(device, "CPU", "Device name");
+DEFINE_string(save_dir, "", "Path to save visualized image");
+DEFINE_int32(batch_size, 1, "Batch size of infering");
+DEFINE_double(threshold,
+ 0.5,
+ "The minimum scores of target boxes which are shown");
+
+int main(int argc, char** argv) {
+ google::ParseCommandLineFlags(&argc, &argv, true);
+ if (FLAGS_model_dir == "") {
+ std::cerr << "--model_dir need to be defined" << std::endl;
+ return -1;
+ }
+ if (FLAGS_cfg_file == "") {
+ std::cerr << "--cfg_file need to be defined" << std::endl;
+ return -1;
+ }
+  if (FLAGS_image == "" && FLAGS_image_list == "") {
+ std::cerr << "--image or --image_list need to be defined" << std::endl;
+ return -1;
+ }
+
+ // load model
+ PaddleX::Model model;
+ model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device);
+
+ int imgs = 1;
+ auto colormap = PaddleX::GenerateColorMap(model.labels.size());
+ // predict
+ if (FLAGS_image_list != "") {
+ std::ifstream inf(FLAGS_image_list);
+ if (!inf) {
+ std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
+ return -1;
+ }
+ std::string image_path;
+ while (getline(inf, image_path)) {
+ PaddleX::DetResult result;
+ cv::Mat im = cv::imread(image_path, 1);
+ model.predict(im, &result);
+ if (FLAGS_save_dir != "") {
+ cv::Mat vis_img = PaddleX::Visualize(
+ im, result, model.labels, colormap, FLAGS_threshold);
+ std::string save_path =
+          PaddleX::generate_save_path(FLAGS_save_dir, image_path);
+ cv::imwrite(save_path, vis_img);
+ std::cout << "Visualized output saved as " << save_path << std::endl;
+ }
+ }
+ } else {
+ PaddleX::DetResult result;
+ cv::Mat im = cv::imread(FLAGS_image, 1);
+ model.predict(im, &result);
+ for (int i = 0; i < result.boxes.size(); ++i) {
+ std::cout << "image file: " << FLAGS_image << std::endl;
+ std::cout << ", predict label: " << result.boxes[i].category
+ << ", label_id:" << result.boxes[i].category_id
+ << ", score: " << result.boxes[i].score
+ << ", box(xmin, ymin, w, h):(" << result.boxes[i].coordinate[0]
+ << ", " << result.boxes[i].coordinate[1] << ", "
+ << result.boxes[i].coordinate[2] << ", "
+ << result.boxes[i].coordinate[3] << ")" << std::endl;
+ }
+ if (FLAGS_save_dir != "") {
+ // visualize
+ cv::Mat vis_img = PaddleX::Visualize(
+ im, result, model.labels, colormap, FLAGS_threshold);
+ std::string save_path =
+ PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
+ cv::imwrite(save_path, vis_img);
+ result.clear();
+ std::cout << "Visualized output saved as " << save_path << std::endl;
+ }
+ }
+ return 0;
+}
diff --git a/deploy/openvino/demo/segmenter.cpp b/deploy/openvino/demo/segmenter.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..bb6886aae8def104a9a3923443f9609684b3b154
--- /dev/null
+++ b/deploy/openvino/demo/segmenter.cpp
@@ -0,0 +1,90 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "include/paddlex/paddlex.h"
+#include "include/paddlex/visualize.h"
+
+
+DEFINE_string(model_dir, "", "Path of openvino model xml file");
+DEFINE_string(cfg_file, "", "Path of PaddleX model yaml file");
+DEFINE_string(image, "", "Path of test image file");
+DEFINE_string(image_list, "", "Path of test image list file");
+DEFINE_string(device, "CPU", "Device name");
+DEFINE_string(save_dir, "", "Path to save visualized image");
+DEFINE_int32(batch_size, 1, "Batch size of infering");
+
+
+int main(int argc, char** argv) {
+ google::ParseCommandLineFlags(&argc, &argv, true);
+ if (FLAGS_model_dir == "") {
+ std::cerr << "--model_dir need to be defined" << std::endl;
+ return -1;
+ }
+ if (FLAGS_cfg_file == "") {
+ std::cerr << "--cfg_file need to be defined" << std::endl;
+ return -1;
+ }
+  if (FLAGS_image == "" && FLAGS_image_list == "") {
+ std::cerr << "--image or --image_list need to be defined" << std::endl;
+ return -1;
+ }
+
+ // load model
+ PaddleX::Model model;
+ model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device);
+ int imgs = 1;
+ auto colormap = PaddleX::GenerateColorMap(model.labels.size());
+
+ if (FLAGS_image_list != "") {
+ std::ifstream inf(FLAGS_image_list);
+ if (!inf) {
+ std::cerr << "Fail to open file " << FLAGS_image_list <
#include
#include
+#include