diff --git a/deploy/cpp/README.md b/deploy/cpp/README.md
index c9d1c08de5cbea3c09980ff138c2ff7a417e93bd..790a3a6ae5a28a40416ac0cd940d7760b1b1c509 100644
--- a/deploy/cpp/README.md
+++ b/deploy/cpp/README.md
@@ -25,7 +25,7 @@
 ## 2. Main directories and files
 
 ```
-inference
+cpp
 ├── demo.cpp # C++ demo that loads the model, reads input data, and runs prediction
 |
 ├── conf
@@ -90,6 +90,8 @@ deeplabv3p_xception65_humanseg
 DEPLOY:
     # whether to use the GPU for prediction
     USE_GPU: 1
+    # whether this is a new-style PaddleSeg 0.3.0 model
+    USE_PR: 1
     # directory containing the model and parameter files
    MODEL_PATH: "/root/projects/models/deeplabv3p_xception65_humanseg"
     # model file name
@@ -125,11 +127,11 @@ DEPLOY:
 Run the following command on `Linux`:
 ```shell
-./demo --conf=/root/projects/PaddleSeg/inference/conf/humanseg.yaml --input_dir=/root/projects/PaddleSeg/inference/images/humanseg/
+./demo --conf=/root/projects/PaddleSeg/deploy/cpp/conf/humanseg.yaml --input_dir=/root/projects/PaddleSeg/deploy/cpp/images/humanseg/
 ```
 
 Run the following command on `Windows`:
 ```shell
-D:\projects\PaddleSeg\inference\build\Release>demo.exe --conf=D:\\projects\\PaddleSeg\\inference\\conf\\humanseg.yaml --input_dir=D:\\projects\\PaddleSeg\\inference\\images\\humanseg\\
+D:\projects\PaddleSeg\deploy\cpp\build\Release>demo.exe --conf=D:\\projects\\PaddleSeg\\deploy\\cpp\\conf\\humanseg.yaml --input_dir=D:\\projects\\PaddleSeg\\deploy\\cpp\\images\\humanseg\\
 ```
 
@@ -141,7 +143,7 @@
 | input_dir | directory of images to predict |
 
-For an explanation of the configuration file, see the previous step. The sample program scans input_dir for all images with a **jpg or jpeg** extension and generates the corresponding prediction results (if input_dir contains no **jpg or jpeg** images, the program reports an error). Image segmentation classifies every pixel of `demo.jpg`, and the prediction result is saved in `demo_jpg.png`. The raw segmentation result is not directly viewable and must be visualized first. For binary segmentation models, the sample program automatically converts the prediction into a visualized result saved in `demo_jpg_scoremap.png`, with the original-size prediction in `demo_jpg_recover.png`, as shown below. For **multi-class** segmentation models, see [how to use the visualization script](./docs/vis.md).
+For an explanation of the configuration file, see the previous step. The sample program scans input_dir for all images with a **jpg or jpeg** extension and generates the corresponding prediction results (if input_dir contains no **jpg or jpeg** images, the program reports an error). Image segmentation classifies every pixel of `demo.jpg`, and the prediction result is saved in `demo_jpg_mask.png`. The raw segmentation result is not directly viewable and must be visualized first. To **visualize** the prediction results, see [how to use the visualization script](./docs/vis.md).
 
 Input image
 ![avatar](images/humanseg/demo2.jpeg)
diff --git a/deploy/cpp/demo.cpp b/deploy/cpp/demo.cpp
index 2202b31b739bf18682fdf468b36ffe4e9e434726..729b36dde098b4ddaa6d0ed1d0da320c82e1a304 100644
--- a/deploy/cpp/demo.cpp
+++ b/deploy/cpp/demo.cpp
@@ -24,7 +24,7 @@ int main(int argc, char** argv) {
     google::ParseCommandLineFlags(&argc, &argv, true);
     if (FLAGS_conf.empty() || FLAGS_input_dir.empty()) {
         std::cout << "Usage: ./predictor --conf=/config/path/to/your/model "
-                  << "--input_dir=/directory/of/your/input/images";
+                  << "--input_dir=/directory/of/your/input/images" << std::endl;
         return -1;
     }
     // 1. create a predictor and init it with conf
diff --git a/deploy/cpp/docs/linux_build.md b/deploy/cpp/docs/linux_build.md
index 75a16bbea7499eaa007884b0b1f16126eacca56a..b294eb698e75df29f2607f0e73b27075c7b8d397 100644
--- a/deploy/cpp/docs/linux_build.md
+++ b/deploy/cpp/docs/linux_build.md
@@ -16,7 +16,7 @@
 1. `mkdir -p /root/projects/ && cd /root/projects`
 2. `git clone https://github.com/PaddlePaddle/PaddleSeg.git`
 
-The `C++` prediction code lives in the `/root/projects/PaddleSeg/inference` directory, which does not depend on any other directory under `PaddleSeg`.
+The `C++` prediction code lives in the `/root/projects/PaddleSeg/deploy/cpp` directory, which does not depend on any other directory under `PaddleSeg`.
 
 ### Step2: Download the PaddlePaddle C++ inference library fluid_inference
 
@@ -25,9 +25,9 @@ The PaddlePaddle C++ inference library comes in a CPU build and GPU builds; the GPU builds are further split by CUDA version.
 | Version | Link |
 | ---- | ---- |
-| CPU build | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cpu_1.6.1.tgz) |
-| CUDA 9.0 build | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda97_1.6.1.tgz) |
-| CUDA 10.0 build | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda10_1.6.1.tgz) |
+| CPU build | [fluid_inference.tgz](https://paddle-inference-lib.bj.bcebos.com/1.6.1-cpu-avx-mkl/fluid_inference.tgz) |
+| CUDA 9.0 build | [fluid_inference.tgz](https://paddle-inference-lib.bj.bcebos.com/1.6.1-gpu-cuda9-cudnn7-avx-mkl/fluid_inference.tgz) |
+| CUDA 10.0 build | [fluid_inference.tgz](https://paddle-inference-lib.bj.bcebos.com/1.6.1-gpu-cuda10-cudnn7-avx-mkl/fluid_inference.tgz) |
 
 For other CPU types and instruction sets, more prebuilt inference library variants are officially available; the 1.6 inference libraries have already been released. For the remaining variants, see: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html)
 
@@ -75,7 +75,7 @@ make install
 When building against the **GPU** inference library, run the commands below. **Note**: replace the corresponding parameters with the actual paths of the dependencies above:
 ```shell
-cd /root/projects/PaddleSeg/inference
+cd /root/projects/PaddleSeg/deploy/cpp
 mkdir build && cd build
 cmake .. -DWITH_GPU=ON -DPADDLE_DIR=/root/projects/fluid_inference -DCUDA_LIB=/usr/local/cuda/lib64/ -DOPENCV_DIR=/root/projects/opencv3/ -DCUDNN_LIB=/usr/local/cuda/lib64/ -DWITH_STATIC_LIB=OFF
 make
@@ -83,7 +83,7 @@ make
 When building against the **CPU** inference library, run the commands below.
 ```shell
-cd /root/projects/PaddleSeg/inference
+cd /root/projects/PaddleSeg/deploy/cpp
 mkdir build && cd build
 cmake .. -DWITH_GPU=OFF -DPADDLE_DIR=/root/projects/fluid_inference -DOPENCV_DIR=/root/projects/opencv3/ -DWITH_STATIC_LIB=OFF
 
@@ -98,4 +98,4 @@ make
 ./demo --conf=/path/to/your/conf --input_dir=/path/to/your/input/data/directory
 ```
 
-For more details, see the README: [prediction and visualization](../README.md)
\ No newline at end of file
+For more details, see the README: [prediction and visualization](../README.md)
diff --git a/deploy/cpp/docs/vis.md b/deploy/cpp/docs/vis.md
index bed5c7adba7c9739fa0a10a46d04ff6b060eeeb2..60a588f5f92174c7ffba5706fc62d36a2281c0cb 100644
--- a/deploy/cpp/docs/vis.md
+++ b/deploy/cpp/docs/vis.md
@@ -12,7 +12,7 @@
-cd inference/tools/
+cd deploy/cpp/tools/
 # copy the saved segmentation prediction image into this directory
-cp XXX/demo_jpg.png .
+cp XXX/demo_jpg_mask.png .
 # run the visualization script
-python visualize.py demo.jpg demo_jpg.png vis_result.png
+python visualize.py demo.jpg demo_jpg_mask.png vis_result.png
 ```
 
 The meaning of each parameter in the visualization example above is listed below; change them to the **actual paths** of the images on your test machine.
diff --git a/deploy/cpp/docs/windows_vs2015_build.md b/deploy/cpp/docs/windows_vs2015_build.md
index f0c96a18b1204b434653be8cd29dce57d229d10c..1c975015d3967d341051b9c6d5c33abb17aed7b1 100644
--- a/deploy/cpp/docs/windows_vs2015_build.md
+++ b/deploy/cpp/docs/windows_vs2015_build.md
@@ -15,7 +15,7 @@
 1. Open `cmd` and run `cd /d D:\projects`
 2. `git clone http://gitlab.baidu.com/Paddle/PaddleSeg.git`
 
-The `C++` prediction code lives in the `D:\projects\PaddleSeg\inference` directory, which does not depend on any other directory under `PaddleSeg`.
+The `C++` prediction code lives in the `D:\projects\PaddleSeg\deploy\cpp` directory, which does not depend on any other directory under `PaddleSeg`.
 
 ### Step2: Download the PaddlePaddle C++ inference library fluid_inference
 
@@ -24,9 +24,9 @@ The PaddlePaddle C++ inference library comes in two major builds: CPU and GPU.
 | Version | Link |
 | ---- | ---- |
-| CPU build | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_install_dir_win_cpu_1.6.zip) |
-| CUDA 9.0 build | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda9_1.6.1.zip) |
-| CUDA 10.0 build | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda10_1.6.1.zip) |
+| CPU build | [fluid_inference_install_dir.zip](https://paddle-wheel.bj.bcebos.com/1.6.2/win-infer/mkl/cpu/fluid_inference_install_dir.zip) |
+| CUDA 9.0 build | [fluid_inference_install_dir.zip](https://paddle-wheel.bj.bcebos.com/1.6.2/win-infer/mkl/post97/fluid_inference_install_dir.zip) |
+| CUDA 10.0 build | [fluid_inference_install_dir.zip](https://paddle-wheel.bj.bcebos.com/1.6.2/win-infer/mkl/post107/fluid_inference_install_dir.zip) |
 
 After extraction, the `D:\projects\fluid_inference` directory contains:
 ```
@@ -70,19 +70,19 @@ call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd6
 
 ```bash
 # switch to the directory of the prediction code
-cd /d D:\projects\PaddleSeg\inference\
+cd /d D:\projects\PaddleSeg\deploy\cpp\
 # create the build directory; to rebuild from scratch, just delete it
 mkdir build
 cd build
 # generate the VS project with cmake
-D:\projects\PaddleSeg\inference\build> cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DPADDLE_DIR=D:\projects\fluid_inference -DCUDA_LIB=D:\projects\cudalib\v9.0\lib\x64 -DOPENCV_DIR=D:\projects\opencv -T host=x64
+D:\projects\PaddleSeg\deploy\cpp\build> cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DPADDLE_DIR=D:\projects\fluid_inference -DCUDA_LIB=D:\projects\cudalib\v9.0\lib\x64 -DOPENCV_DIR=D:\projects\opencv -T host=x64
 ```
 
 When building against the **CPU** inference library, run the commands below.
 
 ```bash
 # switch to the directory of the prediction code
-cd /d D:\projects\PaddleSeg\inference\
+cd /d D:\projects\PaddleSeg\deploy\cpp\
 # create the build directory; to rebuild from scratch, just delete it
 mkdir build
 cd build
@@ -102,7 +102,7 @@ D:\projects\PaddleSeg\inference\build> msbuild /m /p:Configuration=Release cpp_i
 The executable produced by the `Visual Studio 2015` build is under the `build\release` directory; switch to it:
 ```
-cd /d D:\projects\PaddleSeg\inference\build\release
+cd /d D:\projects\PaddleSeg\deploy\cpp\build\release
 ```
 Then run the command:
diff --git a/deploy/cpp/docs/windows_vs2019_build.md b/deploy/cpp/docs/windows_vs2019_build.md
index 890844674db848177c4f859f4a8b8ef8d7360fa7..5862740b19c4d146ce38b458cd8cbe76f7a84747 100644
--- a/deploy/cpp/docs/windows_vs2019_build.md
+++ b/deploy/cpp/docs/windows_vs2019_build.md
@@ -15,7 +15,7 @@ On the Windows platform we use `Visual Studio 2015` and `Visual Studio 2019 Com
 
 ### Step1: Download the code
 
-1. Download the source code: [download link](https://github.com/PaddlePaddle/PaddleSeg/archive/release/v0.2.0.zip)
+1. Download the source code: [download link](https://github.com/PaddlePaddle/PaddleSeg/archive/release/v0.3.0.zip)
 2. Extract it and rename the extracted directory to `PaddleSeg`
 
 The following takes `D:\projects\PaddleSeg` as the code directory path.
 
 ### Step2: Download the PaddlePaddle C++ inference library fluid_inference
 
@@ -27,9 +27,9 @@ The PaddlePaddle C++ inference library comes in two major builds: CPU and GPU.
 | Version | Link |
 | ---- | ---- |
-| CPU build | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_install_dir_win_cpu_1.6.zip) |
-| CUDA 9.0 build | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda9_1.6.1.zip) |
-| CUDA 10.0 build | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda10_1.6.1.zip) |
+| CPU build | [fluid_inference_install_dir.zip](https://paddle-wheel.bj.bcebos.com/1.6.1/win-infer/mkl/cpu/fluid_inference_install_dir.zip) |
+| CUDA 9.0 build | [fluid_inference_install_dir.zip](https://paddle-wheel.bj.bcebos.com/1.6.1/win-infer/mkl/post97/fluid_inference_install_dir.zip) |
+| CUDA 10.0 build | [fluid_inference_install_dir.zip](https://paddle-wheel.bj.bcebos.com/1.6.1/win-infer/mkl/post107/fluid_inference_install_dir.zip) |
 
 After extraction, the `D:\projects\fluid_inference` directory contains:
 ```
@@ -74,6 +74,7 @@ fluid_inference
 | *CUDA_LIB | path to the CUDA libraries |
 | OPENCV_DIR | OpenCV installation path |
 | PADDLE_DIR | path to the Paddle inference library |
+**Note**: when using the CPU build of the inference library, untick CUDA_LIB.
 
 ![step4](https://paddleseg.bj.bcebos.com/inference/vs2019_step5.png)
@@ -89,7 +90,7 @@ fluid_inference
 The executable produced by the `Visual Studio 2019` build is under the `out\build\x64-Release` directory; open `cmd` and switch to it:
 ```
-cd /d D:\projects\PaddleSeg\inference\out\build\x64-Release
+cd /d D:\projects\PaddleSeg\deploy\cpp\out\build\x64-Release
 ```
 Then run the command:
diff --git a/deploy/cpp/predictor/seg_predictor.cpp b/deploy/cpp/predictor/seg_predictor.cpp
index 788748ff6de2abb1aba89615955334f45ce23205..2580148c52115edfa60219e4f5cf7f572c865d23 100644
--- a/deploy/cpp/predictor/seg_predictor.cpp
+++ b/deploy/cpp/predictor/seg_predictor.cpp
@@ -83,7 +83,6 @@ namespace PaddleSolution {
 
         int blob_out_len = length;
         int seg_out_len = eval_height * eval_width * eval_num_class;
-
        if (blob_out_len != seg_out_len) {
             LOG(ERROR) << " [FATAL] unequal: input vs output [" <<
                 seg_out_len << "|" << blob_out_len << "]" << std::endl;
@@ -99,23 +98,20 @@ namespace PaddleSolution {
             std::string nname(fname);
             auto pos = fname.rfind(".");
             nname[pos] = '_';
-            std::string mask_save_name = nname + ".png";
+            std::string mask_save_name = nname + "_mask.png";
             cv::imwrite(mask_save_name, mask_png);
             cv::Mat scoremap_png = cv::Mat(eval_height, eval_width, CV_8UC1);
             scoremap_png.data = _scoremap.data();
-            std::string scoremap_save_name = nname
-                                           + std::string("_scoremap.png");
+            std::string scoremap_save_name = nname + std::string("_scoremap.png");
             cv::imwrite(scoremap_save_name, scoremap_png);
             std::cout << "save mask of [" << fname << "] done" << std::endl;
             if (height && width) {
                 int recover_height = *height;
                 int recover_width = *width;
-                cv::Mat recover_png = cv::Mat(recover_height,
-                        recover_width, CV_8UC1);
+                cv::Mat recover_png = cv::Mat(recover_height, recover_width, CV_8UC1);
                 cv::resize(scoremap_png, recover_png,
-                        cv::Size(recover_width, recover_height),
-                        0, 0, cv::INTER_CUBIC);
+                        cv::Size(recover_width, recover_height), 0, 0, cv::INTER_CUBIC);
                 std::string recover_name = nname + std::string("_recover.png");
                 cv::imwrite(recover_name, recover_png);
             }
@@ -176,8 +172,13 @@ namespace PaddleSolution {
             }
             paddle::PaddleTensor im_tensor;
             im_tensor.name = "image";
-            im_tensor.shape = std::vector<int>{ batch_size, channels,
-                                        eval_height, eval_width };
+            if (!_model_config._use_pr) {
+                im_tensor.shape = std::vector<int>{ batch_size, channels,
+                                            eval_height, eval_width };
+            } else {
+                im_tensor.shape = std::vector<int>{ batch_size, eval_height,
+                                            eval_width, channels };
+            }
             im_tensor.data.Reset(input_buffer.data(),
                                 real_buffer_size * sizeof(float));
             im_tensor.dtype = paddle::PaddleDType::FLOAT32;
@@ -202,19 +203,45 @@ namespace PaddleSolution {
                 std::cout << _outputs[0].shape[j] << ",";
             }
             std::cout << ")" << std::endl;
-            const size_t nums = _outputs.front().data.length()
-                            / sizeof(float);
-            if (out_num % batch_size != 0 || out_num != nums) {
-                LOG(ERROR) << "outputs data size mismatch with shape size.";
+
+            size_t nums = _outputs.front().data.length() / sizeof(float);
+            if (_model_config._use_pr) {
+                nums = _outputs.front().data.length() / sizeof(int64_t);
+            }
+            // size mismatch checking
+            bool size_mismatch = out_num % batch_size;
+            size_mismatch |= (!_model_config._use_pr) && (nums != out_num);
+            size_mismatch |= _model_config._use_pr && (nums != eval_height * eval_width);
+            if (size_mismatch) {
+                LOG(ERROR) << "output with an unexpected size";
                 return -1;
             }
+
+            if (_model_config._use_pr) {
+                std::vector<uchar> out_data;
+                out_data.resize(out_num);
+                auto addr = reinterpret_cast<int64_t*>(_outputs[0].data.data());
+                for (int r = 0; r < out_num; ++r) {
+                    out_data[r] = (int)(addr[r]);
+                }
+                for (int r = 0; r < batch_size; ++r) {
+                    cv::Mat mask_png = cv::Mat(eval_height, eval_width, CV_8UC1);
+                    mask_png.data = out_data.data() + eval_height*eval_width*r;
+                    auto name = imgs_batch[r];
+                    auto pos = name.rfind(".");
+                    name[pos] = '_';
+                    std::string mask_save_name = name + "_mask.png";
+                    cv::imwrite(mask_save_name, mask_png);
+                }
+                continue;
+            }
+
             for (int i = 0; i < batch_size; ++i) {
                 float* output_addr = reinterpret_cast<float*>(
                         _outputs[0].data.data())
-                        + i * (out_num / batch_size);
+                        + i * (nums / batch_size);
                 output_mask(imgs_batch[i], output_addr,
-                            out_num / batch_size,
+                            nums / batch_size,
                             &org_height[i],
                             &org_width[i]);
             }
@@ -278,8 +305,14 @@ namespace PaddleSolution {
                 return -1;
             }
             auto im_tensor = _main_predictor->GetInputTensor("image");
-            im_tensor->Reshape({ batch_size, channels,
+            if (!_model_config._use_pr) {
+                im_tensor->Reshape({ batch_size, channels,
                                 eval_height, eval_width });
+            } else {
+                im_tensor->Reshape({ batch_size, eval_height,
+                                eval_width, channels });
+            }
+
             im_tensor->copy_from_cpu(input_buffer.data());
 
             auto t1 = std::chrono::high_resolution_clock::now();
@@ -292,7 +325,6 @@ namespace PaddleSolution {
             auto output_names = _main_predictor->GetOutputNames();
             auto output_t = _main_predictor->GetOutputTensor(
                     output_names[0]);
-            std::vector<float> out_data;
             std::vector<int> output_shape = output_t->shape();
 
             int out_num = 1;
@@ -303,6 +335,30 @@ namespace PaddleSolution {
             }
             std::cout << ")" << std::endl;
 
+            if (_model_config._use_pr) {
+                std::vector<int64_t> out_data;
+                out_data.resize(out_num);
+                output_t->copy_to_cpu(out_data.data());
+
+                std::vector<uchar> mask_data;
+                mask_data.resize(out_num);
+                auto addr = reinterpret_cast<int64_t*>(out_data.data());
+                for (int r = 0; r < out_num; ++r) {
+                    mask_data[r] = (int)(addr[r]);
+                }
+                for (int r = 0; r < batch_size; ++r) {
+                    cv::Mat mask_png = cv::Mat(eval_height, eval_width, CV_8UC1);
+                    mask_png.data = mask_data.data() + eval_height*eval_width*r;
+                    auto name = imgs_batch[r];
+                    auto pos = name.rfind(".");
+                    name[pos] = '_';
+                    std::string mask_save_name = name + "_mask.png";
+                    cv::imwrite(mask_save_name, mask_png);
+                }
+                continue;
+            }
+
+            std::vector<float> out_data;
             out_data.resize(out_num);
             output_t->copy_to_cpu(out_data.data());
             for (int i = 0; i < batch_size; ++i) {
diff --git a/deploy/cpp/preprocessor/preprocessor_seg.cpp b/deploy/cpp/preprocessor/preprocessor_seg.cpp
index 7c74042071143be53e11f1e7915531ea9354f356..11505a139046603b3c25bcd20af151eb871af4be 100644
--- a/deploy/cpp/preprocessor/preprocessor_seg.cpp
+++ b/deploy/cpp/preprocessor/preprocessor_seg.cpp
@@ -40,14 +40,18 @@ namespace PaddleSolution {
             LOG(ERROR) << "Only support rgb(gray) and rgba image.";
             return false;
         }
-
         cv::Size resize_size(_config->_resize[0], _config->_resize[1]);
         int rw = resize_size.width;
         int rh = resize_size.height;
         if (*ori_h != rh || *ori_w != rw) {
             cv::resize(im, im, resize_size, 0, 0, cv::INTER_LINEAR);
         }
-        utils::normalize(im, data, _config->_mean, _config->_std);
+
+        if (!_config->_use_pr) {
+            utils::normalize(im, data, _config->_mean, _config->_std);
+        } else {
+            utils::flatten_mat(im, data);
+        }
         return true;
     }
diff --git a/deploy/cpp/utils/seg_conf_parser.h b/deploy/cpp/utils/seg_conf_parser.h
index c217078c84421fa0e292110bf78369caa8c32a07..080a0530f440e12459644f771a7b4e07641323d2 100644
--- a/deploy/cpp/utils/seg_conf_parser.h
+++ b/deploy/cpp/utils/seg_conf_parser.h
@@ -25,6 +25,7 @@ class PaddleSegModelConfigPaser {
     :_class_num(0),
     _channels(0),
     _use_gpu(0),
+    _use_pr(0),
     _batch_size(1),
     _model_file_name("__model__"),
     _param_file_name("__params__") {
@@ -40,6 +41,7 @@ class PaddleSegModelConfigPaser {
         _class_num = 0;
         _channels = 0;
         _use_gpu = 0;
+        _use_pr = 0;
         _batch_size = 1;
         _model_file_name.clear();
         _model_path.clear();
@@ -172,6 +174,12 @@ class PaddleSegModelConfigPaser {
             std::cerr << "Please set CHANNELS: x" << std::endl;
             return false;
         }
+        // 15. use_pr
+        if (config["DEPLOY"]["USE_PR"].IsDefined()) {
+            _use_pr = config["DEPLOY"]["USE_PR"].as<int>();
+        } else {
+            _use_pr = 0;
+        }
         return true;
     }
@@ -238,6 +246,8 @@ class PaddleSegModelConfigPaser {
     std::string _predictor_mode;
     // DEPLOY.BATCH_SIZE
     int _batch_size;
+    // DEPLOY.USE_PR: whether this is an op-optimized PaddleSeg 0.3.0 model
+    int _use_pr;
 };
 }  // namespace PaddleSolution
diff --git a/deploy/cpp/utils/utils.h b/deploy/cpp/utils/utils.h
index 7e322daa03c02e704509f032d5709684a341060f..4578a6a8a612c955564cfb6ac279b0f92c9f3923 100644
--- a/deploy/cpp/utils/utils.h
+++ b/deploy/cpp/utils/utils.h
@@ -23,7 +23,8 @@
 #include <fstream>
 
 #ifdef _WIN32
-#include <experimental/filesystem>
+#define GLOG_NO_ABBREVIATED_SEVERITIES
+#include <windows.h>
 #else
 #include <dirent.h>
 #include <sys/types.h>
@@ -67,15 +68,21 @@ namespace utils {
     // scan a directory and get all files with input extensions
     inline std::vector<std::string> get_directory_images(
         const std::string& path, const std::string& exts) {
+        std::string pattern(path);
+        pattern.append("\\*");
         std::vector<std::string> imgs;
-        for (const auto& item :
-            std::experimental::filesystem::directory_iterator(path)) {
-            auto suffix = item.path().extension().string();
-            if (exts.find(suffix) != std::string::npos && suffix.size() > 0) {
-                auto fullname = path_join(path,
-                                          item.path().filename().string());
-                imgs.push_back(item.path().string());
-            }
+        WIN32_FIND_DATA data;
+        HANDLE hFind;
+        if ((hFind = FindFirstFile(pattern.c_str(), &data)) != INVALID_HANDLE_VALUE) {
+            do {
+                auto fname = std::string(data.cFileName);
+                auto pos = fname.rfind(".");
+                auto ext = fname.substr(pos + 1);
+                if (ext.size() > 1 && exts.find(ext) != std::string::npos) {
+                    imgs.push_back(path + "\\" + data.cFileName);
+                }
+            } while (FindNextFile(hFind, &data) != 0);
+            FindClose(hFind);
         }
         return imgs;
     }
@@ -103,6 +110,25 @@ namespace utils {
         }
     }
 
+    // flatten a cv::Mat into an HWC float buffer, without normalization
+    inline void flatten_mat(cv::Mat& im, float* data) {
+        int rh = im.rows;
+        int rw = im.cols;
+        int rc = im.channels();
+        #pragma omp parallel for
+        for (int h = 0; h < rh; ++h) {
+            const uchar* ptr = im.ptr<uchar>(h);
+            int im_index = 0;
+            int top_index = h * rw * rc;
+            for (int w = 0; w < rw; ++w) {
+                for (int c = 0; c < rc; ++c) {
+                    float pixel = static_cast<float>(ptr[im_index++]);
+                    data[top_index++] = pixel;
+                }
+            }
+        }
+    }
+
     // argmax
     inline void argmax(float* out, std::vector<int>& shape,
                        std::vector<uchar>& mask, std::vector<uchar>& scoremap) {
diff --git a/pdseg/export_model.py b/pdseg/export_model.py
index 410cfc33432255fda8ccc5fada1ec0433f11f243..93e4b4949fa3f18c934844edbad4693813e67f15 100644
--- a/pdseg/export_model.py
+++ b/pdseg/export_model.py
@@ -52,6 +52,7 @@ def parse_args():
 def export_inference_config():
     deploy_cfg = '''DEPLOY:
         USE_GPU : 1
+        USE_PR : 1
         MODEL_PATH : "%s"
         MODEL_FILENAME : "%s"
         PARAMS_FILENAME : "%s"
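
For reviewers who want to sanity-check the new `USE_PR` input path outside the predictor: the sketch below mirrors the layout rule this patch introduces in `utils.h` — old models get mean/std-normalized NCHW input, while `USE_PR: 1` models receive raw pixel values flattened in NHWC order. The `flatten_hwc` helper and the `main` harness are illustrative stand-ins written for this note, not code from the patch.

```cpp
#include <opencv2/opencv.hpp>

#include <iostream>
#include <vector>

// Same layout rule as utils::flatten_mat: row-major HWC order,
// raw pixel values cast to float, no mean/std normalization.
static void flatten_hwc(const cv::Mat& im, float* data) {
    const int rh = im.rows, rw = im.cols, rc = im.channels();
    for (int h = 0; h < rh; ++h) {
        const uchar* ptr = im.ptr<uchar>(h);
        int im_index = 0;
        int top_index = h * rw * rc;
        for (int w = 0; w < rw; ++w) {
            for (int c = 0; c < rc; ++c) {
                data[top_index++] = static_cast<float>(ptr[im_index++]);
            }
        }
    }
}

int main() {
    // tiny 2x2 BGR test image where every pixel is (10, 20, 30)
    cv::Mat im(2, 2, CV_8UC3, cv::Scalar(10, 20, 30));
    std::vector<float> buf(2 * 2 * 3);
    flatten_hwc(im, buf.data());
    // Prints "10 20 30" once per pixel: channels stay interleaved,
    // matching the { batch, eval_height, eval_width, channels } shape
    // the predictor now sets when _use_pr is on.
    for (float v : buf) std::cout << v << " ";
    std::cout << std::endl;
    return 0;
}
```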