Commit ad743834 authored by joey12300

Merge branch 'master' into release/v0.2.0

modify the inference lib link
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <utils/utils.h>
#include <predictor/seg_predictor.h>
@@ -9,7 +23,8 @@ int main(int argc, char** argv) {
    // 0. parse args
    google::ParseCommandLineFlags(&argc, &argv, true);
    if (FLAGS_conf.empty() || FLAGS_input_dir.empty()) {
        std::cout << "Usage: ./predictor --conf=/config/path/to/your/model "
                  << "--input_dir=/directory/of/your/input/images";
        return -1;
    }
    // 1. create a predictor and init it with conf
@@ -20,7 +35,8 @@ int main(int argc, char** argv) {
    }
    // 2. get all the images with extension '.jpeg' at input_dir
    auto imgs = PaddleSolution::utils::get_directory_images(FLAGS_input_dir,
                                                            ".jpeg|.jpg");
    // 3. predict
    predictor.predict(imgs);
    return 0;
...
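The `FLAGS_conf` and `FLAGS_input_dir` variables used by `main` are gflags command-line flags; their definitions are not shown in this diff. A hypothetical sketch of what such definitions look like:

```cpp
// Hypothetical gflags definitions for the flags referenced in main(); the real
// definitions live elsewhere in the project and may use different help strings.
#include <gflags/gflags.h>

DEFINE_string(conf, "", "Path to the deployment configuration yaml of the model");
DEFINE_string(input_dir, "", "Directory containing the input images");
```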
@@ -6,7 +6,7 @@

## Prerequisites
* G++ 4.8.2 ~ 4.9.4
* CMake 3.0+
* CUDA 9.0 / CUDA 10.0, cuDNN 7+ (required only when using the GPU version of the inference library)

Please make sure the software above is installed. **All examples below assume the working directory is `/root/projects/`.**

@@ -20,17 +20,16 @@

### Step2: Download the PaddlePaddle C++ inference library fluid_inference

The PaddlePaddle C++ inference library comes in a CPU version and a GPU version. The GPU version is further split by CUDA version into two builds: CUDA 9.0 and CUDA 10.0. Download links for each C++ inference library build are listed below:

| Version | Link |
| ---- | ---- |
| CPU version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cpu_1.6.1.tgz) |
| CUDA 9.0 version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda97_1.6.1.tgz) |
| CUDA 10.0 version | [fluid_inference.tgz](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_linux_cuda10_1.6.1.tgz) |

More prebuilt inference libraries are officially available for other CPU types and instruction sets, and the 1.6 inference library has been released. For the other builds, see the [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_usage/deploy/inference/build_and_install_lib_cn.html).

After downloading and extracting, the `/root/projects/fluid_inference` directory contains:

@@ -63,7 +62,7 @@ make install

### Step4: Build

The `CMake` build involves four parameters that specify the paths of the core dependencies. They are defined as follows (entries marked with * are only needed when using the **GPU version** of the inference library; keep the CUDA library version aligned with the inference library: **use CUDA 9.0 or 10.0, not 9.2 or 10.1**):

| Parameter | Meaning |
| ---- | ---- |

@@ -84,6 +83,7 @@ make

When building against the **CPU version** of the inference library, run:

```shell
cd /root/projects/PaddleSeg/inference
mkdir build && cd build
cmake .. -DWITH_GPU=OFF -DPADDLE_DIR=/root/projects/fluid_inference -DOPENCV_DIR=/root/projects/opencv3/ -DWITH_STATIC_LIB=OFF
make
```

@@ -97,4 +97,4 @@ make

```
./demo --conf=/path/to/your/conf --input_dir=/path/to/your/input/data/directory
```

For more details, see the README: [Prediction and visualization](../README.md)
@@ -5,7 +5,7 @@

## Prerequisites
* Visual Studio 2015
* CUDA 9.0 / CUDA 10.0, cuDNN 7+ (required only when using the GPU version of the inference library)
* CMake 3.0+

Please make sure the software above is installed. **All examples below assume the working directory is `D:\projects`.**

@@ -20,14 +20,13 @@

### Step2: Download the PaddlePaddle C++ inference library fluid_inference

The PaddlePaddle C++ inference library comes in two major flavors: a CPU version and a GPU version. The GPU version is further split by CUDA version into two builds: CUDA 9.0 and CUDA 10.0. Download the build matching your Windows environment and extract it to `D:\projects\`. Download links for each C++ inference library build are listed below:

| Version | Link |
| ---- | ---- |
| CPU version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_install_dir_win_cpu_1.6.zip) |
| CUDA 9.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda9_1.6.1.zip) |
| CUDA 10.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda10_1.6.1.zip) |

After extraction, the `D:\projects\fluid_inference` directory contains:

@@ -59,31 +58,36 @@ fluid_inference

```
call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
```

The three build parameters are described below (entries marked with * are only needed when using the **GPU version** of the inference library; keep the CUDA library version aligned with the inference library: **use CUDA 9.0 or 10.0, not 9.2, 10.1, or other versions**):

| Parameter | Meaning |
| ---- | ---- |
| *CUDA_LIB | Path to the CUDA libraries |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | Path to the Paddle inference library |

When building against the **GPU version** of the inference library, run the commands below. **Note**: replace the parameters with the actual paths of your dependencies:

```bash
# change to the inference code directory
cd /d D:\projects\PaddleSeg\inference\
# create the build directory; to rebuild from scratch, just delete this directory
mkdir build
cd build
# generate the VS project with cmake
D:\projects\PaddleSeg\inference\build> cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DPADDLE_DIR=D:\projects\fluid_inference -DCUDA_LIB=D:\projects\cudalib\v9.0\lib\x64 -DOPENCV_DIR=D:\projects\opencv -T host=x64
```

When building against the **CPU version** of the inference library, run:

```bash
# change to the inference code directory
cd /d D:\projects\PaddleSeg\inference\
# create the build directory; to rebuild from scratch, just delete this directory
mkdir build
cd build
# generate the VS project with cmake
D:\projects\PaddleSeg\inference\build> cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=OFF -DPADDLE_DIR=D:\projects\fluid_inference -DOPENCV_DIR=D:\projects\opencv -T host=x64
```

The `cmake` flag `-G` selects which Visual Studio version the generated project targets; adjust it to match your own `VS` version. See the [cmake documentation](https://cmake.org/cmake/help/v3.15/manual/cmake-generators.7.html) for details.

...
@@ -6,7 +6,7 @@ On Windows we use `Visual Studio 2015` and `Visual Studio 2019 Com

## Prerequisites
* Visual Studio 2019
* CUDA 9.0 / CUDA 10.0, cuDNN 7+ (required only when using the GPU version of the inference library)
* CMake 3.0+

Please make sure the software above is installed; we use the Community edition of `VS2019`.

@@ -15,7 +15,7 @@

### Step1: Download the code

1. Download the source code: [download link](https://github.com/PaddlePaddle/PaddleSeg/archive/release/v0.2.0.zip)
2. Extract it and rename the extracted directory to `PaddleSeg`

The examples below assume the code directory is `D:\projects\PaddleSeg`.

@@ -23,14 +23,13 @@

### Step2: Download the PaddlePaddle C++ inference library fluid_inference

The PaddlePaddle C++ inference library comes in two major flavors: a CPU version and a GPU version. The GPU version is further split by CUDA version into two builds: CUDA 9.0 and CUDA 10.0. Download the build matching your Windows environment and extract it to `D:\projects\`. Download links for each C++ inference library build are listed below:

| Version | Link |
| ---- | ---- |
| CPU version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_install_dir_win_cpu_1.6.zip) |
| CUDA 9.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda9_1.6.1.zip) |
| CUDA 10.0 version | [fluid_inference_install_dir.zip](https://bj.bcebos.com/paddlehub/paddle_inference_lib/fluid_inference_install_dir_win_cuda10_1.6.1.zip) |

After extraction, the `D:\projects\fluid_inference` directory contains:

@@ -68,11 +67,11 @@ fluid_inference

4. Click `Browse` and set the build options to point at the `CUDA`, `OpenCV`, and `Paddle inference library` paths

The three build parameters are described below (entries marked with * are only needed when using the **GPU version** of the inference library; keep the CUDA library version aligned with the inference library: **use CUDA 9.0 or 10.0, not 9.2, 10.1, or other versions**):

| Parameter | Meaning |
| ---- | ---- |
| *CUDA_LIB | Path to the CUDA libraries |
| OPENCV_DIR | OpenCV installation path |
| PADDLE_DIR | Path to the Paddle inference library |

**Note**: when using the CPU version of the inference library, clear the CUDA_LIB option.

@@ -90,7 +89,7 @@ fluid_inference

The executable produced by the `Visual Studio 2019` build above is located in `out\build\x64-Release`. Open `cmd` and change to that directory:

```
cd /d D:\projects\PaddleSeg\inference\out\build\x64-Release
```

Then run:

...
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <glog/logging.h>
#include <yaml-cpp/yaml.h>
#include <memory>
#include <string>
#include <vector>
#include <thread>
#include <chrono>
#include <algorithm>
#include <paddle_inference_api.h>
#include <opencv2/opencv.hpp>
#include "utils/seg_conf_parser.h"
#include "utils/utils.h"
#include "preprocessor/preprocessor.h"

namespace PaddleSolution {
class Predictor {
 public:
    // init a predictor with a yaml config file
    int init(const std::string& conf);
    // predict api
    int predict(const std::vector<std::string>& imgs);

 private:
    int output_mask(const std::string& fname, float* p_out, int length,
                    int* height = NULL, int* width = NULL);
    int native_predict(const std::vector<std::string>& imgs);
    int analysis_predict(const std::vector<std::string>& imgs);

 private:
    std::vector<float> _buffer;
    std::vector<int> _org_width;
    std::vector<int> _org_height;
    std::vector<std::string> _imgs_batch;
    std::vector<paddle::PaddleTensor> _outputs;

    std::vector<uchar> _mask;
    std::vector<uchar> _scoremap;

    PaddleSolution::PaddleSegModelConfigPaser _model_config;
    std::shared_ptr<PaddleSolution::ImagePreProcessor> _preprocessor;
    std::unique_ptr<paddle::PaddlePredictor> _main_predictor;
};
}  // namespace PaddleSolution
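For reference, a minimal sketch of how this class is driven; it simply mirrors the flow of `demo.cpp` above, is not part of the repository, and uses placeholder paths:

```cpp
// Minimal usage sketch of the Predictor API (mirrors demo.cpp); not part of
// the repository. The yaml path and image directory are placeholders.
#include <predictor/seg_predictor.h>

int run_segmentation() {
    PaddleSolution::Predictor predictor;
    // demo.cpp checks init()'s return value before continuing;
    // zero-on-success is assumed here.
    if (predictor.init("/path/to/deploy.yaml") != 0) {
        return -1;
    }
    // collect every .jpeg/.jpg file in the input directory
    auto imgs = PaddleSolution::utils::get_directory_images(
        "/path/to/images", ".jpeg|.jpg");
    // run inference over all collected images
    return predictor.predict(imgs);
}
```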
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
@@ -7,9 +21,10 @@
namespace PaddleSolution {

std::shared_ptr<ImagePreProcessor> create_processor(
    const std::string& conf_file) {
    auto config = std::make_shared<PaddleSolution::
                                   PaddleSegModelConfigPaser>();
    if (!config->load_config(conf_file)) {
        LOG(FATAL) << "fail to load conf file [" << conf_file << "]";
        return nullptr;
@@ -23,9 +38,9 @@ namespace PaddleSolution {
        return p;
    }
    LOG(FATAL) << "unknown processor_name [" << config->_pre_processor
               << "]";
    return nullptr;
}
}  // namespace PaddleSolution
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#include <string>
@@ -12,18 +26,19 @@
namespace PaddleSolution {

class ImagePreProcessor {
 protected:
    ImagePreProcessor() {}

 public:
    virtual ~ImagePreProcessor() {}

    virtual bool single_process(const std::string& fname, float* data,
                                int* ori_w, int* ori_h) = 0;

    virtual bool batch_process(const std::vector<std::string>& imgs,
                               float* data, int* ori_w, int* ori_h) = 0;
};  // end of class ImagePreProcessor

std::shared_ptr<ImagePreProcessor> create_processor(
    const std::string &config_file);

}  // namespace PaddleSolution
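A minimal sketch (not part of the repository) of how the factory and the interface above fit together; the buffer size and channel count are hypothetical and would normally follow the `DEPLOY` settings in the yaml file:

```cpp
// Minimal sketch of driving a preprocessor obtained from create_processor().
// The buffer size is hypothetical: channels * crop_height * crop_width floats,
// which in the real pipeline follow DEPLOY.CHANNELS and DEPLOY.EVAL_CROP_SIZE.
#include <memory>
#include <string>
#include <vector>
#include "preprocessor/preprocessor.h"

bool preprocess_one(const std::string& conf_file, const std::string& img_path) {
    std::shared_ptr<PaddleSolution::ImagePreProcessor> pre =
        PaddleSolution::create_processor(conf_file);
    if (!pre) {
        return false;
    }
    std::vector<float> buffer(3 * 513 * 513);  // hypothetical 3 x 513 x 513 crop
    int ori_w = 0;
    int ori_h = 0;
    // fills buffer with the normalized CHW tensor and reports the original size
    return pre->single_process(img_path, buffer.data(), &ori_w, &ori_h);
}
```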
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "preprocessor_seg.h"

#include <glog/logging.h>
#include <thread>

namespace PaddleSolution {

bool SegPreProcessor::single_process(const std::string& fname,
                                     float* data, int* ori_w, int* ori_h) {
    cv::Mat im = cv::imread(fname, -1);
    if (im.data == nullptr || im.empty()) {
        LOG(ERROR) << "Failed to open image: " << fname;
        return false;
    }
    int channels = im.channels();
    *ori_w = im.cols;
    *ori_h = im.rows;
@@ -36,7 +51,8 @@ namespace PaddleSolution {
    return true;
}

bool SegPreProcessor::batch_process(const std::vector<std::string>& imgs,
                                    float* data, int* ori_w, int* ori_h) {
    auto ic = _config->_channels;
    auto iw = _config->_resize[0];
    auto ih = _config->_resize[1];
@@ -58,9 +74,9 @@ namespace PaddleSolution {
    return true;
}

bool SegPreProcessor::init(
    std::shared_ptr<PaddleSolution::PaddleSegModelConfigPaser> config) {
    _config = config;
    return true;
}
}  // namespace PaddleSolution
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <string>
#include <vector>
#include <memory>

#include "preprocessor.h"
#include "utils/utils.h"

namespace PaddleSolution {

class SegPreProcessor : public ImagePreProcessor {
 public:
    SegPreProcessor() : _config(nullptr) {}

    bool init(
        std::shared_ptr<PaddleSolution::PaddleSegModelConfigPaser> config);

    bool single_process(const std::string &fname, float* data,
                        int* ori_w, int* ori_h);

    bool batch_process(const std::vector<std::string>& imgs, float* data,
                       int* ori_w, int* ori_h);

 private:
    std::shared_ptr<PaddleSolution::PaddleSegModelConfigPaser> _config;
};

}  // namespace PaddleSolution
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import sys

# ColorMap for visualization more clearly
color_map = [[128, 64, 128], [244, 35, 231], [69, 69, 69], [102, 102, 156],
             [190, 153, 153], [153, 153, 153], [250, 170, 29], [219, 219, 0],
             [106, 142, 35], [152, 250, 152], [69, 129, 180], [219, 19, 60],
             [255, 0, 0], [0, 0, 142], [0, 0, 69], [0, 60, 100], [0, 79, 100],
             [0, 0, 230], [119, 10, 32]]

# python visualize.py demo1.jpg demo1_jpg.png vis_result.png
if __name__ == "__main__":
    if len(sys.argv) != 4:
        print(
            "Usage: python visualize.py demo1.jpg demo1_jpg.png vis_result.png")
    else:
        # original image, used only to recover the original resolution
        ori_im = cv2.imread(sys.argv[1])
        ori_shape = ori_im.shape
        print(ori_shape)
        # label map produced by the predictor, one class id per pixel
        im = cv2.imread(sys.argv[2])
        shape = im.shape
        print("visualizing...")
        # map every pixel's class label to its color from color_map
        for i in range(0, shape[0]):
            for j in range(0, shape[1]):
                im[i, j] = color_map[im[i, j, 0]]
        # resize the colored mask back to the original image size and save it
        im = cv2.resize(im, (ori_shape[1], ori_shape[0]))
        cv2.imwrite(sys.argv[3], im)
        print("visualizing done!")
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <yaml-cpp/yaml.h>
#include <iostream>
#include <vector>
#include <string>

namespace PaddleSolution {
class PaddleSegModelConfigPaser {
 public:
    PaddleSegModelConfigPaser()
        :_class_num(0),
        _channels(0),
        _use_gpu(0),
        _batch_size(1),
        _model_file_name("__model__"),
        _param_file_name("__params__") {
    }
    ~PaddleSegModelConfigPaser() {
    }

    void reset() {
        _resize.clear();
        _mean.clear();
        _std.clear();
        _img_type.clear();
        _class_num = 0;
        _channels = 0;
        _use_gpu = 0;
        _batch_size = 1;
        _model_file_name.clear();
        _model_path.clear();
        _param_file_name.clear();
    }

    std::string process_parenthesis(const std::string& str) {
        if (str.size() < 2) {
            return str;
        }
        std::string nstr(str);
        if (str[0] == '(' && str.back() == ')') {
            nstr[0] = '[';
            nstr[str.size() - 1] = ']';
        }
        return nstr;
    }

    template <typename T>
    std::vector<T> parse_str_to_vec(const std::string& str) {
        std::vector<T> data;
        auto node = YAML::Load(str);
        for (const auto& item : node) {
            data.push_back(item.as<T>());
        }
        return data;
    }

    bool load_config(const std::string& conf_file) {
        reset();

        YAML::Node config = YAML::LoadFile(conf_file);
        // 1. get resize
        auto str = config["DEPLOY"]["EVAL_CROP_SIZE"].as<std::string>();
        _resize = parse_str_to_vec<int>(process_parenthesis(str));

        // 2. get mean
        for (const auto& item : config["DEPLOY"]["MEAN"]) {
            _mean.push_back(item.as<float>());
        }

        // 3. get std
        for (const auto& item : config["DEPLOY"]["STD"]) {
            _std.push_back(item.as<float>());
        }

        // 4. get image type
        _img_type = config["DEPLOY"]["IMAGE_TYPE"].as<std::string>();
        // 5. get class number
        _class_num = config["DEPLOY"]["NUM_CLASSES"].as<int>();
        // 7. set model path
        _model_path = config["DEPLOY"]["MODEL_PATH"].as<std::string>();
        // 8. get model file_name
        _model_file_name = config["DEPLOY"]["MODEL_FILENAME"].as<std::string>();
        // 9. get model param file name
        _param_file_name =
            config["DEPLOY"]["PARAMS_FILENAME"].as<std::string>();
        // 10. get pre_processor
        _pre_processor = config["DEPLOY"]["PRE_PROCESSOR"].as<std::string>();
        // 11. use_gpu
        _use_gpu = config["DEPLOY"]["USE_GPU"].as<int>();
        // 12. predictor_mode
        _predictor_mode = config["DEPLOY"]["PREDICTOR_MODE"].as<std::string>();
        // 13. batch_size
        _batch_size = config["DEPLOY"]["BATCH_SIZE"].as<int>();
        // 14. channels
        _channels = config["DEPLOY"]["CHANNELS"].as<int>();
        return true;
    }

    void debug() const {
        std::cout << "EVAL_CROP_SIZE: ("
                  << _resize[0] << ", " << _resize[1]
                  << ")" << std::endl;
        std::cout << "MEAN: [";
        for (int i = 0; i < _mean.size(); ++i) {
            if (i != _mean.size() - 1) {
                std::cout << _mean[i] << ", ";
            } else {
                std::cout << _mean[i];
            }
        }
        std::cout << "]" << std::endl;

        std::cout << "STD: [";
        for (int i = 0; i < _std.size(); ++i) {
            if (i != _std.size() - 1) {
                std::cout << _std[i] << ", ";
            } else {
                std::cout << _std[i];
            }
        }
        std::cout << "]" << std::endl;
        std::cout << "DEPLOY.IMAGE_TYPE: " << _img_type << std::endl;
        std::cout << "DEPLOY.NUM_CLASSES: " << _class_num << std::endl;
        std::cout << "DEPLOY.CHANNELS: " << _channels << std::endl;
        std::cout << "DEPLOY.MODEL_PATH: " << _model_path << std::endl;
        std::cout << "DEPLOY.MODEL_FILENAME: " << _model_file_name << std::endl;
        std::cout << "DEPLOY.PARAMS_FILENAME: "
                  << _param_file_name << std::endl;
        std::cout << "DEPLOY.PRE_PROCESSOR: " << _pre_processor << std::endl;
        std::cout << "DEPLOY.USE_GPU: " << _use_gpu << std::endl;
        std::cout << "DEPLOY.PREDICTOR_MODE: " << _predictor_mode << std::endl;
        std::cout << "DEPLOY.BATCH_SIZE: " << _batch_size << std::endl;
    }

    // DEPLOY.EVAL_CROP_SIZE
    std::vector<int> _resize;
    // DEPLOY.MEAN
    std::vector<float> _mean;
    // DEPLOY.STD
    std::vector<float> _std;
    // DEPLOY.IMAGE_TYPE
    std::string _img_type;
    // DEPLOY.NUM_CLASSES
    int _class_num;
    // DEPLOY.CHANNELS
    int _channels;
    // DEPLOY.MODEL_PATH
    std::string _model_path;
    // DEPLOY.MODEL_FILENAME
    std::string _model_file_name;
    // DEPLOY.PARAMS_FILENAME
    std::string _param_file_name;
    // DEPLOY.PRE_PROCESSOR
    std::string _pre_processor;
    // DEPLOY.USE_GPU
    int _use_gpu;
    // DEPLOY.PREDICTOR_MODE
    std::string _predictor_mode;
    // DEPLOY.BATCH_SIZE
    int _batch_size;
};
}  // namespace PaddleSolution
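The parser reads everything from the `DEPLOY` section of the deployment yaml file. A minimal usage sketch is shown below; it is not part of the repository, the config path is a placeholder, and the key names in the comments are exactly the ones `load_config` reads:

```cpp
// Minimal usage sketch (not part of the repository) for PaddleSegModelConfigPaser.
// load_config expects a yaml file with a DEPLOY section containing:
//   EVAL_CROP_SIZE (e.g. "(2048, 1024)"), MEAN, STD, IMAGE_TYPE, NUM_CLASSES,
//   MODEL_PATH, MODEL_FILENAME, PARAMS_FILENAME, PRE_PROCESSOR, USE_GPU,
//   PREDICTOR_MODE, BATCH_SIZE, CHANNELS
#include <iostream>
#include "utils/seg_conf_parser.h"

int main() {
    PaddleSolution::PaddleSegModelConfigPaser conf;
    // "/path/to/deploy.yaml" is a placeholder path
    if (!conf.load_config("/path/to/deploy.yaml")) {
        std::cerr << "failed to load deploy config" << std::endl;
        return -1;
    }
    // process_parenthesis + parse_str_to_vec turn "(2048, 1024)" into {2048, 1024},
    // so EVAL_CROP_SIZE ends up in conf._resize as [width, height]
    conf.debug();  // print every DEPLOY.* value that was parsed
    return 0;
}
```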
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
@@ -16,105 +30,110 @@
#endif

namespace PaddleSolution {
namespace utils {

inline std::string path_join(const std::string& dir,
                             const std::string& path) {
    std::string seperator = "/";
#ifdef _WIN32
    seperator = "\\";
#endif
    return dir + seperator + path;
}

#ifndef _WIN32
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(
    const std::string& path, const std::string& exts) {
    std::vector<std::string> imgs;
    struct dirent *entry;
    DIR *dir = opendir(path.c_str());
    if (dir == NULL) {
        closedir(dir);
        return imgs;
    }

    while ((entry = readdir(dir)) != NULL) {
        std::string item = entry->d_name;
        auto ext = strrchr(entry->d_name, '.');
        if (!ext || std::string(ext) == "." || std::string(ext) == "..") {
            continue;
        }
        if (exts.find(ext) != std::string::npos) {
            imgs.push_back(path_join(path, entry->d_name));
        }
    }
    return imgs;
}
#else
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(
    const std::string& path, const std::string& exts) {
    std::vector<std::string> imgs;
    for (const auto& item :
         std::experimental::filesystem::directory_iterator(path)) {
        auto suffix = item.path().extension().string();
        if (exts.find(suffix) != std::string::npos && suffix.size() > 0) {
            auto fullname = path_join(path,
                                      item.path().filename().string());
            imgs.push_back(item.path().string());
        }
    }
    return imgs;
}
#endif

// normalize and HWC_BGR -> CHW_RGB
inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean,
                      std::vector<float>& fstd) {
    int rh = im.rows;
    int rw = im.cols;
    int rc = im.channels();
    double normf = static_cast<double>(1.0) / 255.0;
    #pragma omp parallel for
    for (int h = 0; h < rh; ++h) {
        const uchar* ptr = im.ptr<uchar>(h);
        int im_index = 0;
        for (int w = 0; w < rw; ++w) {
            for (int c = 0; c < rc; ++c) {
                int top_index = (c * rh + h) * rw + w;
                float pixel = static_cast<float>(ptr[im_index++]);
                pixel = (pixel * normf - fmean[c]) / fstd[c];
                data[top_index] = pixel;
            }
        }
    }
}

// argmax
inline void argmax(float* out, std::vector<int>& shape,
                   std::vector<uchar>& mask, std::vector<uchar>& scoremap) {
    int out_img_len = shape[1] * shape[2];
    int blob_out_len = out_img_len * shape[0];
    /*
    Eigen::TensorMap<Eigen::Tensor<float, 3>> out_3d(out, shape[0], shape[1], shape[2]);
    Eigen::Tensor<Eigen::DenseIndex, 2> argmax = out_3d.argmax(0);
    */
    float max_value = -1;
    int label = 0;
    #pragma omp parallel private(label)
    for (int i = 0; i < out_img_len; ++i) {
        max_value = -1;
        label = 0;
        #pragma omp for reduction(max : max_value)
        for (int j = 0; j < shape[0]; ++j) {
            int index = i + j * out_img_len;
            if (index >= blob_out_len) {
                continue;
            }
            float value = out[index];
            if (value > max_value) {
                max_value = value;
                label = j;
            }
        }
        if (label == 0) max_value = 0;
        mask[i] = uchar(label);
        scoremap[i] = uchar(max_value * 255);
    }
}
}  // namespace utils
}  // namespace PaddleSolution
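To make the helpers above concrete, a small sketch follows; it is not part of the repository, and the crop size and the mean/std values are placeholders for what the deployment yaml would normally provide:

```cpp
// Sketch of using get_directory_images() + normalize(); not part of the repository.
// 513x513 and the 0.5 mean/std values are placeholders for DEPLOY.EVAL_CROP_SIZE,
// DEPLOY.MEAN, and DEPLOY.STD; a 3-channel input image is assumed.
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>
#include "utils/utils.h"

bool prepare_first_image(const std::string& input_dir, std::vector<float>* chw) {
    // collect all .jpeg/.jpg files in the directory
    auto imgs = PaddleSolution::utils::get_directory_images(input_dir,
                                                            ".jpeg|.jpg");
    if (imgs.empty()) {
        return false;
    }
    cv::Mat im = cv::imread(imgs[0], -1);
    if (im.empty() || im.channels() != 3) {
        return false;
    }
    cv::resize(im, im, cv::Size(513, 513));
    std::vector<float> fmean = {0.5f, 0.5f, 0.5f};
    std::vector<float> fstd = {0.5f, 0.5f, 0.5f};
    // planar CHW buffer: channels * height * width floats
    chw->resize(3 * 513 * 513);
    PaddleSolution::utils::normalize(im, chw->data(), fmean, fstd);
    return true;
}
```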