提交 0fc9b582 编写于 作者: J joey12300

using 'cpplint --filter=-build/include_subdir,-build/c++11' to check cpp code...

using 'cpplint --filter=-build/include_subdir,-build/c++11' to check cpp code style of all *.h, *.cpp
上级 5ac1f499
......@@ -23,7 +23,8 @@ int main(int argc, char** argv) {
// 0. parse args
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_conf.empty() || FLAGS_input_dir.empty()) {
std::cout << "Usage: ./predictor --conf=/config/path/to/your/model --input_dir=/directory/of/your/input/images";
std::cout << "Usage: ./predictor --conf=/config/path/to/your/model "
<< "--input_dir=/directory/of/your/input/images";
return -1;
}
// 1. create a predictor and init it with conf
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
// Third-party library headers.
#include <glog/logging.h>
#include <yaml-cpp/yaml.h>
#include <opencv2/opencv.hpp>
#include <paddle_inference_api.h>
// C++ standard library headers.
#include <algorithm>
#include <chrono>
#include <memory>
#include <string>
#include <thread>
#include <vector>
// Project headers (quoted form, per cpplint build/include conventions).
#include "utils/seg_conf_parser.h"
#include "utils/utils.h"
#include "preprocessor/preprocessor.h"
namespace PaddleSolution {
class Predictor {
public:
// init a predictor with a yaml config file
int init(const std::string& conf);
// predict api
int predict(const std::vector<std::string>& imgs);
private:
int output_mask(
const std::string& fname,
float* p_out,
int length,
int* height = NULL,
int* width = NULL);
int native_predict(const std::vector<std::string>& imgs);
int analysis_predict(const std::vector<std::string>& imgs);
private:
std::vector<float> _buffer;
std::vector<int> _org_width;
std::vector<int> _org_height;
std::vector<std::string> _imgs_batch;
std::vector<paddle::PaddleTensor> _outputs;
class Predictor {
public:
// init a predictor with a yaml config file
int init(const std::string& conf);
// predict api
int predict(const std::vector<std::string>& imgs);
private:
int output_mask(const std::string& fname, float* p_out, int length,
int* height = NULL, int* width = NULL);
int native_predict(const std::vector<std::string>& imgs);
int analysis_predict(const std::vector<std::string>& imgs);
private:
std::vector<float> _buffer;
std::vector<int> _org_width;
std::vector<int> _org_height;
std::vector<std::string> _imgs_batch;
std::vector<paddle::PaddleTensor> _outputs;
std::vector<uchar> _mask;
std::vector<uchar> _scoremap;
std::vector<uchar> _mask;
std::vector<uchar> _scoremap;
PaddleSolution::PaddleSegModelConfigPaser _model_config;
std::shared_ptr<PaddleSolution::ImagePreProcessor> _preprocessor;
std::unique_ptr<paddle::PaddlePredictor> _main_predictor;
};
}
PaddleSolution::PaddleSegModelConfigPaser _model_config;
std::shared_ptr<PaddleSolution::ImagePreProcessor> _preprocessor;
std::unique_ptr<paddle::PaddlePredictor> _main_predictor;
};
} // namespace PaddleSolution
......@@ -21,9 +21,10 @@
namespace PaddleSolution {
// NOTE(review): scraped diff hunk — the one-line signature below is the
// pre-commit form; the wrapped two-line signature after it is its
// post-commit replacement. Only one variant belongs in the real file,
// and part of the function body is omitted behind the @@ marker.
std::shared_ptr<ImagePreProcessor> create_processor(const std::string& conf_file) {
auto config = std::make_shared<PaddleSolution::PaddleSegModelConfigPaser>();
std::shared_ptr<ImagePreProcessor> create_processor(
const std::string& conf_file) {
auto config = std::make_shared<PaddleSolution::
PaddleSegModelConfigPaser>();
// Failed config load: log fatally and return null.
// TODO(review): "laod" in the log message is a typo for "load"; under
// glog's default behavior LOG(FATAL) aborts, so the return below is
// presumably defensive only — confirm before relying on it.
if (!config->load_config(conf_file)) {
LOG(FATAL) << "fail to laod conf file [" << conf_file << "]";
return nullptr;
......@@ -37,9 +38,9 @@ namespace PaddleSolution {
return p;
}
// Unrecognized processor name in the config: log and return null.
LOG(FATAL) << "unknown processor_name [" << config->_pre_processor << "]";
LOG(FATAL) << "unknown processor_name [" << config->_pre_processor
<< "]";
return nullptr;
}
}
} // namespace PaddleSolution
......@@ -26,18 +26,19 @@
namespace PaddleSolution {

// Abstract interface for image pre-processing ahead of inference.
// Concrete implementations (e.g. a segmentation pre-processor) fill the
// caller-provided float buffer and report each image's original size.
class ImagePreProcessor {
 protected:
    // Construction only through derived classes.
    ImagePreProcessor() {}

 public:
    virtual ~ImagePreProcessor() {}

    // Pre-process a single image file into `data`; writes the original
    // width/height to *ori_w / *ori_h. Returns false on failure.
    virtual bool single_process(const std::string& fname, float* data,
                                int* ori_w, int* ori_h) = 0;

    // Pre-process a batch of image files into `data`; `ori_w` / `ori_h`
    // receive per-image original sizes. Returns false on failure.
    virtual bool batch_process(const std::vector<std::string>& imgs,
                               float* data, int* ori_w, int* ori_h) = 0;
};  // end of class ImagePreProcessor

// Factory: build the pre-processor named in the YAML config file;
// returns nullptr when the config cannot be loaded or the name is unknown.
std::shared_ptr<ImagePreProcessor> create_processor(
    const std::string &config_file);

}  // namespace PaddleSolution
......@@ -12,21 +12,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <thread>
#include "preprocessor_seg.h"
namespace PaddleSolution {
// NOTE(review): scraped diff hunk — the one-line signature below is the
// pre-commit form and the wrapped two-line signature is its post-commit
// replacement; the rest of the function body is omitted behind the next
// @@ marker.
bool SegPreProcessor::single_process(const std::string& fname, float* data, int* ori_w, int* ori_h) {
bool SegPreProcessor::single_process(const std::string& fname,
float* data, int* ori_w, int* ori_h) {
// Load the image unchanged (-1 == cv::IMREAD_UNCHANGED), preserving
// channel count and bit depth.
cv::Mat im = cv::imread(fname, -1);
if (im.data == nullptr || im.empty()) {
LOG(ERROR) << "Failed to open image: " << fname;
return false;
}
int channels = im.channels();
// Report the pre-resize dimensions back to the caller.
*ori_w = im.cols;
*ori_h = im.rows;
......@@ -50,7 +51,8 @@ namespace PaddleSolution {
return true;
}
// NOTE(review): duplicated signatures below come from the diff scrape;
// the wrapped form is the post-commit version. Part of the body is
// omitted behind the @@ marker.
bool SegPreProcessor::batch_process(const std::vector<std::string>& imgs, float* data, int* ori_w, int* ori_h) {
bool SegPreProcessor::batch_process(const std::vector<std::string>& imgs,
float* data, int* ori_w, int* ori_h) {
// Channel count and target resize dimensions from the model config.
auto ic = _config->_channels;
auto iw = _config->_resize[0];
auto ih = _config->_resize[1];
......@@ -72,9 +74,9 @@ namespace PaddleSolution {
return true;
}
// Store the parsed model configuration for later use by the
// pre-processing routines.
// @param config shared, already-parsed YAML model configuration.
// @return true — initialization cannot currently fail.
bool SegPreProcessor::init(
    std::shared_ptr<PaddleSolution::PaddleSegModelConfigPaser> config) {
    _config = config;
    return true;
}
}  // namespace PaddleSolution
......@@ -14,25 +14,27 @@
#pragma once

#include <string>
#include <vector>
#include <memory>

#include "preprocessor.h"
#include "utils/utils.h"

namespace PaddleSolution {

// Segmentation-specific pre-processor: resizes/normalizes images into
// the model's expected input layout using the parsed YAML config.
class SegPreProcessor : public ImagePreProcessor {
 public:
    SegPreProcessor() : _config(nullptr) {}

    // Keep a handle to the parsed model configuration; returns true.
    bool init(
        std::shared_ptr<PaddleSolution::PaddleSegModelConfigPaser> config);

    // Pre-process one image file into `data`; reports original size.
    bool single_process(const std::string &fname, float* data,
                        int* ori_w, int* ori_h);

    // Pre-process a batch of image files into `data`; reports per-image
    // original sizes.
    bool batch_process(const std::vector<std::string>& imgs, float* data,
                       int* ori_w, int* ori_h);

 private:
    std::shared_ptr<PaddleSolution::PaddleSegModelConfigPaser> _config;
};

}  // namespace PaddleSolution
......@@ -12,30 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import sys

# ColorMap for visualization more clearly
color_map = [[128, 64, 128], [244, 35, 231], [69, 69, 69], [102, 102, 156],
             [190, 153, 153], [153, 153, 153], [250, 170, 29], [219, 219, 0],
             [106, 142, 35], [152, 250, 152], [69, 129, 180], [219, 19, 60],
             [255, 0, 0], [0, 0, 142], [0, 0, 69], [0, 60, 100], [0, 79, 100],
             [0, 0, 230], [119, 10, 32]]

# python visualize.py demo1.jpg demo1_jpg.png vis_result.png
if __name__ == "__main__":
    if len(sys.argv) != 4:
        print(
            "Usage: python visualize.py demo1.jpg demo1_jpg.png vis_result.png")
    else:
        # The original image only supplies the target size for the result.
        ori_im = cv2.imread(sys.argv[1])
        ori_shape = ori_im.shape
        print(ori_shape)
        # Label image: the first channel of each pixel is a class id that
        # indexes into color_map.
        im = cv2.imread(sys.argv[2])
        shape = im.shape
        print("visualizing...")
        for i in range(0, shape[0]):
            for j in range(0, shape[1]):
                im[i, j] = color_map[im[i, j, 0]]
        # Scale the colored map back to the original image size and save.
        im = cv2.resize(im, (ori_shape[1], ori_shape[0]))
        cv2.imwrite(sys.argv[3], im)
        print("visualizing done!")
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册