Commit 0fc9b582 authored by joey12300

Use 'cpplint --filter=-build/include_subdir,-build/c++11' to check the C++ code style of all *.h and *.cpp files.
Parent 5ac1f499
@@ -23,7 +23,8 @@ int main(int argc, char** argv) {
    // 0. parse args
    google::ParseCommandLineFlags(&argc, &argv, true);
    if (FLAGS_conf.empty() || FLAGS_input_dir.empty()) {
        std::cout << "Usage: ./predictor --conf=/config/path/to/your/model "
                  << "--input_dir=/directory/of/your/input/images";
        return -1;
    }
    // 1. create a predictor and init it with conf
......
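For orientation (not part of this commit): a minimal sketch of how these flags feed the Predictor API that the rest of the diff touches. The list_images helper is hypothetical, standing in for however the demo enumerates FLAGS_input_dir:

// Sketch only. Assumption: list_images() is a hypothetical directory-scan
// helper; Predictor::init/predict are the public API declared in
// predictor.h further down in this diff.
std::vector<std::string> imgs = list_images(FLAGS_input_dir);
PaddleSolution::Predictor predictor;
if (predictor.init(FLAGS_conf) != 0) {
    LOG(FATAL) << "Fail to init predictor with: " << FLAGS_conf;
    return -1;
}
return predictor.predict(imgs);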
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,274 +16,302 @@
#include <unsupported/Eigen/CXX11/Tensor>
#undef min
namespace PaddleSolution {
using std::chrono::duration_cast;
int Predictor::init(const std::string& conf) {
    if (!_model_config.load_config(conf)) {
        LOG(FATAL) << "Fail to load config file: [" << conf << "]";
        return -1;
    }
    _preprocessor = PaddleSolution::create_processor(conf);
    if (_preprocessor == nullptr) {
        LOG(FATAL) << "Failed to create_processor";
        return -1;
    }
    int res_size = _model_config._resize[0] * _model_config._resize[1];
    _mask.resize(res_size);
    _scoremap.resize(res_size);
    bool use_gpu = _model_config._use_gpu;
    const auto& model_dir = _model_config._model_path;
    const auto& model_filename = _model_config._model_file_name;
    const auto& params_filename = _model_config._param_file_name;
    // load paddle model file
    if (_model_config._predictor_mode == "NATIVE") {
        paddle::NativeConfig config;
        auto prog_file = utils::path_join(model_dir, model_filename);
        auto param_file = utils::path_join(model_dir, params_filename);
        config.prog_file = prog_file;
        config.param_file = param_file;
        config.fraction_of_gpu_memory = 0;
        config.use_gpu = use_gpu;
        config.device = 0;
        _main_predictor = paddle::CreatePaddlePredictor(config);
    } else if (_model_config._predictor_mode == "ANALYSIS") {
        paddle::AnalysisConfig config;
        if (use_gpu) {
            config.EnableUseGpu(100, 0);
        }
        auto prog_file = utils::path_join(model_dir, model_filename);
        auto param_file = utils::path_join(model_dir, params_filename);
        config.SetModel(prog_file, param_file);
        config.SwitchUseFeedFetchOps(false);
        config.SwitchSpecifyInputNames(true);
        config.EnableMemoryOptim();
        _main_predictor = paddle::CreatePaddlePredictor(config);
    } else {
        return -1;
    }
    return 0;
}
int Predictor::predict(const std::vector<std::string>& imgs) {
    if (_model_config._predictor_mode == "NATIVE") {
        return native_predict(imgs);
    } else if (_model_config._predictor_mode == "ANALYSIS") {
        return analysis_predict(imgs);
    }
    return -1;
}

int Predictor::output_mask(const std::string& fname, float* p_out,
                           int length, int* height, int* width) {
    int eval_width = _model_config._resize[0];
    int eval_height = _model_config._resize[1];
    int eval_num_class = _model_config._class_num;

    int blob_out_len = length;
    int seg_out_len = eval_height * eval_width * eval_num_class;
    if (blob_out_len != seg_out_len) {
        LOG(ERROR) << " [FATAL] unequal: input vs output ["
                   << seg_out_len << "|" << blob_out_len << "]" << std::endl;
        return -1;
    }
    // post process
    _mask.clear();
    _scoremap.clear();
    std::vector<int> out_shape{eval_num_class, eval_height, eval_width};
    utils::argmax(p_out, out_shape, _mask, _scoremap);
    cv::Mat mask_png = cv::Mat(eval_height, eval_width, CV_8UC1);
    mask_png.data = _mask.data();
    std::string nname(fname);
    auto pos = fname.find(".");
    nname[pos] = '_';
    std::string mask_save_name = nname + ".png";
    cv::imwrite(mask_save_name, mask_png);
    cv::Mat scoremap_png = cv::Mat(eval_height, eval_width, CV_8UC1);
    scoremap_png.data = _scoremap.data();
    std::string scoremap_save_name = nname + std::string("_scoremap.png");
    cv::imwrite(scoremap_save_name, scoremap_png);
    std::cout << "save mask of [" << fname << "] done" << std::endl;
    if (height && width) {
        int recover_height = *height;
        int recover_width = *width;
        cv::Mat recover_png = cv::Mat(recover_height,
                                      recover_width, CV_8UC1);
        cv::resize(scoremap_png, recover_png,
                   cv::Size(recover_width, recover_height),
                   0, 0, cv::INTER_CUBIC);
        std::string recover_name = nname + std::string("_recover.png");
        cv::imwrite(recover_name, recover_png);
    }
    return 0;
}
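A note on the post-processing above: utils::argmax collapses the {num_class, height, width} score tensor into a per-pixel label mask and a score map. A hedged sketch of that reduction, included only to make the class-major CHW layout explicit; the real helper lives in utils/utils.h and its signature and score scaling may differ:

// Sketch under assumptions: scores is class-major CHW data, matching
// out_shape above, with scores in [0, 1]. For each pixel, keep the id of
// the best class and its (scaled) score. uchar is OpenCV's unsigned char.
void argmax_chw(const float* scores, int num_class, int height, int width,
                std::vector<unsigned char>* mask,
                std::vector<unsigned char>* scoremap) {
    int hw = height * width;
    for (int p = 0; p < hw; ++p) {
        int best = 0;
        float best_score = scores[p];  // class-0 plane
        for (int c = 1; c < num_class; ++c) {
            float s = scores[c * hw + p];
            if (s > best_score) {
                best_score = s;
                best = c;
            }
        }
        mask->push_back(static_cast<unsigned char>(best));
        scoremap->push_back(static_cast<unsigned char>(best_score * 255));
    }
}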
int Predictor::native_predict(const std::vector<std::string>& imgs) {
    if (imgs.size() == 0) {
        LOG(ERROR) << "No image found";
        return -1;
    }
    int config_batch_size = _model_config._batch_size;
    int channels = _model_config._channels;
    int eval_width = _model_config._resize[0];
    int eval_height = _model_config._resize[1];
    std::size_t total_size = imgs.size();
    int default_batch_size = std::min(config_batch_size,
                                      static_cast<int>(total_size));
    int batch = total_size / default_batch_size
              + ((total_size % default_batch_size) != 0);
    int batch_buffer_size = default_batch_size * channels
                          * eval_width * eval_height;

    auto& input_buffer = _buffer;
    auto& org_width = _org_width;
    auto& org_height = _org_height;
    auto& imgs_batch = _imgs_batch;

    input_buffer.resize(batch_buffer_size);
    org_width.resize(default_batch_size);
    org_height.resize(default_batch_size);
    for (int u = 0; u < batch; ++u) {
        int batch_size = default_batch_size;
        if (u == (batch - 1) && (total_size % default_batch_size)) {
            batch_size = total_size % default_batch_size;
        }
        int real_buffer_size = batch_size * channels
                             * eval_width * eval_height;
        std::vector<paddle::PaddleTensor> feeds;
        input_buffer.resize(real_buffer_size);
        org_height.resize(batch_size);
        org_width.resize(batch_size);
        for (int i = 0; i < batch_size; ++i) {
            org_width[i] = org_height[i] = 0;
        }
        imgs_batch.clear();
        for (int i = 0; i < batch_size; ++i) {
            int idx = u * default_batch_size + i;
            imgs_batch.push_back(imgs[idx]);
        }
        if (!_preprocessor->batch_process(imgs_batch,
                                          input_buffer.data(),
                                          org_width.data(),
                                          org_height.data())) {
            return -1;
        }
        paddle::PaddleTensor im_tensor;
        im_tensor.name = "image";
        im_tensor.shape = std::vector<int>{ batch_size, channels,
                                            eval_height, eval_width };
        im_tensor.data.Reset(input_buffer.data(),
                             real_buffer_size * sizeof(float));
        im_tensor.dtype = paddle::PaddleDType::FLOAT32;
        feeds.push_back(im_tensor);
        _outputs.clear();
        auto t1 = std::chrono::high_resolution_clock::now();
        if (!_main_predictor->Run(feeds, &_outputs, batch_size)) {
            LOG(ERROR) <<
                "Failed: NativePredictor->Run() return false at batch: "
                << u;
            continue;
        }
        auto t2 = std::chrono::high_resolution_clock::now();
        auto duration = duration_cast<std::chrono::microseconds>
                        (t2 - t1).count();
        std::cout << "runtime = " << duration << std::endl;
        int out_num = 1;
        // print shape of first output tensor for debugging
        std::cout << "size of outputs[" << 0 << "]: (";
        for (int j = 0; j < _outputs[0].shape.size(); ++j) {
            out_num *= _outputs[0].shape[j];
            std::cout << _outputs[0].shape[j] << ",";
        }
        std::cout << ")" << std::endl;
        const size_t nums = _outputs.front().data.length()
                          / sizeof(float);
        if (out_num % batch_size != 0 || out_num != nums) {
            LOG(ERROR) << "outputs data size mismatch with shape size.";
            return -1;
        }
        for (int i = 0; i < batch_size; ++i) {
            float* output_addr = reinterpret_cast<float*>(
                                     _outputs[0].data.data())
                               + i * (out_num / batch_size);
            output_mask(imgs_batch[i], output_addr,
                        out_num / batch_size,
                        &org_height[i],
                        &org_width[i]);
        }
    }
    return 0;
}
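The batch count computed at the top of native_predict (and again in analysis_predict below) is a ceiling division spelled with an integer comparison. A worked example with illustrative numbers, not values from this commit:

// Worked example of the batching arithmetic (illustrative values):
//   10 images, configured batch size 4
//   batch = 10 / 4 + (10 % 4 != 0) = 2 + 1 = 3 batches
//   the last iteration shrinks batch_size to 10 % 4 = 2
int total_size = 10;
int default_batch_size = 4;
int batch = total_size / default_batch_size
          + ((total_size % default_batch_size) != 0);   // 3
int last_batch_size = total_size % default_batch_size;  // 2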
int Predictor::analysis_predict(const std::vector<std::string>& imgs) {
    if (imgs.size() == 0) {
        LOG(ERROR) << "No image found";
        return -1;
    }
    int config_batch_size = _model_config._batch_size;
    int channels = _model_config._channels;
    int eval_width = _model_config._resize[0];
    int eval_height = _model_config._resize[1];
    auto total_size = imgs.size();
    int default_batch_size = std::min(config_batch_size,
                                      static_cast<int>(total_size));
    int batch = total_size / default_batch_size
              + ((total_size % default_batch_size) != 0);
    int batch_buffer_size = default_batch_size * channels
                          * eval_width * eval_height;

    auto& input_buffer = _buffer;
    auto& org_width = _org_width;
    auto& org_height = _org_height;
    auto& imgs_batch = _imgs_batch;

    input_buffer.resize(batch_buffer_size);
    org_width.resize(default_batch_size);
    org_height.resize(default_batch_size);

    for (int u = 0; u < batch; ++u) {
        int batch_size = default_batch_size;
        if (u == (batch - 1) && (total_size % default_batch_size)) {
            batch_size = total_size % default_batch_size;
        }
        int real_buffer_size = batch_size * channels
                             * eval_width * eval_height;
        std::vector<paddle::PaddleTensor> feeds;
        input_buffer.resize(real_buffer_size);
        org_height.resize(batch_size);
        org_width.resize(batch_size);
        for (int i = 0; i < batch_size; ++i) {
            org_width[i] = org_height[i] = 0;
        }
        imgs_batch.clear();
        for (int i = 0; i < batch_size; ++i) {
            int idx = u * default_batch_size + i;
            imgs_batch.push_back(imgs[idx]);
        }
        if (!_preprocessor->batch_process(imgs_batch,
                                          input_buffer.data(),
                                          org_width.data(),
                                          org_height.data())) {
            return -1;
        }
        auto im_tensor = _main_predictor->GetInputTensor("image");
        im_tensor->Reshape({ batch_size, channels,
                             eval_height, eval_width });
        im_tensor->copy_from_cpu(input_buffer.data());

        auto t1 = std::chrono::high_resolution_clock::now();
        _main_predictor->ZeroCopyRun();
        auto t2 = std::chrono::high_resolution_clock::now();
        auto duration = duration_cast<std::chrono::microseconds>
                        (t2 - t1).count();
        std::cout << "runtime = " << duration << std::endl;

        auto output_names = _main_predictor->GetOutputNames();
        auto output_t = _main_predictor->GetOutputTensor(output_names[0]);
        std::vector<float> out_data;
        std::vector<int> output_shape = output_t->shape();

        int out_num = 1;
        std::cout << "size of outputs[" << 0 << "]: (";
        for (int j = 0; j < output_shape.size(); ++j) {
            out_num *= output_shape[j];
            std::cout << output_shape[j] << ",";
        }
        std::cout << ")" << std::endl;
        out_data.resize(out_num);
        output_t->copy_to_cpu(out_data.data());
        for (int i = 0; i < batch_size; ++i) {
            float* out_addr = out_data.data()
                            + (out_num / batch_size) * i;
            output_mask(imgs_batch[i], out_addr, out_num / batch_size,
                        &org_height[i], &org_width[i]);
        }
    }
    return 0;
}
}  // namespace PaddleSolution
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <glog/logging.h>
#include <yaml-cpp/yaml.h>
#include <memory>
#include <string>
#include <vector>
#include <thread>
#include <chrono>
#include <algorithm>
#include <paddle_inference_api.h>
#include <opencv2/opencv.hpp>
#include "utils/seg_conf_parser.h"
#include "utils/utils.h"
#include "preprocessor/preprocessor.h"
namespace PaddleSolution {
class Predictor {
 public:
    // init a predictor with a yaml config file
    int init(const std::string& conf);
    // predict api
    int predict(const std::vector<std::string>& imgs);

 private:
    int output_mask(const std::string& fname, float* p_out, int length,
                    int* height = NULL, int* width = NULL);
    int native_predict(const std::vector<std::string>& imgs);
    int analysis_predict(const std::vector<std::string>& imgs);

 private:
    std::vector<float> _buffer;
    std::vector<int> _org_width;
    std::vector<int> _org_height;
    std::vector<std::string> _imgs_batch;
    std::vector<paddle::PaddleTensor> _outputs;
    std::vector<uchar> _mask;
    std::vector<uchar> _scoremap;
    PaddleSolution::PaddleSegModelConfigPaser _model_config;
    std::shared_ptr<PaddleSolution::ImagePreProcessor> _preprocessor;
    std::unique_ptr<paddle::PaddlePredictor> _main_predictor;
};
}  // namespace PaddleSolution
@@ -21,9 +21,10 @@
namespace PaddleSolution {
std::shared_ptr<ImagePreProcessor> create_processor(
    const std::string& conf_file) {
    auto config = std::make_shared<PaddleSolution::
                                   PaddleSegModelConfigPaser>();
    if (!config->load_config(conf_file)) {
        LOG(FATAL) << "fail to load conf file [" << conf_file << "]";
        return nullptr;
@@ -37,9 +38,9 @@ namespace PaddleSolution {
        return p;
    }
    LOG(FATAL) << "unknown processor_name [" << config->_pre_processor
               << "]";
    return nullptr;
}
}  // namespace PaddleSolution
@@ -26,18 +26,19 @@
namespace PaddleSolution {
class ImagePreProcessor {
 protected:
    ImagePreProcessor() {}

 public:
    virtual ~ImagePreProcessor() {}

    virtual bool single_process(const std::string& fname, float* data,
                                int* ori_w, int* ori_h) = 0;

    virtual bool batch_process(const std::vector<std::string>& imgs,
                               float* data, int* ori_w, int* ori_h) = 0;
};  // end of class ImagePreProcessor

std::shared_ptr<ImagePreProcessor> create_processor(
    const std::string &config_file);
}  // namespace PaddleSolution
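The create_processor factory declared above is what Predictor::init calls to build its preprocessor. A minimal standalone use, as a sketch only; the config path and buffer dimensions below are illustrative placeholders, and the buffer must hold channels * width * height floats per the resize settings in the conf file:

// Sketch under assumptions: "conf/seg.yaml" and the 3 x 513 x 513 shape
// are illustrative, not values from this commit.
auto processor = PaddleSolution::create_processor("conf/seg.yaml");
if (processor == nullptr) {
    return -1;  // config failed to load or processor name unknown
}
int ori_w = 0, ori_h = 0;
std::vector<float> buffer(3 * 513 * 513);
processor->single_process("demo1.jpg", buffer.data(), &ori_w, &ori_h);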
@@ -12,21 +12,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "preprocessor_seg.h"
#include <glog/logging.h>
#include <thread>

namespace PaddleSolution {
bool SegPreProcessor::single_process(const std::string& fname,
                                     float* data, int* ori_w, int* ori_h) {
    cv::Mat im = cv::imread(fname, -1);
    if (im.data == nullptr || im.empty()) {
        LOG(ERROR) << "Failed to open image: " << fname;
        return false;
    }
    int channels = im.channels();
    *ori_w = im.cols;
    *ori_h = im.rows;
@@ -50,7 +51,8 @@ namespace PaddleSolution {
    return true;
}

bool SegPreProcessor::batch_process(const std::vector<std::string>& imgs,
                                    float* data, int* ori_w, int* ori_h) {
    auto ic = _config->_channels;
    auto iw = _config->_resize[0];
    auto ih = _config->_resize[1];
@@ -72,9 +74,9 @@ namespace PaddleSolution {
    return true;
}

bool SegPreProcessor::init(
    std::shared_ptr<PaddleSolution::PaddleSegModelConfigPaser> config) {
    _config = config;
    return true;
}
}  // namespace PaddleSolution
@@ -14,25 +14,27 @@
#pragma once

#include <string>
#include <vector>
#include <memory>

#include "preprocessor.h"
#include "utils/utils.h"

namespace PaddleSolution {
class SegPreProcessor : public ImagePreProcessor {
 public:
    SegPreProcessor() : _config(nullptr) {}

    bool init(
        std::shared_ptr<PaddleSolution::PaddleSegModelConfigPaser> config);

    bool single_process(const std::string &fname, float* data,
                        int* ori_w, int* ori_h);

    bool batch_process(const std::vector<std::string>& imgs, float* data,
                       int* ori_w, int* ori_h);

 private:
    std::shared_ptr<PaddleSolution::PaddleSegModelConfigPaser> _config;
};
}  // namespace PaddleSolution
@@ -12,30 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import cv2
import sys

# ColorMap for visualization more clearly
color_map = [[128, 64, 128], [244, 35, 231], [69, 69, 69], [102, 102, 156],
             [190, 153, 153], [153, 153, 153], [250, 170, 29], [219, 219, 0],
             [106, 142, 35], [152, 250, 152], [69, 129, 180], [219, 19, 60],
             [255, 0, 0], [0, 0, 142], [0, 0, 69], [0, 60, 100], [0, 79, 100],
             [0, 0, 230], [119, 10, 32]]

# python visualize.py demo1.jpg demo1_jpg.png vis_result.png
if __name__ == "__main__":
    if len(sys.argv) != 4:
        print(
            "Usage: python visualize.py demo1.jpg demo1_jpg.png vis_result.png")
    else:
        ori_im = cv2.imread(sys.argv[1])
        ori_shape = ori_im.shape
        print(ori_shape)
        im = cv2.imread(sys.argv[2])
        shape = im.shape
        print("visualizing...")
        for i in range(0, shape[0]):
            for j in range(0, shape[1]):
                im[i, j] = color_map[im[i, j, 0]]
        im = cv2.resize(im, (ori_shape[1], ori_shape[0]))
        cv2.imwrite(sys.argv[3], im)
        print("visualizing done!")