Commit 12f5205c authored by S sjtubinlong

c++ infer compatible with new export_model

Parent b17b6c9a
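In short: the deploy config gains a USE_PR switch. With USE_PR on, the image is fed to the predictor in NHWC layout with raw pixel values (utils::flatten_mat instead of mean/std normalization), and the network output is read as an int64 label map that is narrowed to 8-bit and saved directly as *_mask.png. The MKL-DNN link target also moves from libmkldnn.so.0 to libmkldnn.so.1.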
@@ -142,7 +142,7 @@ if(WITH_MKL)
     if (WIN32)
       set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
     else ()
-      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
+      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.1)
     endif ()
   endif()
 else()
...
@@ -24,7 +24,7 @@ int main(int argc, char** argv) {
     google::ParseCommandLineFlags(&argc, &argv, true);
     if (FLAGS_conf.empty() || FLAGS_input_dir.empty()) {
         std::cout << "Usage: ./predictor --conf=/config/path/to/your/model "
-                  << "--input_dir=/directory/of/your/input/images";
+                  << "--input_dir=/directory/of/your/input/images" << std::endl;
         return -1;
     }
     // 1. create a predictor and init it with conf
...
@@ -83,7 +83,6 @@ namespace PaddleSolution {
         int blob_out_len = length;
         int seg_out_len = eval_height * eval_width * eval_num_class;
         if (blob_out_len != seg_out_len) {
             LOG(ERROR) << " [FATAL] unequal: input vs output [" <<
                 seg_out_len << "|" << blob_out_len << "]" << std::endl;
@@ -99,23 +98,20 @@ namespace PaddleSolution {
         std::string nname(fname);
         auto pos = fname.rfind(".");
         nname[pos] = '_';
-        std::string mask_save_name = nname + ".png";
+        std::string mask_save_name = nname + "_mask.png";
         cv::imwrite(mask_save_name, mask_png);
         cv::Mat scoremap_png = cv::Mat(eval_height, eval_width, CV_8UC1);
         scoremap_png.data = _scoremap.data();
-        std::string scoremap_save_name = nname
-                                         + std::string("_scoremap.png");
+        std::string scoremap_save_name = nname + std::string("_scoremap.png");
         cv::imwrite(scoremap_save_name, scoremap_png);
         std::cout << "save mask of [" << fname << "] done" << std::endl;
         if (height && width) {
             int recover_height = *height;
             int recover_width = *width;
-            cv::Mat recover_png = cv::Mat(recover_height,
-                                          recover_width, CV_8UC1);
+            cv::Mat recover_png = cv::Mat(recover_height, recover_width, CV_8UC1);
             cv::resize(scoremap_png, recover_png,
-                       cv::Size(recover_width, recover_height),
-                       0, 0, cv::INTER_CUBIC);
+                       cv::Size(recover_width, recover_height), 0, 0, cv::INTER_CUBIC);
             std::string recover_name = nname + std::string("_recover.png");
             cv::imwrite(recover_name, recover_png);
         }
@@ -176,8 +172,13 @@ namespace PaddleSolution {
             }
             paddle::PaddleTensor im_tensor;
             im_tensor.name = "image";
-            im_tensor.shape = std::vector<int>{ batch_size, channels,
-                                                eval_height, eval_width };
+            if (!_model_config._use_pr) {
+                im_tensor.shape = std::vector<int>{ batch_size, channels,
+                                                    eval_height, eval_width };
+            } else {
+                im_tensor.shape = std::vector<int>{ batch_size, eval_height,
+                                                    eval_width, channels};
+            }
             im_tensor.data.Reset(input_buffer.data(),
                                  real_buffer_size * sizeof(float));
             im_tensor.dtype = paddle::PaddleDType::FLOAT32;
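The hunk above keeps the default NCHW input shape but switches to NHWC when USE_PR is set. As a rough illustration of what the two layouts mean for the flat input buffer, the sketch below computes the offset of element (n, c, h, w) under each layout; the helper names are made up for this note and are not part of the repository. The NHWC formula matches the H, W, C write order used by utils::flatten_mat further down.

#include <cstddef>

// Illustrative only: where element (n, c, h, w) lives in a flat buffer.
// NCHW is the default path; NHWC is the USE_PR path.
inline size_t offset_nchw(int n, int c, int h, int w, int C, int H, int W) {
    return ((static_cast<size_t>(n) * C + c) * H + h) * W + w;
}

inline size_t offset_nhwc(int n, int c, int h, int w, int C, int H, int W) {
    return ((static_cast<size_t>(n) * H + h) * W + w) * C + c;
}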
@@ -202,19 +203,45 @@ namespace PaddleSolution {
                 std::cout << _outputs[0].shape[j] << ",";
             }
             std::cout << ")" << std::endl;
-            const size_t nums = _outputs.front().data.length()
-                                / sizeof(float);
-            if (out_num % batch_size != 0 || out_num != nums) {
-                LOG(ERROR) << "outputs data size mismatch with shape size.";
+            size_t nums = _outputs.front().data.length() / sizeof(float);
+            if (_model_config._use_pr) {
+                nums = _outputs.front().data.length() / sizeof(int64_t);
+            }
+            // size mismatch checking
+            bool size_mismatch = out_num % batch_size;
+            size_mismatch |= (!_model_config._use_pr) && (nums != out_num);
+            size_mismatch |= _model_config._use_pr && (nums != eval_height * eval_width);
+            if (size_mismatch) {
+                LOG(ERROR) << "output with an unexpected size";
                 return -1;
             }
+            if (_model_config._use_pr) {
+                std::vector<uchar> out_data;
+                out_data.resize(out_num);
+                auto addr = reinterpret_cast<int64_t*>(_outputs[0].data.data());
+                for (int r = 0; r < out_num; ++r) {
+                    out_data[r] = (int)(addr[r]);
+                }
+                for (int r = 0; r < batch_size; ++r) {
+                    cv::Mat mask_png = cv::Mat(eval_height, eval_width, CV_8UC1);
+                    mask_png.data = out_data.data() + eval_height*eval_width*r;
+                    auto name = imgs_batch[r];
+                    auto pos = name.rfind(".");
+                    name[pos] = '_';
+                    std::string mask_save_name = name + "_mask.png";
+                    cv::imwrite(mask_save_name, mask_png);
+                }
+                continue;
+            }
             for (int i = 0; i < batch_size; ++i) {
                 float* output_addr = reinterpret_cast<float*>(
                         _outputs[0].data.data())
-                        + i * (out_num / batch_size);
+                        + i * (nums / batch_size);
                 output_mask(imgs_batch[i], output_addr,
-                            out_num / batch_size,
+                            nums / batch_size,
                             &org_height[i],
                             &org_width[i]);
             }
@@ -278,8 +305,14 @@ namespace PaddleSolution {
                 return -1;
             }
             auto im_tensor = _main_predictor->GetInputTensor("image");
-            im_tensor->Reshape({ batch_size, channels,
-                                 eval_height, eval_width });
+            if (!_model_config._use_pr) {
+                im_tensor->Reshape({ batch_size, channels,
+                                     eval_height, eval_width });
+            } else {
+                im_tensor->Reshape({ batch_size, eval_height,
+                                     eval_width, channels});
+            }
             im_tensor->copy_from_cpu(input_buffer.data());

             auto t1 = std::chrono::high_resolution_clock::now();
@@ -292,7 +325,6 @@ namespace PaddleSolution {
             auto output_names = _main_predictor->GetOutputNames();
             auto output_t = _main_predictor->GetOutputTensor(
                     output_names[0]);
-            std::vector<float> out_data;
             std::vector<int> output_shape = output_t->shape();

             int out_num = 1;
@@ -303,6 +335,30 @@ namespace PaddleSolution {
             }
             std::cout << ")" << std::endl;
+            if (_model_config._use_pr) {
+                std::vector<int64_t> out_data;
+                out_data.resize(out_num);
+                output_t->copy_to_cpu(out_data.data());
+                std::vector<uchar> mask_data;
+                mask_data.resize(out_num);
+                auto addr = reinterpret_cast<int64_t*>(out_data.data());
+                for (int r = 0; r < out_num; ++r) {
+                    mask_data[r] = (int)(addr[r]);
+                }
+                for (int r = 0; r < batch_size; ++r) {
+                    cv::Mat mask_png = cv::Mat(eval_height, eval_width, CV_8UC1);
+                    mask_png.data = mask_data.data() + eval_height*eval_width*r;
+                    auto name = imgs_batch[r];
+                    auto pos = name.rfind(".");
+                    name[pos] = '_';
+                    std::string mask_save_name = name + "_mask.png";
+                    cv::imwrite(mask_save_name, mask_png);
+                }
+                continue;
+            }
+            std::vector<float> out_data;
             out_data.resize(out_num);
             output_t->copy_to_cpu(out_data.data());
             for (int i = 0; i < batch_size; ++i) {
...
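In both USE_PR branches above, the output tensor is treated as an int64 label map (one class id per pixel) and narrowed to 8-bit before being written out as *_mask.png. A standalone sketch of that conversion, with hypothetical names and an element-wise copy instead of pointing cv::Mat::data at a temporary buffer:

#include <cstdint>
#include <string>
#include <opencv2/opencv.hpp>

// Illustrative only: save one eval_height x eval_width int64 label map
// as an 8-bit single-channel PNG mask.
inline void save_label_map_as_png(const int64_t* labels,
                                  int eval_height, int eval_width,
                                  const std::string& save_name) {
    cv::Mat mask(eval_height, eval_width, CV_8UC1);
    for (int h = 0; h < eval_height; ++h) {
        for (int w = 0; w < eval_width; ++w) {
            mask.at<uchar>(h, w) =
                static_cast<uchar>(labels[h * eval_width + w]);
        }
    }
    cv::imwrite(save_name, mask);
}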
@@ -40,14 +40,18 @@ namespace PaddleSolution {
             LOG(ERROR) << "Only support rgb(gray) and rgba image.";
             return false;
         }
         cv::Size resize_size(_config->_resize[0], _config->_resize[1]);
         int rw = resize_size.width;
         int rh = resize_size.height;
         if (*ori_h != rh || *ori_w != rw) {
             cv::resize(im, im, resize_size, 0, 0, cv::INTER_LINEAR);
         }
-        utils::normalize(im, data, _config->_mean, _config->_std);
+        if (!_config->_use_pr) {
+            utils::normalize(im, data, _config->_mean, _config->_std);
+        } else {
+            utils::flatten_mat(im, data);
+        }
         return true;
     }
...
@@ -25,6 +25,7 @@ class PaddleSegModelConfigPaser {
         :_class_num(0),
         _channels(0),
         _use_gpu(0),
+        _use_pr(0),
         _batch_size(1),
         _model_file_name("__model__"),
         _param_file_name("__params__") {
@@ -40,6 +41,7 @@ class PaddleSegModelConfigPaser {
         _class_num = 0;
         _channels = 0;
         _use_gpu = 0;
+        _use_pr = 0;
         _batch_size = 1;
         _model_file_name.clear();
         _model_path.clear();
@@ -172,6 +174,12 @@ class PaddleSegModelConfigPaser {
             std::cerr << "Please set CHANNELS: x" << std::endl;
             return false;
         }
+        // 15. use_pr
+        if (config["DEPLOY"]["USE_PR"].IsDefined()) {
+            _use_pr = config["DEPLOY"]["USE_PR"].as<int>();
+        } else {
+            _use_pr = 0;
+        }
         return true;
     }
@@ -238,6 +246,8 @@ class PaddleSegModelConfigPaser {
     std::string _predictor_mode;
     // DEPLOY.BATCH_SIZE
     int _batch_size;
+    // USE_PR: OP Optimized model
+    int _use_pr;
 };
 }  // namespace PaddleSolution
@@ -103,6 +103,24 @@ namespace utils {
         }
     }

+    // flatten a cv::mat
+    inline void flatten_mat(cv::Mat& im, float* data) {
+        int rh = im.rows;
+        int rw = im.cols;
+        int rc = im.channels();
+        int top_index = 0;
+        for (int h = 0; h < rh; ++h) {
+            const uchar* ptr = im.ptr<uchar>(h);
+            int im_index = 0;
+            for (int w = 0; w < rw; ++w) {
+                for (int c = 0; c < rc; ++c) {
+                    float pixel = static_cast<float>(ptr[im_index++]);
+                    data[top_index++] = pixel;
+                }
+            }
+        }
+    }
+
     // argmax
     inline void argmax(float* out, std::vector<int>& shape,
         std::vector<uchar>& mask, std::vector<uchar>& scoremap) {
...
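flatten_mat above copies raw uchar pixels into a float buffer in H, W, C order without mean/std normalization, which is what the USE_PR preprocessing path feeds to the NHWC input tensor. A minimal usage sketch; the include path and the PaddleSolution::utils nesting are assumptions, not verified here:

#include <vector>
#include <opencv2/opencv.hpp>
// #include "utils.h"  // header that declares flatten_mat (path assumed)

int main() {
    // A 2x3 BGR image where every pixel is (1, 2, 3).
    cv::Mat im(2, 3, CV_8UC3, cv::Scalar(1, 2, 3));
    // One float per channel value, written in H, W, C order.
    std::vector<float> buffer(im.rows * im.cols * im.channels());
    PaddleSolution::utils::flatten_mat(im, buffer.data());  // namespace assumed
    // buffer now holds 1, 2, 3, 1, 2, 3, ... as floats.
    return 0;
}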